xref: /openbmc/linux/drivers/md/md.c (revision e4781421e883340b796da5a724bda7226817990b)
1 /*
2    md.c : Multiple Devices driver for Linux
3      Copyright (C) 1998, 1999, 2000 Ingo Molnar
4 
5      completely rewritten, based on the MD driver code from Marc Zyngier
6 
7    Changes:
8 
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16 
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19 
20      Neil Brown <neilb@cse.unsw.edu.au>.
21 
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24 
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29 
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 
34    Errors, Warnings, etc.
35    Please use:
36      pr_crit() for error conditions that risk data loss
37      pr_err() for error conditions that are unexpected, like an IO error
38          or internal inconsistency
39      pr_warn() for error conditions that could have been predicted, like
40          adding a device to an array when it has incompatible metadata
41      pr_info() for interesting, very rare events, like an array starting
42          or stopping, or resync starting or stopping
43      pr_debug() for everything else.
44 
45 */
46 
47 #include <linux/kthread.h>
48 #include <linux/blkdev.h>
49 #include <linux/badblocks.h>
50 #include <linux/sysctl.h>
51 #include <linux/seq_file.h>
52 #include <linux/fs.h>
53 #include <linux/poll.h>
54 #include <linux/ctype.h>
55 #include <linux/string.h>
56 #include <linux/hdreg.h>
57 #include <linux/proc_fs.h>
58 #include <linux/random.h>
59 #include <linux/module.h>
60 #include <linux/reboot.h>
61 #include <linux/file.h>
62 #include <linux/compat.h>
63 #include <linux/delay.h>
64 #include <linux/raid/md_p.h>
65 #include <linux/raid/md_u.h>
66 #include <linux/slab.h>
67 #include <trace/events/block.h>
68 #include "md.h"
69 #include "bitmap.h"
70 #include "md-cluster.h"
71 
72 #ifndef MODULE
73 static void autostart_arrays(int part);
74 #endif
75 
76 /* pers_list is a list of registered personalities protected
77  * by pers_lock.
78  * pers_lock also protects accesses to
79  * mddev->thread when the reconfig mutex cannot be held.
80  */
81 static LIST_HEAD(pers_list);
82 static DEFINE_SPINLOCK(pers_lock);
83 
84 struct md_cluster_operations *md_cluster_ops;
85 EXPORT_SYMBOL(md_cluster_ops);
86 struct module *md_cluster_mod;
87 EXPORT_SYMBOL(md_cluster_mod);
88 
89 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
90 static struct workqueue_struct *md_wq;
91 static struct workqueue_struct *md_misc_wq;
92 
93 static int remove_and_add_spares(struct mddev *mddev,
94 				 struct md_rdev *this);
95 static void mddev_detach(struct mddev *mddev);
96 
97 /*
98  * Default number of read corrections we'll attempt on an rdev
99  * before ejecting it from the array. We divide the read error
100  * count by 2 for every hour elapsed between read errors.
101  */
102 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
103 /*
104  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
105  * is 1000 KB/sec, so the extra system load does not show up that much.
106  * Increase it if you want to have more _guaranteed_ speed. Note that
107  * the RAID driver will use the maximum available bandwidth if the IO
108  * subsystem is idle. There is also an 'absolute maximum' reconstruction
109  * speed limit - in case reconstruction slows down your system despite
110  * idle IO detection.
111  *
112  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
113  * or /sys/block/mdX/md/sync_speed_{min,max}
114  */
115 
116 static int sysctl_speed_limit_min = 1000;
117 static int sysctl_speed_limit_max = 200000;
118 static inline int speed_min(struct mddev *mddev)
119 {
120 	return mddev->sync_speed_min ?
121 		mddev->sync_speed_min : sysctl_speed_limit_min;
122 }
123 
124 static inline int speed_max(struct mddev *mddev)
125 {
126 	return mddev->sync_speed_max ?
127 		mddev->sync_speed_max : sysctl_speed_limit_max;
128 }
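/*
 * Illustrative note (added commentary, not from the original source): the
 * per-array sysfs knobs take precedence over the global sysctls above.
 * With the defaults, an array whose sync_speed_min is left at 0 resyncs at
 * no less than
 *
 *	speed_min(mddev) == sysctl_speed_limit_min == 1000 KB/sec
 *
 * whereas after e.g. "echo 50000 > /sys/block/md0/md/sync_speed_min"
 * speed_min() returns 50000 for that array only.
 */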
129 
130 static struct ctl_table_header *raid_table_header;
131 
132 static struct ctl_table raid_table[] = {
133 	{
134 		.procname	= "speed_limit_min",
135 		.data		= &sysctl_speed_limit_min,
136 		.maxlen		= sizeof(int),
137 		.mode		= S_IRUGO|S_IWUSR,
138 		.proc_handler	= proc_dointvec,
139 	},
140 	{
141 		.procname	= "speed_limit_max",
142 		.data		= &sysctl_speed_limit_max,
143 		.maxlen		= sizeof(int),
144 		.mode		= S_IRUGO|S_IWUSR,
145 		.proc_handler	= proc_dointvec,
146 	},
147 	{ }
148 };
149 
150 static struct ctl_table raid_dir_table[] = {
151 	{
152 		.procname	= "raid",
153 		.maxlen		= 0,
154 		.mode		= S_IRUGO|S_IXUGO,
155 		.child		= raid_table,
156 	},
157 	{ }
158 };
159 
160 static struct ctl_table raid_root_table[] = {
161 	{
162 		.procname	= "dev",
163 		.maxlen		= 0,
164 		.mode		= 0555,
165 		.child		= raid_dir_table,
166 	},
167 	{  }
168 };
169 
170 static const struct block_device_operations md_fops;
171 
172 static int start_readonly;
173 
174 /* bio_clone_mddev
175  * like bio_clone, but with a local bio set
176  */
177 
178 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
179 			    struct mddev *mddev)
180 {
181 	struct bio *b;
182 
183 	if (!mddev || !mddev->bio_set)
184 		return bio_alloc(gfp_mask, nr_iovecs);
185 
186 	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
187 	if (!b)
188 		return NULL;
189 	return b;
190 }
191 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
192 
193 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
194 			    struct mddev *mddev)
195 {
196 	if (!mddev || !mddev->bio_set)
197 		return bio_clone(bio, gfp_mask);
198 
199 	return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
200 }
201 EXPORT_SYMBOL_GPL(bio_clone_mddev);
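/*
 * Background note (added commentary): both helpers above prefer the
 * per-array mddev->bio_set so that, once an array is running, bio
 * allocation has a dedicated mempool and can make forward progress under
 * memory pressure; they fall back to bio_alloc()/bio_clone() only while
 * the array has no bio_set yet.
 */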
202 
203 /*
204  * We have a system wide 'event count' that is incremented
205  * on any 'interesting' event, and readers of /proc/mdstat
206  * can use 'poll' or 'select' to find out when the event
207  * count increases.
208  *
209  * Events are:
210  *  start array, stop array, error, add device, remove device,
211  *  start build, activate spare
212  */
213 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
214 static atomic_t md_event_count;
215 void md_new_event(struct mddev *mddev)
216 {
217 	atomic_inc(&md_event_count);
218 	wake_up(&md_event_waiters);
219 }
220 EXPORT_SYMBOL_GPL(md_new_event);
221 
222 /*
223  * Enables iteration over all existing md arrays.
224  * all_mddevs_lock protects this list.
225  */
226 static LIST_HEAD(all_mddevs);
227 static DEFINE_SPINLOCK(all_mddevs_lock);
228 
229 /*
230  * iterates through all used mddevs in the system.
231  * We take care to grab the all_mddevs_lock whenever navigating
232  * the list, and to always hold a refcount when unlocked.
233  * Any code which breaks out of this loop still holds
234  * a reference to the current mddev and must mddev_put it.
235  */
236 #define for_each_mddev(_mddev,_tmp)					\
237 									\
238 	for (({ spin_lock(&all_mddevs_lock);				\
239 		_tmp = all_mddevs.next;					\
240 		_mddev = NULL;});					\
241 	     ({ if (_tmp != &all_mddevs)				\
242 			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
243 		spin_unlock(&all_mddevs_lock);				\
244 		if (_mddev) mddev_put(_mddev);				\
245 		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
246 		_tmp != &all_mddevs;});					\
247 	     ({ spin_lock(&all_mddevs_lock);				\
248 		_tmp = _tmp->next;})					\
249 		)
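/*
 * Illustrative usage sketch (not part of the driver):
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_debug("md: visiting %s\n", mdname(mddev));
 *
 * Breaking out of the loop leaves a reference held on the current mddev,
 * which the caller must then drop with mddev_put().
 */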
250 
251 /* Rather than calling directly into the personality make_request function,
252  * IO requests come here first so that we can check if the device is
253  * being suspended pending a reconfiguration.
254  * We hold a refcount over the call to ->make_request.  By the time that
255  * call has finished, the bio has been linked into some internal structure
256  * and so is visible to ->quiesce(), so we don't need the refcount any more.
257  */
258 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
259 {
260 	const int rw = bio_data_dir(bio);
261 	struct mddev *mddev = q->queuedata;
262 	unsigned int sectors;
263 	int cpu;
264 
265 	blk_queue_split(q, &bio, q->bio_split);
266 
267 	if (mddev == NULL || mddev->pers == NULL) {
268 		bio_io_error(bio);
269 		return BLK_QC_T_NONE;
270 	}
271 	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
272 		if (bio_sectors(bio) != 0)
273 			bio->bi_error = -EROFS;
274 		bio_endio(bio);
275 		return BLK_QC_T_NONE;
276 	}
277 	smp_rmb(); /* Ensure implications of 'active' are visible */
278 	rcu_read_lock();
279 	if (mddev->suspended) {
280 		DEFINE_WAIT(__wait);
281 		for (;;) {
282 			prepare_to_wait(&mddev->sb_wait, &__wait,
283 					TASK_UNINTERRUPTIBLE);
284 			if (!mddev->suspended)
285 				break;
286 			rcu_read_unlock();
287 			schedule();
288 			rcu_read_lock();
289 		}
290 		finish_wait(&mddev->sb_wait, &__wait);
291 	}
292 	atomic_inc(&mddev->active_io);
293 	rcu_read_unlock();
294 
295 	/*
296 	 * save the sectors now since our bio can
297 	 * go away inside make_request
298 	 */
299 	sectors = bio_sectors(bio);
300 	/* the bio may become mergeable after being passed to the underlying layer */
301 	bio->bi_opf &= ~REQ_NOMERGE;
302 	mddev->pers->make_request(mddev, bio);
303 
304 	cpu = part_stat_lock();
305 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
306 	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
307 	part_stat_unlock();
308 
309 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
310 		wake_up(&mddev->sb_wait);
311 
312 	return BLK_QC_T_NONE;
313 }
314 
315 /* mddev_suspend makes sure no new requests are submitted
316  * to the device, and that any requests that have been submitted
317  * are completely handled.
318  * Once mddev_detach() is called and completes, the module will be
319  * completely unused.
320  */
321 void mddev_suspend(struct mddev *mddev)
322 {
323 	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
324 	if (mddev->suspended++)
325 		return;
326 	synchronize_rcu();
327 	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
328 	mddev->pers->quiesce(mddev, 1);
329 
330 	del_timer_sync(&mddev->safemode_timer);
331 }
332 EXPORT_SYMBOL_GPL(mddev_suspend);
333 
334 void mddev_resume(struct mddev *mddev)
335 {
336 	if (--mddev->suspended)
337 		return;
338 	wake_up(&mddev->sb_wait);
339 	mddev->pers->quiesce(mddev, 0);
340 
341 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
342 	md_wakeup_thread(mddev->thread);
343 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
344 }
345 EXPORT_SYMBOL_GPL(mddev_resume);
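/*
 * Typical caller pattern (illustrative only), e.g. when a personality
 * needs to change configuration that must not race with in-flight I/O:
 *
 *	mddev_suspend(mddev);
 *	... reconfigure ...
 *	mddev_resume(mddev);
 */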
346 
347 int mddev_congested(struct mddev *mddev, int bits)
348 {
349 	struct md_personality *pers = mddev->pers;
350 	int ret = 0;
351 
352 	rcu_read_lock();
353 	if (mddev->suspended)
354 		ret = 1;
355 	else if (pers && pers->congested)
356 		ret = pers->congested(mddev, bits);
357 	rcu_read_unlock();
358 	return ret;
359 }
360 EXPORT_SYMBOL_GPL(mddev_congested);
361 static int md_congested(void *data, int bits)
362 {
363 	struct mddev *mddev = data;
364 	return mddev_congested(mddev, bits);
365 }
366 
367 /*
368  * Generic flush handling for md
369  */
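/*
 * Added summary (commentary) of the flow below: md_flush_request() parks
 * the incoming REQ_PREFLUSH bio in mddev->flush_bio, submit_flushes()
 * sends an empty flush to every active, non-faulty member device, and once
 * all of those complete md_submit_flush_data() either ends an empty
 * barrier bio or re-issues the data portion with REQ_PREFLUSH stripped.
 */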
370 
371 static void md_end_flush(struct bio *bio)
372 {
373 	struct md_rdev *rdev = bio->bi_private;
374 	struct mddev *mddev = rdev->mddev;
375 
376 	rdev_dec_pending(rdev, mddev);
377 
378 	if (atomic_dec_and_test(&mddev->flush_pending)) {
379 		/* The pre-request flush has finished */
380 		queue_work(md_wq, &mddev->flush_work);
381 	}
382 	bio_put(bio);
383 }
384 
385 static void md_submit_flush_data(struct work_struct *ws);
386 
387 static void submit_flushes(struct work_struct *ws)
388 {
389 	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
390 	struct md_rdev *rdev;
391 
392 	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
393 	atomic_set(&mddev->flush_pending, 1);
394 	rcu_read_lock();
395 	rdev_for_each_rcu(rdev, mddev)
396 		if (rdev->raid_disk >= 0 &&
397 		    !test_bit(Faulty, &rdev->flags)) {
398 			/* Take two references: one is dropped
399 			 * when the request finishes, the other after
400 			 * we re-take the rcu_read_lock
401 			 */
402 			struct bio *bi;
403 			atomic_inc(&rdev->nr_pending);
404 			atomic_inc(&rdev->nr_pending);
405 			rcu_read_unlock();
406 			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
407 			bi->bi_end_io = md_end_flush;
408 			bi->bi_private = rdev;
409 			bi->bi_bdev = rdev->bdev;
410 			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
411 			atomic_inc(&mddev->flush_pending);
412 			submit_bio(bi);
413 			rcu_read_lock();
414 			rdev_dec_pending(rdev, mddev);
415 		}
416 	rcu_read_unlock();
417 	if (atomic_dec_and_test(&mddev->flush_pending))
418 		queue_work(md_wq, &mddev->flush_work);
419 }
420 
421 static void md_submit_flush_data(struct work_struct *ws)
422 {
423 	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
424 	struct bio *bio = mddev->flush_bio;
425 
426 	if (bio->bi_iter.bi_size == 0)
427 		/* an empty barrier - all done */
428 		bio_endio(bio);
429 	else {
430 		bio->bi_opf &= ~REQ_PREFLUSH;
431 		mddev->pers->make_request(mddev, bio);
432 	}
433 
434 	mddev->flush_bio = NULL;
435 	wake_up(&mddev->sb_wait);
436 }
437 
438 void md_flush_request(struct mddev *mddev, struct bio *bio)
439 {
440 	spin_lock_irq(&mddev->lock);
441 	wait_event_lock_irq(mddev->sb_wait,
442 			    !mddev->flush_bio,
443 			    mddev->lock);
444 	mddev->flush_bio = bio;
445 	spin_unlock_irq(&mddev->lock);
446 
447 	INIT_WORK(&mddev->flush_work, submit_flushes);
448 	queue_work(md_wq, &mddev->flush_work);
449 }
450 EXPORT_SYMBOL(md_flush_request);
451 
452 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
453 {
454 	struct mddev *mddev = cb->data;
455 	md_wakeup_thread(mddev->thread);
456 	kfree(cb);
457 }
458 EXPORT_SYMBOL(md_unplug);
459 
460 static inline struct mddev *mddev_get(struct mddev *mddev)
461 {
462 	atomic_inc(&mddev->active);
463 	return mddev;
464 }
465 
466 static void mddev_delayed_delete(struct work_struct *ws);
467 
468 static void mddev_put(struct mddev *mddev)
469 {
470 	struct bio_set *bs = NULL;
471 
472 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
473 		return;
474 	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
475 	    mddev->ctime == 0 && !mddev->hold_active) {
476 		/* Array is not configured at all, and not held active,
477 		 * so destroy it */
478 		list_del_init(&mddev->all_mddevs);
479 		bs = mddev->bio_set;
480 		mddev->bio_set = NULL;
481 		if (mddev->gendisk) {
482 			/* We did a probe so need to clean up.  Call
483 			 * queue_work inside the spinlock so that
484 			 * flush_workqueue() after mddev_find will
485 			 * succeed in waiting for the work to be done.
486 			 */
487 			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
488 			queue_work(md_misc_wq, &mddev->del_work);
489 		} else
490 			kfree(mddev);
491 	}
492 	spin_unlock(&all_mddevs_lock);
493 	if (bs)
494 		bioset_free(bs);
495 }
496 
497 static void md_safemode_timeout(unsigned long data);
498 
499 void mddev_init(struct mddev *mddev)
500 {
501 	mutex_init(&mddev->open_mutex);
502 	mutex_init(&mddev->reconfig_mutex);
503 	mutex_init(&mddev->bitmap_info.mutex);
504 	INIT_LIST_HEAD(&mddev->disks);
505 	INIT_LIST_HEAD(&mddev->all_mddevs);
506 	setup_timer(&mddev->safemode_timer, md_safemode_timeout,
507 		    (unsigned long) mddev);
508 	atomic_set(&mddev->active, 1);
509 	atomic_set(&mddev->openers, 0);
510 	atomic_set(&mddev->active_io, 0);
511 	spin_lock_init(&mddev->lock);
512 	atomic_set(&mddev->flush_pending, 0);
513 	init_waitqueue_head(&mddev->sb_wait);
514 	init_waitqueue_head(&mddev->recovery_wait);
515 	mddev->reshape_position = MaxSector;
516 	mddev->reshape_backwards = 0;
517 	mddev->last_sync_action = "none";
518 	mddev->resync_min = 0;
519 	mddev->resync_max = MaxSector;
520 	mddev->level = LEVEL_NONE;
521 }
522 EXPORT_SYMBOL_GPL(mddev_init);
523 
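/*
 * Added summary: look up the mddev for @unit, allocating and registering a
 * new one when none exists.  A unit of 0 asks for a brand new array on an
 * unused minor.  The kzalloc()-then-"goto retry" structure below keeps the
 * allocation outside all_mddevs_lock.
 */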
524 static struct mddev *mddev_find(dev_t unit)
525 {
526 	struct mddev *mddev, *new = NULL;
527 
528 	if (unit && MAJOR(unit) != MD_MAJOR)
529 		unit &= ~((1<<MdpMinorShift)-1);
530 
531  retry:
532 	spin_lock(&all_mddevs_lock);
533 
534 	if (unit) {
535 		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
536 			if (mddev->unit == unit) {
537 				mddev_get(mddev);
538 				spin_unlock(&all_mddevs_lock);
539 				kfree(new);
540 				return mddev;
541 			}
542 
543 		if (new) {
544 			list_add(&new->all_mddevs, &all_mddevs);
545 			spin_unlock(&all_mddevs_lock);
546 			new->hold_active = UNTIL_IOCTL;
547 			return new;
548 		}
549 	} else if (new) {
550 		/* find an unused unit number */
551 		static int next_minor = 512;
552 		int start = next_minor;
553 		int is_free = 0;
554 		int dev = 0;
555 		while (!is_free) {
556 			dev = MKDEV(MD_MAJOR, next_minor);
557 			next_minor++;
558 			if (next_minor > MINORMASK)
559 				next_minor = 0;
560 			if (next_minor == start) {
561 				/* Oh dear, all in use. */
562 				spin_unlock(&all_mddevs_lock);
563 				kfree(new);
564 				return NULL;
565 			}
566 
567 			is_free = 1;
568 			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
569 				if (mddev->unit == dev) {
570 					is_free = 0;
571 					break;
572 				}
573 		}
574 		new->unit = dev;
575 		new->md_minor = MINOR(dev);
576 		new->hold_active = UNTIL_STOP;
577 		list_add(&new->all_mddevs, &all_mddevs);
578 		spin_unlock(&all_mddevs_lock);
579 		return new;
580 	}
581 	spin_unlock(&all_mddevs_lock);
582 
583 	new = kzalloc(sizeof(*new), GFP_KERNEL);
584 	if (!new)
585 		return NULL;
586 
587 	new->unit = unit;
588 	if (MAJOR(unit) == MD_MAJOR)
589 		new->md_minor = MINOR(unit);
590 	else
591 		new->md_minor = MINOR(unit) >> MdpMinorShift;
592 
593 	mddev_init(new);
594 
595 	goto retry;
596 }
597 
598 static struct attribute_group md_redundancy_group;
599 
600 void mddev_unlock(struct mddev *mddev)
601 {
602 	if (mddev->to_remove) {
603 		/* These cannot be removed under reconfig_mutex as
604 		 * an access to the files will try to take reconfig_mutex
605 		 * while holding the file unremovable, which leads to
606 		 * a deadlock.
607 		 * So set sysfs_active while the remove is happening,
608 		 * and anything else which might set ->to_remove or may
609 		 * otherwise change the sysfs namespace will fail with
610 		 * -EBUSY if sysfs_active is still set.
611 		 * We set sysfs_active under reconfig_mutex and elsewhere
612 		 * test it under the same mutex to ensure its correct value
613 		 * is seen.
614 		 */
615 		struct attribute_group *to_remove = mddev->to_remove;
616 		mddev->to_remove = NULL;
617 		mddev->sysfs_active = 1;
618 		mutex_unlock(&mddev->reconfig_mutex);
619 
620 		if (mddev->kobj.sd) {
621 			if (to_remove != &md_redundancy_group)
622 				sysfs_remove_group(&mddev->kobj, to_remove);
623 			if (mddev->pers == NULL ||
624 			    mddev->pers->sync_request == NULL) {
625 				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
626 				if (mddev->sysfs_action)
627 					sysfs_put(mddev->sysfs_action);
628 				mddev->sysfs_action = NULL;
629 			}
630 		}
631 		mddev->sysfs_active = 0;
632 	} else
633 		mutex_unlock(&mddev->reconfig_mutex);
634 
635 	/* As we've dropped the mutex we need a spinlock to
636 	 * make sure the thread doesn't disappear
637 	 */
638 	spin_lock(&pers_lock);
639 	md_wakeup_thread(mddev->thread);
640 	spin_unlock(&pers_lock);
641 }
642 EXPORT_SYMBOL_GPL(mddev_unlock);
643 
644 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
645 {
646 	struct md_rdev *rdev;
647 
648 	rdev_for_each_rcu(rdev, mddev)
649 		if (rdev->desc_nr == nr)
650 			return rdev;
651 
652 	return NULL;
653 }
654 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
655 
656 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
657 {
658 	struct md_rdev *rdev;
659 
660 	rdev_for_each(rdev, mddev)
661 		if (rdev->bdev->bd_dev == dev)
662 			return rdev;
663 
664 	return NULL;
665 }
666 
667 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
668 {
669 	struct md_rdev *rdev;
670 
671 	rdev_for_each_rcu(rdev, mddev)
672 		if (rdev->bdev->bd_dev == dev)
673 			return rdev;
674 
675 	return NULL;
676 }
677 
678 static struct md_personality *find_pers(int level, char *clevel)
679 {
680 	struct md_personality *pers;
681 	list_for_each_entry(pers, &pers_list, list) {
682 		if (level != LEVEL_NONE && pers->level == level)
683 			return pers;
684 		if (strcmp(pers->name, clevel)==0)
685 			return pers;
686 	}
687 	return NULL;
688 }
689 
690 /* return the offset of the super block in 512-byte sectors */
691 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
692 {
693 	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
694 	return MD_NEW_SIZE_SECTORS(num_sectors);
695 }
696 
697 static int alloc_disk_sb(struct md_rdev *rdev)
698 {
699 	rdev->sb_page = alloc_page(GFP_KERNEL);
700 	if (!rdev->sb_page)
701 		return -ENOMEM;
702 	return 0;
703 }
704 
705 void md_rdev_clear(struct md_rdev *rdev)
706 {
707 	if (rdev->sb_page) {
708 		put_page(rdev->sb_page);
709 		rdev->sb_loaded = 0;
710 		rdev->sb_page = NULL;
711 		rdev->sb_start = 0;
712 		rdev->sectors = 0;
713 	}
714 	if (rdev->bb_page) {
715 		put_page(rdev->bb_page);
716 		rdev->bb_page = NULL;
717 	}
718 	badblocks_exit(&rdev->badblocks);
719 }
720 EXPORT_SYMBOL_GPL(md_rdev_clear);
721 
722 static void super_written(struct bio *bio)
723 {
724 	struct md_rdev *rdev = bio->bi_private;
725 	struct mddev *mddev = rdev->mddev;
726 
727 	if (bio->bi_error) {
728 		pr_err("md: super_written gets error=%d\n", bio->bi_error);
729 		md_error(mddev, rdev);
730 		if (!test_bit(Faulty, &rdev->flags)
731 		    && (bio->bi_opf & MD_FAILFAST)) {
732 			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
733 			set_bit(LastDev, &rdev->flags);
734 		}
735 	} else
736 		clear_bit(LastDev, &rdev->flags);
737 
738 	if (atomic_dec_and_test(&mddev->pending_writes))
739 		wake_up(&mddev->sb_wait);
740 	rdev_dec_pending(rdev, mddev);
741 	bio_put(bio);
742 }
743 
744 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
745 		   sector_t sector, int size, struct page *page)
746 {
747 	/* write the first 'size' bytes of 'page' to 'sector' of rdev.
748 	 * Increment mddev->pending_writes before returning
749 	 * and decrement it on completion, waking up sb_wait
750 	 * if zero is reached.
751 	 * If an error occurred, call md_error
752 	 */
753 	struct bio *bio;
754 	int ff = 0;
755 
756 	if (test_bit(Faulty, &rdev->flags))
757 		return;
758 
759 	bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
760 
761 	atomic_inc(&rdev->nr_pending);
762 
763 	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
764 	bio->bi_iter.bi_sector = sector;
765 	bio_add_page(bio, page, size, 0);
766 	bio->bi_private = rdev;
767 	bio->bi_end_io = super_written;
768 
769 	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
770 	    test_bit(FailFast, &rdev->flags) &&
771 	    !test_bit(LastDev, &rdev->flags))
772 		ff = MD_FAILFAST;
773 	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
774 
775 	atomic_inc(&mddev->pending_writes);
776 	submit_bio(bio);
777 }
778 
779 int md_super_wait(struct mddev *mddev)
780 {
781 	/* wait for all superblock writes that were scheduled to complete */
782 	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
783 	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
784 		return -EAGAIN;
785 	return 0;
786 }
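/*
 * Illustrative write-then-wait pattern (condensed from callers elsewhere
 * in this file):
 *
 *	do {
 *		rdev_for_each(rdev, mddev)
 *			md_super_write(mddev, rdev, rdev->sb_start,
 *				       rdev->sb_size, rdev->sb_page);
 *	} while (md_super_wait(mddev) < 0);
 *
 * md_super_wait() returns -EAGAIN when a failfast write needs to be
 * retried without MD_FAILFAST.
 */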
787 
788 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
789 		 struct page *page, int op, int op_flags, bool metadata_op)
790 {
791 	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
792 	int ret;
793 
794 	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
795 		rdev->meta_bdev : rdev->bdev;
796 	bio_set_op_attrs(bio, op, op_flags);
797 	if (metadata_op)
798 		bio->bi_iter.bi_sector = sector + rdev->sb_start;
799 	else if (rdev->mddev->reshape_position != MaxSector &&
800 		 (rdev->mddev->reshape_backwards ==
801 		  (sector >= rdev->mddev->reshape_position)))
802 		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
803 	else
804 		bio->bi_iter.bi_sector = sector + rdev->data_offset;
805 	bio_add_page(bio, page, size, 0);
806 
807 	submit_bio_wait(bio);
808 
809 	ret = !bio->bi_error;
810 	bio_put(bio);
811 	return ret;
812 }
813 EXPORT_SYMBOL_GPL(sync_page_io);
814 
815 static int read_disk_sb(struct md_rdev *rdev, int size)
816 {
817 	char b[BDEVNAME_SIZE];
818 
819 	if (rdev->sb_loaded)
820 		return 0;
821 
822 	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
823 		goto fail;
824 	rdev->sb_loaded = 1;
825 	return 0;
826 
827 fail:
828 	pr_err("md: disabled device %s, could not read superblock.\n",
829 	       bdevname(rdev->bdev,b));
830 	return -EINVAL;
831 }
832 
833 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
834 {
835 	return	sb1->set_uuid0 == sb2->set_uuid0 &&
836 		sb1->set_uuid1 == sb2->set_uuid1 &&
837 		sb1->set_uuid2 == sb2->set_uuid2 &&
838 		sb1->set_uuid3 == sb2->set_uuid3;
839 }
840 
841 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
842 {
843 	int ret;
844 	mdp_super_t *tmp1, *tmp2;
845 
846 	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
847 	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
848 
849 	if (!tmp1 || !tmp2) {
850 		ret = 0;
851 		goto abort;
852 	}
853 
854 	*tmp1 = *sb1;
855 	*tmp2 = *sb2;
856 
857 	/*
858 	 * nr_disks is not constant
859 	 */
860 	tmp1->nr_disks = 0;
861 	tmp2->nr_disks = 0;
862 
863 	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
864 abort:
865 	kfree(tmp1);
866 	kfree(tmp2);
867 	return ret;
868 }
869 
870 static u32 md_csum_fold(u32 csum)
871 {
872 	csum = (csum & 0xffff) + (csum >> 16);
873 	return (csum & 0xffff) + (csum >> 16);
874 }
875 
876 static unsigned int calc_sb_csum(mdp_super_t *sb)
877 {
878 	u64 newcsum = 0;
879 	u32 *sb32 = (u32*)sb;
880 	int i;
881 	unsigned int disk_csum, csum;
882 
883 	disk_csum = sb->sb_csum;
884 	sb->sb_csum = 0;
885 
886 	for (i = 0; i < MD_SB_BYTES/4 ; i++)
887 		newcsum += sb32[i];
888 	csum = (newcsum & 0xffffffff) + (newcsum>>32);
889 
890 #ifdef CONFIG_ALPHA
891 	/* This used to use csum_partial, which was wrong for several
892 	 * reasons including that different results are returned on
893 	 * different architectures.  It isn't critical that we get exactly
894 	 * the same return value as before (we always csum_fold before
895 	 * testing, and that removes any differences).  However as we
896 	 * know that csum_partial always returned a 16bit value on
897 	 * alphas, do a fold to maximise conformity to previous behaviour.
898 	 */
899 	sb->sb_csum = md_csum_fold(disk_csum);
900 #else
901 	sb->sb_csum = disk_csum;
902 #endif
903 	return csum;
904 }
905 
906 /*
907  * Handle superblock details.
908  * We want to be able to handle multiple superblock formats
909  * so we have a common interface to them all, and an array of
910  * different handlers.
911  * We rely on user-space to write the initial superblock, and support
912  * reading and updating of superblocks.
913  * Interface methods are:
914  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
915  *      loads and validates a superblock on dev.
916  *      if refdev != NULL, compare superblocks on both devices
917  *    Return:
918  *      0 - dev has a superblock that is compatible with refdev
919  *      1 - dev has a superblock that is compatible and newer than refdev
920  *          so dev should be used as the refdev in future
921  *     -EINVAL superblock incompatible or invalid
922  *     -othererror e.g. -EIO
923  *
924  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
925  *      Verify that dev is acceptable into mddev.
926  *       The first time, mddev->raid_disks will be 0, and data from
927  *       dev should be merged in.  Subsequent calls check that dev
928  *       is new enough.  Return 0 or -EINVAL
929  *
930  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
931  *     Update the superblock for rdev with data in mddev
932  *     This does not write to disc.
933  *
934  */
935 
936 struct super_type  {
937 	char		    *name;
938 	struct module	    *owner;
939 	int		    (*load_super)(struct md_rdev *rdev,
940 					  struct md_rdev *refdev,
941 					  int minor_version);
942 	int		    (*validate_super)(struct mddev *mddev,
943 					      struct md_rdev *rdev);
944 	void		    (*sync_super)(struct mddev *mddev,
945 					  struct md_rdev *rdev);
946 	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
947 						sector_t num_sectors);
948 	int		    (*allow_new_offset)(struct md_rdev *rdev,
949 						unsigned long long new_offset);
950 };
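/*
 * Illustrative, condensed use of these hooks during assembly (the real
 * callers live further down in this file; names are abbreviated here):
 *
 *	err = super_types[ver].load_super(rdev, refdev, minor);
 *	if (err >= 0)
 *		err = super_types[ver].validate_super(mddev, rdev);
 *
 * A load_super() return of 1 promotes rdev to be the reference device for
 * subsequent comparisons; sync_super() later refreshes the in-memory
 * superblock without touching the disk.
 */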
951 
952 /*
953  * Check that the given mddev has no bitmap.
954  *
955  * This function is called from the run method of all personalities that do not
956  * support bitmaps. It prints an error message and returns non-zero if mddev
957  * has a bitmap. Otherwise, it returns 0.
958  *
959  */
960 int md_check_no_bitmap(struct mddev *mddev)
961 {
962 	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
963 		return 0;
964 	pr_warn("%s: bitmaps are not supported for %s\n",
965 		mdname(mddev), mddev->pers->name);
966 	return 1;
967 }
968 EXPORT_SYMBOL(md_check_no_bitmap);
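/*
 * Illustrative caller: a personality that does not support bitmaps
 * typically does this in its run() method:
 *
 *	if (md_check_no_bitmap(mddev))
 *		return -EINVAL;
 */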
969 
970 /*
971  * load_super for 0.90.0
972  */
973 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
974 {
975 	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
976 	mdp_super_t *sb;
977 	int ret;
978 
979 	/*
980 	 * Calculate the position of the superblock (512-byte sectors),
981 	 * it's at the end of the disk.
982 	 *
983 	 * It also happens to be a multiple of 4Kb.
984 	 */
985 	rdev->sb_start = calc_dev_sboffset(rdev);
986 
987 	ret = read_disk_sb(rdev, MD_SB_BYTES);
988 	if (ret)
989 		return ret;
990 
991 	ret = -EINVAL;
992 
993 	bdevname(rdev->bdev, b);
994 	sb = page_address(rdev->sb_page);
995 
996 	if (sb->md_magic != MD_SB_MAGIC) {
997 		pr_warn("md: invalid raid superblock magic on %s\n", b);
998 		goto abort;
999 	}
1000 
1001 	if (sb->major_version != 0 ||
1002 	    sb->minor_version < 90 ||
1003 	    sb->minor_version > 91) {
1004 		pr_warn("Bad version number %d.%d on %s\n",
1005 			sb->major_version, sb->minor_version, b);
1006 		goto abort;
1007 	}
1008 
1009 	if (sb->raid_disks <= 0)
1010 		goto abort;
1011 
1012 	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1013 		pr_warn("md: invalid superblock checksum on %s\n", b);
1014 		goto abort;
1015 	}
1016 
1017 	rdev->preferred_minor = sb->md_minor;
1018 	rdev->data_offset = 0;
1019 	rdev->new_data_offset = 0;
1020 	rdev->sb_size = MD_SB_BYTES;
1021 	rdev->badblocks.shift = -1;
1022 
1023 	if (sb->level == LEVEL_MULTIPATH)
1024 		rdev->desc_nr = -1;
1025 	else
1026 		rdev->desc_nr = sb->this_disk.number;
1027 
1028 	if (!refdev) {
1029 		ret = 1;
1030 	} else {
1031 		__u64 ev1, ev2;
1032 		mdp_super_t *refsb = page_address(refdev->sb_page);
1033 		if (!uuid_equal(refsb, sb)) {
1034 			pr_warn("md: %s has different UUID to %s\n",
1035 				b, bdevname(refdev->bdev,b2));
1036 			goto abort;
1037 		}
1038 		if (!sb_equal(refsb, sb)) {
1039 			pr_warn("md: %s has same UUID but different superblock to %s\n",
1040 				b, bdevname(refdev->bdev, b2));
1041 			goto abort;
1042 		}
1043 		ev1 = md_event(sb);
1044 		ev2 = md_event(refsb);
1045 		if (ev1 > ev2)
1046 			ret = 1;
1047 		else
1048 			ret = 0;
1049 	}
1050 	rdev->sectors = rdev->sb_start;
1051 	/* Limit to 4TB as metadata cannot record more than that.
1052 	 * (not needed for Linear and RAID0 as metadata doesn't
1053 	 * record this size)
1054 	 */
1055 	if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
1056 	    sb->level >= 1)
1057 		rdev->sectors = (sector_t)(2ULL << 32) - 2;
1058 
1059 	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1060 		/* "this cannot possibly happen" ... */
1061 		ret = -EINVAL;
1062 
1063  abort:
1064 	return ret;
1065 }
1066 
1067 /*
1068  * validate_super for 0.90.0
1069  */
1070 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1071 {
1072 	mdp_disk_t *desc;
1073 	mdp_super_t *sb = page_address(rdev->sb_page);
1074 	__u64 ev1 = md_event(sb);
1075 
1076 	rdev->raid_disk = -1;
1077 	clear_bit(Faulty, &rdev->flags);
1078 	clear_bit(In_sync, &rdev->flags);
1079 	clear_bit(Bitmap_sync, &rdev->flags);
1080 	clear_bit(WriteMostly, &rdev->flags);
1081 
1082 	if (mddev->raid_disks == 0) {
1083 		mddev->major_version = 0;
1084 		mddev->minor_version = sb->minor_version;
1085 		mddev->patch_version = sb->patch_version;
1086 		mddev->external = 0;
1087 		mddev->chunk_sectors = sb->chunk_size >> 9;
1088 		mddev->ctime = sb->ctime;
1089 		mddev->utime = sb->utime;
1090 		mddev->level = sb->level;
1091 		mddev->clevel[0] = 0;
1092 		mddev->layout = sb->layout;
1093 		mddev->raid_disks = sb->raid_disks;
1094 		mddev->dev_sectors = ((sector_t)sb->size) * 2;
1095 		mddev->events = ev1;
1096 		mddev->bitmap_info.offset = 0;
1097 		mddev->bitmap_info.space = 0;
1098 		/* bitmap can use 60K after the 4K superblock */
1099 		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1100 		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1101 		mddev->reshape_backwards = 0;
1102 
1103 		if (mddev->minor_version >= 91) {
1104 			mddev->reshape_position = sb->reshape_position;
1105 			mddev->delta_disks = sb->delta_disks;
1106 			mddev->new_level = sb->new_level;
1107 			mddev->new_layout = sb->new_layout;
1108 			mddev->new_chunk_sectors = sb->new_chunk >> 9;
1109 			if (mddev->delta_disks < 0)
1110 				mddev->reshape_backwards = 1;
1111 		} else {
1112 			mddev->reshape_position = MaxSector;
1113 			mddev->delta_disks = 0;
1114 			mddev->new_level = mddev->level;
1115 			mddev->new_layout = mddev->layout;
1116 			mddev->new_chunk_sectors = mddev->chunk_sectors;
1117 		}
1118 
1119 		if (sb->state & (1<<MD_SB_CLEAN))
1120 			mddev->recovery_cp = MaxSector;
1121 		else {
1122 			if (sb->events_hi == sb->cp_events_hi &&
1123 				sb->events_lo == sb->cp_events_lo) {
1124 				mddev->recovery_cp = sb->recovery_cp;
1125 			} else
1126 				mddev->recovery_cp = 0;
1127 		}
1128 
1129 		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1130 		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1131 		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1132 		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1133 
1134 		mddev->max_disks = MD_SB_DISKS;
1135 
1136 		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1137 		    mddev->bitmap_info.file == NULL) {
1138 			mddev->bitmap_info.offset =
1139 				mddev->bitmap_info.default_offset;
1140 			mddev->bitmap_info.space =
1141 				mddev->bitmap_info.default_space;
1142 		}
1143 
1144 	} else if (mddev->pers == NULL) {
1145 		/* Insist on a good event counter while assembling, except
1146 		 * for spares (which don't need an event count) */
1147 		++ev1;
1148 		if (sb->disks[rdev->desc_nr].state & (
1149 			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1150 			if (ev1 < mddev->events)
1151 				return -EINVAL;
1152 	} else if (mddev->bitmap) {
1153 		/* if adding to array with a bitmap, then we can accept an
1154 		 * older device ... but not too old.
1155 		 */
1156 		if (ev1 < mddev->bitmap->events_cleared)
1157 			return 0;
1158 		if (ev1 < mddev->events)
1159 			set_bit(Bitmap_sync, &rdev->flags);
1160 	} else {
1161 		if (ev1 < mddev->events)
1162 			/* just a hot-add of a new device, leave raid_disk at -1 */
1163 			return 0;
1164 	}
1165 
1166 	if (mddev->level != LEVEL_MULTIPATH) {
1167 		desc = sb->disks + rdev->desc_nr;
1168 
1169 		if (desc->state & (1<<MD_DISK_FAULTY))
1170 			set_bit(Faulty, &rdev->flags);
1171 		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1172 			    desc->raid_disk < mddev->raid_disks */) {
1173 			set_bit(In_sync, &rdev->flags);
1174 			rdev->raid_disk = desc->raid_disk;
1175 			rdev->saved_raid_disk = desc->raid_disk;
1176 		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1177 			/* active but not in sync implies recovery up to
1178 			 * reshape position.  We don't know exactly where
1179 			 * that is, so set to zero for now */
1180 			if (mddev->minor_version >= 91) {
1181 				rdev->recovery_offset = 0;
1182 				rdev->raid_disk = desc->raid_disk;
1183 			}
1184 		}
1185 		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1186 			set_bit(WriteMostly, &rdev->flags);
1187 		if (desc->state & (1<<MD_DISK_FAILFAST))
1188 			set_bit(FailFast, &rdev->flags);
1189 	} else /* MULTIPATH are always insync */
1190 		set_bit(In_sync, &rdev->flags);
1191 	return 0;
1192 }
1193 
1194 /*
1195  * sync_super for 0.90.0
1196  */
1197 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1198 {
1199 	mdp_super_t *sb;
1200 	struct md_rdev *rdev2;
1201 	int next_spare = mddev->raid_disks;
1202 
1203 	/* make rdev->sb match mddev data..
1204 	 *
1205 	 * 1/ zero out disks
1206 	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1207 	 * 3/ any empty disks < next_spare become removed
1208 	 *
1209 	 * disks[0] gets initialised to REMOVED because
1210 	 * we cannot be sure from other fields if it has
1211 	 * been initialised or not.
1212 	 */
1213 	int i;
1214 	int active=0, working=0,failed=0,spare=0,nr_disks=0;
1215 
1216 	rdev->sb_size = MD_SB_BYTES;
1217 
1218 	sb = page_address(rdev->sb_page);
1219 
1220 	memset(sb, 0, sizeof(*sb));
1221 
1222 	sb->md_magic = MD_SB_MAGIC;
1223 	sb->major_version = mddev->major_version;
1224 	sb->patch_version = mddev->patch_version;
1225 	sb->gvalid_words  = 0; /* ignored */
1226 	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1227 	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1228 	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1229 	memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1230 
1231 	sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1232 	sb->level = mddev->level;
1233 	sb->size = mddev->dev_sectors / 2;
1234 	sb->raid_disks = mddev->raid_disks;
1235 	sb->md_minor = mddev->md_minor;
1236 	sb->not_persistent = 0;
1237 	sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1238 	sb->state = 0;
1239 	sb->events_hi = (mddev->events>>32);
1240 	sb->events_lo = (u32)mddev->events;
1241 
1242 	if (mddev->reshape_position == MaxSector)
1243 		sb->minor_version = 90;
1244 	else {
1245 		sb->minor_version = 91;
1246 		sb->reshape_position = mddev->reshape_position;
1247 		sb->new_level = mddev->new_level;
1248 		sb->delta_disks = mddev->delta_disks;
1249 		sb->new_layout = mddev->new_layout;
1250 		sb->new_chunk = mddev->new_chunk_sectors << 9;
1251 	}
1252 	mddev->minor_version = sb->minor_version;
1253 	if (mddev->in_sync)
1254 	{
1255 		sb->recovery_cp = mddev->recovery_cp;
1256 		sb->cp_events_hi = (mddev->events>>32);
1257 		sb->cp_events_lo = (u32)mddev->events;
1258 		if (mddev->recovery_cp == MaxSector)
1259 			sb->state = (1<< MD_SB_CLEAN);
1260 	} else
1261 		sb->recovery_cp = 0;
1262 
1263 	sb->layout = mddev->layout;
1264 	sb->chunk_size = mddev->chunk_sectors << 9;
1265 
1266 	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1267 		sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1268 
1269 	sb->disks[0].state = (1<<MD_DISK_REMOVED);
1270 	rdev_for_each(rdev2, mddev) {
1271 		mdp_disk_t *d;
1272 		int desc_nr;
1273 		int is_active = test_bit(In_sync, &rdev2->flags);
1274 
1275 		if (rdev2->raid_disk >= 0 &&
1276 		    sb->minor_version >= 91)
1277 			/* we have nowhere to store the recovery_offset,
1278 			 * but if it is not below the reshape_position,
1279 			 * we can piggy-back on that.
1280 			 */
1281 			is_active = 1;
1282 		if (rdev2->raid_disk < 0 ||
1283 		    test_bit(Faulty, &rdev2->flags))
1284 			is_active = 0;
1285 		if (is_active)
1286 			desc_nr = rdev2->raid_disk;
1287 		else
1288 			desc_nr = next_spare++;
1289 		rdev2->desc_nr = desc_nr;
1290 		d = &sb->disks[rdev2->desc_nr];
1291 		nr_disks++;
1292 		d->number = rdev2->desc_nr;
1293 		d->major = MAJOR(rdev2->bdev->bd_dev);
1294 		d->minor = MINOR(rdev2->bdev->bd_dev);
1295 		if (is_active)
1296 			d->raid_disk = rdev2->raid_disk;
1297 		else
1298 			d->raid_disk = rdev2->desc_nr; /* compatibility */
1299 		if (test_bit(Faulty, &rdev2->flags))
1300 			d->state = (1<<MD_DISK_FAULTY);
1301 		else if (is_active) {
1302 			d->state = (1<<MD_DISK_ACTIVE);
1303 			if (test_bit(In_sync, &rdev2->flags))
1304 				d->state |= (1<<MD_DISK_SYNC);
1305 			active++;
1306 			working++;
1307 		} else {
1308 			d->state = 0;
1309 			spare++;
1310 			working++;
1311 		}
1312 		if (test_bit(WriteMostly, &rdev2->flags))
1313 			d->state |= (1<<MD_DISK_WRITEMOSTLY);
1314 		if (test_bit(FailFast, &rdev2->flags))
1315 			d->state |= (1<<MD_DISK_FAILFAST);
1316 	}
1317 	/* now set the "removed" and "faulty" bits on any missing devices */
1318 	for (i=0 ; i < mddev->raid_disks ; i++) {
1319 		mdp_disk_t *d = &sb->disks[i];
1320 		if (d->state == 0 && d->number == 0) {
1321 			d->number = i;
1322 			d->raid_disk = i;
1323 			d->state = (1<<MD_DISK_REMOVED);
1324 			d->state |= (1<<MD_DISK_FAULTY);
1325 			failed++;
1326 		}
1327 	}
1328 	sb->nr_disks = nr_disks;
1329 	sb->active_disks = active;
1330 	sb->working_disks = working;
1331 	sb->failed_disks = failed;
1332 	sb->spare_disks = spare;
1333 
1334 	sb->this_disk = sb->disks[rdev->desc_nr];
1335 	sb->sb_csum = calc_sb_csum(sb);
1336 }
1337 
1338 /*
1339  * rdev_size_change for 0.90.0
1340  */
1341 static unsigned long long
1342 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1343 {
1344 	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1345 		return 0; /* component must fit device */
1346 	if (rdev->mddev->bitmap_info.offset)
1347 		return 0; /* can't move bitmap */
1348 	rdev->sb_start = calc_dev_sboffset(rdev);
1349 	if (!num_sectors || num_sectors > rdev->sb_start)
1350 		num_sectors = rdev->sb_start;
1351 	/* Limit to 4TB as metadata cannot record more than that.
1352 	 * 4TB == 2^32 KB, or 2*2^32 sectors.
1353 	 */
1354 	if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
1355 	    rdev->mddev->level >= 1)
1356 		num_sectors = (sector_t)(2ULL << 32) - 2;
1357 	do {
1358 		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1359 		       rdev->sb_page);
1360 	} while (md_super_wait(rdev->mddev) < 0);
1361 	return num_sectors;
1362 }
1363 
1364 static int
1365 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1366 {
1367 	/* non-zero offset changes not possible with v0.90 */
1368 	return new_offset == 0;
1369 }
1370 
1371 /*
1372  * version 1 superblock
1373  */
1374 
1375 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1376 {
1377 	__le32 disk_csum;
1378 	u32 csum;
1379 	unsigned long long newcsum;
1380 	int size = 256 + le32_to_cpu(sb->max_dev)*2;
1381 	__le32 *isuper = (__le32*)sb;
1382 
1383 	disk_csum = sb->sb_csum;
1384 	sb->sb_csum = 0;
1385 	newcsum = 0;
1386 	for (; size >= 4; size -= 4)
1387 		newcsum += le32_to_cpu(*isuper++);
1388 
1389 	if (size == 2)
1390 		newcsum += le16_to_cpu(*(__le16*) isuper);
1391 
1392 	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1393 	sb->sb_csum = disk_csum;
1394 	return cpu_to_le32(csum);
1395 }
1396 
1397 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1398 {
1399 	struct mdp_superblock_1 *sb;
1400 	int ret;
1401 	sector_t sb_start;
1402 	sector_t sectors;
1403 	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1404 	int bmask;
1405 
1406 	/*
1407 	 * Calculate the position of the superblock in 512-byte sectors.
1408 	 * It is always aligned to a 4K boundary and,
1409 	 * depending on minor_version, it can be:
1410 	 * 0: At least 8K, but less than 12K, from end of device
1411 	 * 1: At start of device
1412 	 * 2: 4K from start of device.
1413 	 */
1414 	switch(minor_version) {
1415 	case 0:
1416 		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1417 		sb_start -= 8*2;
1418 		sb_start &= ~(sector_t)(4*2-1);
1419 		break;
1420 	case 1:
1421 		sb_start = 0;
1422 		break;
1423 	case 2:
1424 		sb_start = 8;
1425 		break;
1426 	default:
1427 		return -EINVAL;
1428 	}
1429 	rdev->sb_start = sb_start;
1430 
1431 	/* The superblock is usually no larger than 1K, but it can be larger,
1432 	 * and it is safe to read 4K, so we do that
1433 	 */
1434 	ret = read_disk_sb(rdev, 4096);
1435 	if (ret) return ret;
1436 
1437 	sb = page_address(rdev->sb_page);
1438 
1439 	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1440 	    sb->major_version != cpu_to_le32(1) ||
1441 	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1442 	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1443 	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1444 		return -EINVAL;
1445 
1446 	if (calc_sb_1_csum(sb) != sb->sb_csum) {
1447 		pr_warn("md: invalid superblock checksum on %s\n",
1448 			bdevname(rdev->bdev,b));
1449 		return -EINVAL;
1450 	}
1451 	if (le64_to_cpu(sb->data_size) < 10) {
1452 		pr_warn("md: data_size too small on %s\n",
1453 			bdevname(rdev->bdev,b));
1454 		return -EINVAL;
1455 	}
1456 	if (sb->pad0 ||
1457 	    sb->pad3[0] ||
1458 	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1459 		/* Some padding is non-zero, might be a new feature */
1460 		return -EINVAL;
1461 
1462 	rdev->preferred_minor = 0xffff;
1463 	rdev->data_offset = le64_to_cpu(sb->data_offset);
1464 	rdev->new_data_offset = rdev->data_offset;
1465 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1466 	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1467 		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1468 	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1469 
1470 	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1471 	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1472 	if (rdev->sb_size & bmask)
1473 		rdev->sb_size = (rdev->sb_size | bmask) + 1;
1474 
1475 	if (minor_version
1476 	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
1477 		return -EINVAL;
1478 	if (minor_version
1479 	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1480 		return -EINVAL;
1481 
1482 	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1483 		rdev->desc_nr = -1;
1484 	else
1485 		rdev->desc_nr = le32_to_cpu(sb->dev_number);
1486 
1487 	if (!rdev->bb_page) {
1488 		rdev->bb_page = alloc_page(GFP_KERNEL);
1489 		if (!rdev->bb_page)
1490 			return -ENOMEM;
1491 	}
1492 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1493 	    rdev->badblocks.count == 0) {
1494 		/* need to load the bad block list.
1495 		 * Currently we limit it to one page.
1496 		 */
1497 		s32 offset;
1498 		sector_t bb_sector;
1499 		u64 *bbp;
1500 		int i;
1501 		int sectors = le16_to_cpu(sb->bblog_size);
1502 		if (sectors > (PAGE_SIZE / 512))
1503 			return -EINVAL;
1504 		offset = le32_to_cpu(sb->bblog_offset);
1505 		if (offset == 0)
1506 			return -EINVAL;
1507 		bb_sector = (long long)offset;
1508 		if (!sync_page_io(rdev, bb_sector, sectors << 9,
1509 				  rdev->bb_page, REQ_OP_READ, 0, true))
1510 			return -EIO;
1511 		bbp = (u64 *)page_address(rdev->bb_page);
1512 		rdev->badblocks.shift = sb->bblog_shift;
1513 		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1514 			u64 bb = le64_to_cpu(*bbp);
1515 			int count = bb & (0x3ff);
1516 			u64 sector = bb >> 10;
1517 			sector <<= sb->bblog_shift;
1518 			count <<= sb->bblog_shift;
1519 			if (bb + 1 == 0)
1520 				break;
1521 			if (badblocks_set(&rdev->badblocks, sector, count, 1))
1522 				return -EINVAL;
1523 		}
1524 	} else if (sb->bblog_offset != 0)
1525 		rdev->badblocks.shift = 0;
1526 
1527 	if (!refdev) {
1528 		ret = 1;
1529 	} else {
1530 		__u64 ev1, ev2;
1531 		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1532 
1533 		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1534 		    sb->level != refsb->level ||
1535 		    sb->layout != refsb->layout ||
1536 		    sb->chunksize != refsb->chunksize) {
1537 			pr_warn("md: %s has strangely different superblock to %s\n",
1538 				bdevname(rdev->bdev,b),
1539 				bdevname(refdev->bdev,b2));
1540 			return -EINVAL;
1541 		}
1542 		ev1 = le64_to_cpu(sb->events);
1543 		ev2 = le64_to_cpu(refsb->events);
1544 
1545 		if (ev1 > ev2)
1546 			ret = 1;
1547 		else
1548 			ret = 0;
1549 	}
1550 	if (minor_version) {
1551 		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1552 		sectors -= rdev->data_offset;
1553 	} else
1554 		sectors = rdev->sb_start;
1555 	if (sectors < le64_to_cpu(sb->data_size))
1556 		return -EINVAL;
1557 	rdev->sectors = le64_to_cpu(sb->data_size);
1558 	return ret;
1559 }
1560 
1561 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1562 {
1563 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1564 	__u64 ev1 = le64_to_cpu(sb->events);
1565 
1566 	rdev->raid_disk = -1;
1567 	clear_bit(Faulty, &rdev->flags);
1568 	clear_bit(In_sync, &rdev->flags);
1569 	clear_bit(Bitmap_sync, &rdev->flags);
1570 	clear_bit(WriteMostly, &rdev->flags);
1571 
1572 	if (mddev->raid_disks == 0) {
1573 		mddev->major_version = 1;
1574 		mddev->patch_version = 0;
1575 		mddev->external = 0;
1576 		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1577 		mddev->ctime = le64_to_cpu(sb->ctime);
1578 		mddev->utime = le64_to_cpu(sb->utime);
1579 		mddev->level = le32_to_cpu(sb->level);
1580 		mddev->clevel[0] = 0;
1581 		mddev->layout = le32_to_cpu(sb->layout);
1582 		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1583 		mddev->dev_sectors = le64_to_cpu(sb->size);
1584 		mddev->events = ev1;
1585 		mddev->bitmap_info.offset = 0;
1586 		mddev->bitmap_info.space = 0;
1587 		/* Default location for the bitmap is 1K after the superblock,
1588 		 * using 3K - a total of 4K
1589 		 */
1590 		mddev->bitmap_info.default_offset = 1024 >> 9;
1591 		mddev->bitmap_info.default_space = (4096-1024) >> 9;
1592 		mddev->reshape_backwards = 0;
1593 
1594 		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1595 		memcpy(mddev->uuid, sb->set_uuid, 16);
1596 
1597 		mddev->max_disks =  (4096-256)/2;
1598 
1599 		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1600 		    mddev->bitmap_info.file == NULL) {
1601 			mddev->bitmap_info.offset =
1602 				(__s32)le32_to_cpu(sb->bitmap_offset);
1603 			/* Metadata doesn't record how much space is available.
1604 			 * For 1.0, we assume we can use up to the superblock
1605 			 * if the bitmap is before it, else up to 4K beyond the superblock.
1606 			 * For others, assume no change is possible.
1607 			 */
1608 			if (mddev->minor_version > 0)
1609 				mddev->bitmap_info.space = 0;
1610 			else if (mddev->bitmap_info.offset > 0)
1611 				mddev->bitmap_info.space =
1612 					8 - mddev->bitmap_info.offset;
1613 			else
1614 				mddev->bitmap_info.space =
1615 					-mddev->bitmap_info.offset;
1616 		}
1617 
1618 		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1619 			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1620 			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1621 			mddev->new_level = le32_to_cpu(sb->new_level);
1622 			mddev->new_layout = le32_to_cpu(sb->new_layout);
1623 			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1624 			if (mddev->delta_disks < 0 ||
1625 			    (mddev->delta_disks == 0 &&
1626 			     (le32_to_cpu(sb->feature_map)
1627 			      & MD_FEATURE_RESHAPE_BACKWARDS)))
1628 				mddev->reshape_backwards = 1;
1629 		} else {
1630 			mddev->reshape_position = MaxSector;
1631 			mddev->delta_disks = 0;
1632 			mddev->new_level = mddev->level;
1633 			mddev->new_layout = mddev->layout;
1634 			mddev->new_chunk_sectors = mddev->chunk_sectors;
1635 		}
1636 
1637 		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1638 			set_bit(MD_HAS_JOURNAL, &mddev->flags);
1639 	} else if (mddev->pers == NULL) {
1640 		/* Insist on a good event counter while assembling, except for
1641 		 * spares (which don't need an event count) */
1642 		++ev1;
1643 		if (rdev->desc_nr >= 0 &&
1644 		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1645 		    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1646 		     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1647 			if (ev1 < mddev->events)
1648 				return -EINVAL;
1649 	} else if (mddev->bitmap) {
1650 		/* If adding to array with a bitmap, then we can accept an
1651 		 * older device, but not too old.
1652 		 */
1653 		if (ev1 < mddev->bitmap->events_cleared)
1654 			return 0;
1655 		if (ev1 < mddev->events)
1656 			set_bit(Bitmap_sync, &rdev->flags);
1657 	} else {
1658 		if (ev1 < mddev->events)
1659 			/* just a hot-add of a new device, leave raid_disk at -1 */
1660 			return 0;
1661 	}
1662 	if (mddev->level != LEVEL_MULTIPATH) {
1663 		int role;
1664 		if (rdev->desc_nr < 0 ||
1665 		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1666 			role = MD_DISK_ROLE_SPARE;
1667 			rdev->desc_nr = -1;
1668 		} else
1669 			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1670 		switch(role) {
1671 		case MD_DISK_ROLE_SPARE: /* spare */
1672 			break;
1673 		case MD_DISK_ROLE_FAULTY: /* faulty */
1674 			set_bit(Faulty, &rdev->flags);
1675 			break;
1676 		case MD_DISK_ROLE_JOURNAL: /* journal device */
1677 			if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1678 				/* journal device without journal feature */
1679 				pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1680 				return -EINVAL;
1681 			}
1682 			set_bit(Journal, &rdev->flags);
1683 			rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1684 			rdev->raid_disk = 0;
1685 			break;
1686 		default:
1687 			rdev->saved_raid_disk = role;
1688 			if ((le32_to_cpu(sb->feature_map) &
1689 			     MD_FEATURE_RECOVERY_OFFSET)) {
1690 				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1691 				if (!(le32_to_cpu(sb->feature_map) &
1692 				      MD_FEATURE_RECOVERY_BITMAP))
1693 					rdev->saved_raid_disk = -1;
1694 			} else
1695 				set_bit(In_sync, &rdev->flags);
1696 			rdev->raid_disk = role;
1697 			break;
1698 		}
1699 		if (sb->devflags & WriteMostly1)
1700 			set_bit(WriteMostly, &rdev->flags);
1701 		if (sb->devflags & FailFast1)
1702 			set_bit(FailFast, &rdev->flags);
1703 		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1704 			set_bit(Replacement, &rdev->flags);
1705 	} else /* MULTIPATH are always insync */
1706 		set_bit(In_sync, &rdev->flags);
1707 
1708 	return 0;
1709 }
1710 
1711 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1712 {
1713 	struct mdp_superblock_1 *sb;
1714 	struct md_rdev *rdev2;
1715 	int max_dev, i;
1716 	/* make rdev->sb match mddev and rdev data. */
1717 
1718 	sb = page_address(rdev->sb_page);
1719 
1720 	sb->feature_map = 0;
1721 	sb->pad0 = 0;
1722 	sb->recovery_offset = cpu_to_le64(0);
1723 	memset(sb->pad3, 0, sizeof(sb->pad3));
1724 
1725 	sb->utime = cpu_to_le64((__u64)mddev->utime);
1726 	sb->events = cpu_to_le64(mddev->events);
1727 	if (mddev->in_sync)
1728 		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1729 	else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1730 		sb->resync_offset = cpu_to_le64(MaxSector);
1731 	else
1732 		sb->resync_offset = cpu_to_le64(0);
1733 
1734 	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1735 
1736 	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1737 	sb->size = cpu_to_le64(mddev->dev_sectors);
1738 	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1739 	sb->level = cpu_to_le32(mddev->level);
1740 	sb->layout = cpu_to_le32(mddev->layout);
1741 	if (test_bit(FailFast, &rdev->flags))
1742 		sb->devflags |= FailFast1;
1743 	else
1744 		sb->devflags &= ~FailFast1;
1745 
1746 	if (test_bit(WriteMostly, &rdev->flags))
1747 		sb->devflags |= WriteMostly1;
1748 	else
1749 		sb->devflags &= ~WriteMostly1;
1750 	sb->data_offset = cpu_to_le64(rdev->data_offset);
1751 	sb->data_size = cpu_to_le64(rdev->sectors);
1752 
1753 	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1754 		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1755 		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1756 	}
1757 
1758 	if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1759 	    !test_bit(In_sync, &rdev->flags)) {
1760 		sb->feature_map |=
1761 			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1762 		sb->recovery_offset =
1763 			cpu_to_le64(rdev->recovery_offset);
1764 		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1765 			sb->feature_map |=
1766 				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1767 	}
1768 	/* Note: recovery_offset and journal_tail share space  */
1769 	if (test_bit(Journal, &rdev->flags))
1770 		sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1771 	if (test_bit(Replacement, &rdev->flags))
1772 		sb->feature_map |=
1773 			cpu_to_le32(MD_FEATURE_REPLACEMENT);
1774 
1775 	if (mddev->reshape_position != MaxSector) {
1776 		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1777 		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1778 		sb->new_layout = cpu_to_le32(mddev->new_layout);
1779 		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1780 		sb->new_level = cpu_to_le32(mddev->new_level);
1781 		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1782 		if (mddev->delta_disks == 0 &&
1783 		    mddev->reshape_backwards)
1784 			sb->feature_map
1785 				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1786 		if (rdev->new_data_offset != rdev->data_offset) {
1787 			sb->feature_map
1788 				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1789 			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1790 							     - rdev->data_offset));
1791 		}
1792 	}
1793 
1794 	if (mddev_is_clustered(mddev))
1795 		sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
1796 
1797 	if (rdev->badblocks.count == 0)
1798 		/* Nothing to do for bad blocks */ ;
1799 	else if (sb->bblog_offset == 0)
1800 		/* Cannot record bad blocks on this device */
1801 		md_error(mddev, rdev);
1802 	else {
1803 		struct badblocks *bb = &rdev->badblocks;
1804 		u64 *bbp = (u64 *)page_address(rdev->bb_page);
1805 		u64 *p = bb->page;
1806 		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1807 		if (bb->changed) {
1808 			unsigned seq;
1809 
1810 retry:
1811 			seq = read_seqbegin(&bb->lock);
1812 
1813 			memset(bbp, 0xff, PAGE_SIZE);
1814 
1815 			for (i = 0 ; i < bb->count ; i++) {
1816 				u64 internal_bb = p[i];
1817 				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1818 						| BB_LEN(internal_bb));
1819 				bbp[i] = cpu_to_le64(store_bb);
1820 			}
1821 			bb->changed = 0;
1822 			if (read_seqretry(&bb->lock, seq))
1823 				goto retry;
1824 
1825 			bb->sector = (rdev->sb_start +
1826 				      (int)le32_to_cpu(sb->bblog_offset));
1827 			bb->size = le16_to_cpu(sb->bblog_size);
1828 		}
1829 	}
1830 
1831 	max_dev = 0;
1832 	rdev_for_each(rdev2, mddev)
1833 		if (rdev2->desc_nr+1 > max_dev)
1834 			max_dev = rdev2->desc_nr+1;
1835 
1836 	if (max_dev > le32_to_cpu(sb->max_dev)) {
1837 		int bmask;
1838 		sb->max_dev = cpu_to_le32(max_dev);
1839 		rdev->sb_size = max_dev * 2 + 256;
1840 		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1841 		if (rdev->sb_size & bmask)
1842 			rdev->sb_size = (rdev->sb_size | bmask) + 1;
1843 	} else
1844 		max_dev = le32_to_cpu(sb->max_dev);
1845 
1846 	for (i = 0; i < max_dev; i++)
1847 		sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1848 
1849 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
1850 		sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
1851 
1852 	rdev_for_each(rdev2, mddev) {
1853 		i = rdev2->desc_nr;
1854 		if (test_bit(Faulty, &rdev2->flags))
1855 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1856 		else if (test_bit(In_sync, &rdev2->flags))
1857 			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1858 		else if (test_bit(Journal, &rdev2->flags))
1859 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
1860 		else if (rdev2->raid_disk >= 0)
1861 			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1862 		else
1863 			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
1864 	}
1865 
1866 	sb->sb_csum = calc_sb_1_csum(sb);
1867 }
1868 
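/*
 * Resize the data area of a v1.x rdev to 'num_sectors' (or to the largest
 * size that fits when 'num_sectors' is 0), rewrite the superblock to match
 * and return the resulting size; a return of 0 means the change was refused.
 */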
1869 static unsigned long long
1870 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1871 {
1872 	struct mdp_superblock_1 *sb;
1873 	sector_t max_sectors;
1874 	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1875 		return 0; /* component must fit device */
1876 	if (rdev->data_offset != rdev->new_data_offset)
1877 		return 0; /* too confusing */
1878 	if (rdev->sb_start < rdev->data_offset) {
1879 		/* minor versions 1 and 2; superblock before data */
1880 		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1881 		max_sectors -= rdev->data_offset;
1882 		if (!num_sectors || num_sectors > max_sectors)
1883 			num_sectors = max_sectors;
1884 	} else if (rdev->mddev->bitmap_info.offset) {
1885 		/* minor version 0 with bitmap we can't move */
1886 		return 0;
1887 	} else {
1888 		/* minor version 0; superblock after data */
1889 		sector_t sb_start;
1890 		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1891 		sb_start &= ~(sector_t)(4*2 - 1);
1892 		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1893 		if (!num_sectors || num_sectors > max_sectors)
1894 			num_sectors = max_sectors;
1895 		rdev->sb_start = sb_start;
1896 	}
1897 	sb = page_address(rdev->sb_page);
1898 	sb->data_size = cpu_to_le64(num_sectors);
1899 	sb->super_offset = rdev->sb_start;
1900 	sb->sb_csum = calc_sb_1_csum(sb);
1901 	do {
1902 		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1903 			       rdev->sb_page);
1904 	} while (md_super_wait(rdev->mddev) < 0);
1905 	return num_sectors;
1906 
1907 }
1908 
1909 static int
1910 super_1_allow_new_offset(struct md_rdev *rdev,
1911 			 unsigned long long new_offset)
1912 {
1913 	/* All necessary checks on new >= old have been done */
1914 	struct bitmap *bitmap;
1915 	if (new_offset >= rdev->data_offset)
1916 		return 1;
1917 
1918 	/* with 1.0 metadata, there is no metadata to tread on
1919 	 * so we can always move back */
1920 	if (rdev->mddev->minor_version == 0)
1921 		return 1;
1922 
1923 	/* otherwise we must be sure not to step on
1924 	 * any metadata, so stay:
1925 	 * 36K beyond start of superblock
1926 	 * beyond end of badblocks
1927 	 * beyond write-intent bitmap
1928 	 */
1929 	if (rdev->sb_start + (32+4)*2 > new_offset)
1930 		return 0;
1931 	bitmap = rdev->mddev->bitmap;
1932 	if (bitmap && !rdev->mddev->bitmap_info.file &&
1933 	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
1934 	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1935 		return 0;
1936 	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1937 		return 0;
1938 
1939 	return 1;
1940 }
1941 
1942 static struct super_type super_types[] = {
1943 	[0] = {
1944 		.name	= "0.90.0",
1945 		.owner	= THIS_MODULE,
1946 		.load_super	    = super_90_load,
1947 		.validate_super	    = super_90_validate,
1948 		.sync_super	    = super_90_sync,
1949 		.rdev_size_change   = super_90_rdev_size_change,
1950 		.allow_new_offset   = super_90_allow_new_offset,
1951 	},
1952 	[1] = {
1953 		.name	= "md-1",
1954 		.owner	= THIS_MODULE,
1955 		.load_super	    = super_1_load,
1956 		.validate_super	    = super_1_validate,
1957 		.sync_super	    = super_1_sync,
1958 		.rdev_size_change   = super_1_rdev_size_change,
1959 		.allow_new_offset   = super_1_allow_new_offset,
1960 	},
1961 };
1962 
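/*
 * Regenerate the in-memory superblock image for 'rdev': prefer a per-array
 * ->sync_super override when one is installed, otherwise dispatch to the
 * handler registered for the array's metadata version.
 */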
1963 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1964 {
1965 	if (mddev->sync_super) {
1966 		mddev->sync_super(mddev, rdev);
1967 		return;
1968 	}
1969 
1970 	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1971 
1972 	super_types[mddev->major_version].sync_super(mddev, rdev);
1973 }
1974 
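/*
 * Return 1 if any active member of 'mddev1' sits on the same underlying
 * whole disk (bd_contains) as any active member of 'mddev2'; faulty,
 * journal and unassigned devices are ignored.
 */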
1975 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1976 {
1977 	struct md_rdev *rdev, *rdev2;
1978 
1979 	rcu_read_lock();
1980 	rdev_for_each_rcu(rdev, mddev1) {
1981 		if (test_bit(Faulty, &rdev->flags) ||
1982 		    test_bit(Journal, &rdev->flags) ||
1983 		    rdev->raid_disk == -1)
1984 			continue;
1985 		rdev_for_each_rcu(rdev2, mddev2) {
1986 			if (test_bit(Faulty, &rdev2->flags) ||
1987 			    test_bit(Journal, &rdev2->flags) ||
1988 			    rdev2->raid_disk == -1)
1989 				continue;
1990 			if (rdev->bdev->bd_contains ==
1991 			    rdev2->bdev->bd_contains) {
1992 				rcu_read_unlock();
1993 				return 1;
1994 			}
1995 		}
1996 	}
1997 	rcu_read_unlock();
1998 	return 0;
1999 }
2000 
2001 static LIST_HEAD(pending_raid_disks);
2002 
2003 /*
2004  * Try to register data integrity profile for an mddev
2005  *
2006  * This is called when an array is started and after a disk has been kicked
2007  * from the array. It only succeeds if all working and active component devices
2008  * are integrity capable with matching profiles.
2009  */
2010 int md_integrity_register(struct mddev *mddev)
2011 {
2012 	struct md_rdev *rdev, *reference = NULL;
2013 
2014 	if (list_empty(&mddev->disks))
2015 		return 0; /* nothing to do */
2016 	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2017 		return 0; /* shouldn't register, or already is */
2018 	rdev_for_each(rdev, mddev) {
2019 		/* skip spares and non-functional disks */
2020 		if (test_bit(Faulty, &rdev->flags))
2021 			continue;
2022 		if (rdev->raid_disk < 0)
2023 			continue;
2024 		if (!reference) {
2025 			/* Use the first rdev as the reference */
2026 			reference = rdev;
2027 			continue;
2028 		}
2029 		/* does this rdev's profile match the reference profile? */
2030 		if (blk_integrity_compare(reference->bdev->bd_disk,
2031 				rdev->bdev->bd_disk) < 0)
2032 			return -EINVAL;
2033 	}
2034 	if (!reference || !bdev_get_integrity(reference->bdev))
2035 		return 0;
2036 	/*
2037 	 * All component devices are integrity capable and have matching
2038 	 * profiles, register the common profile for the md device.
2039 	 */
2040 	blk_integrity_register(mddev->gendisk,
2041 			       bdev_get_integrity(reference->bdev));
2042 
2043 	pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2044 	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
2045 		pr_err("md: failed to create integrity pool for %s\n",
2046 		       mdname(mddev));
2047 		return -EINVAL;
2048 	}
2049 	return 0;
2050 }
2051 EXPORT_SYMBOL(md_integrity_register);
2052 
2053 /*
2054  * Attempt to add an rdev, but only if it is consistent with the current
2055  * integrity profile
2056  */
2057 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2058 {
2059 	struct blk_integrity *bi_rdev;
2060 	struct blk_integrity *bi_mddev;
2061 	char name[BDEVNAME_SIZE];
2062 
2063 	if (!mddev->gendisk)
2064 		return 0;
2065 
2066 	bi_rdev = bdev_get_integrity(rdev->bdev);
2067 	bi_mddev = blk_get_integrity(mddev->gendisk);
2068 
2069 	if (!bi_mddev) /* nothing to do */
2070 		return 0;
2071 
2072 	if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2073 		pr_err("%s: incompatible integrity profile for %s\n",
2074 		       mdname(mddev), bdevname(rdev->bdev, name));
2075 		return -ENXIO;
2076 	}
2077 
2078 	return 0;
2079 }
2080 EXPORT_SYMBOL(md_integrity_add_rdev);
2081 
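/*
 * Attach 'rdev' to 'mddev': reject duplicates, make sure the device is
 * large enough for an already-running array, choose or validate desc_nr,
 * create the sysfs objects and links, and add it to the disk list.
 */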
2082 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2083 {
2084 	char b[BDEVNAME_SIZE];
2085 	struct kobject *ko;
2086 	int err;
2087 
2088 	/* prevent duplicates */
2089 	if (find_rdev(mddev, rdev->bdev->bd_dev))
2090 		return -EEXIST;
2091 
2092 	/* make sure rdev->sectors exceeds mddev->dev_sectors */
2093 	if (!test_bit(Journal, &rdev->flags) &&
2094 	    rdev->sectors &&
2095 	    (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2096 		if (mddev->pers) {
2097 			/* Cannot change size, so fail
2098 			 * If mddev->level <= 0, then we don't care
2099 			 * about aligning sizes (e.g. linear)
2100 			 */
2101 			if (mddev->level > 0)
2102 				return -ENOSPC;
2103 		} else
2104 			mddev->dev_sectors = rdev->sectors;
2105 	}
2106 
2107 	/* Verify rdev->desc_nr is unique.
2108 	 * If it is -1, assign a free number, else
2109 	 * check number is not in use
2110 	 */
2111 	rcu_read_lock();
2112 	if (rdev->desc_nr < 0) {
2113 		int choice = 0;
2114 		if (mddev->pers)
2115 			choice = mddev->raid_disks;
2116 		while (md_find_rdev_nr_rcu(mddev, choice))
2117 			choice++;
2118 		rdev->desc_nr = choice;
2119 	} else {
2120 		if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2121 			rcu_read_unlock();
2122 			return -EBUSY;
2123 		}
2124 	}
2125 	rcu_read_unlock();
2126 	if (!test_bit(Journal, &rdev->flags) &&
2127 	    mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2128 		pr_warn("md: %s: array is limited to %d devices\n",
2129 			mdname(mddev), mddev->max_disks);
2130 		return -EBUSY;
2131 	}
2132 	bdevname(rdev->bdev,b);
2133 	strreplace(b, '/', '!');
2134 
2135 	rdev->mddev = mddev;
2136 	pr_debug("md: bind<%s>\n", b);
2137 
2138 	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2139 		goto fail;
2140 
2141 	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2142 	if (sysfs_create_link(&rdev->kobj, ko, "block"))
2143 		/* failure here is OK */;
2144 	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2145 
2146 	list_add_rcu(&rdev->same_set, &mddev->disks);
2147 	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2148 
2149 	/* May as well allow recovery to be retried once */
2150 	mddev->recovery_disabled++;
2151 
2152 	return 0;
2153 
2154  fail:
2155 	pr_warn("md: failed to register dev-%s for %s\n",
2156 		b, mdname(mddev));
2157 	return err;
2158 }
2159 
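/*
 * Deferred teardown of an rdev's kobject, scheduled from
 * unbind_rdev_from_array() to avoid deadlocking against sysfs and RCU.
 */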
2160 static void md_delayed_delete(struct work_struct *ws)
2161 {
2162 	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2163 	kobject_del(&rdev->kobj);
2164 	kobject_put(&rdev->kobj);
2165 }
2166 
2167 static void unbind_rdev_from_array(struct md_rdev *rdev)
2168 {
2169 	char b[BDEVNAME_SIZE];
2170 
2171 	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2172 	list_del_rcu(&rdev->same_set);
2173 	pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2174 	rdev->mddev = NULL;
2175 	sysfs_remove_link(&rdev->kobj, "block");
2176 	sysfs_put(rdev->sysfs_state);
2177 	rdev->sysfs_state = NULL;
2178 	rdev->badblocks.count = 0;
2179 	/* We need to delay this, otherwise we can deadlock when
2180 	 * writing 'remove' to "dev/state".  We also need
2181 	 * to delay it due to rcu usage.
2182 	 */
2183 	synchronize_rcu();
2184 	INIT_WORK(&rdev->del_work, md_delayed_delete);
2185 	kobject_get(&rdev->kobj);
2186 	queue_work(md_misc_wq, &rdev->del_work);
2187 }
2188 
2189 /*
2190  * prevent the device from being mounted, repartitioned or
2191  * otherwise reused by a RAID array (or any other kernel
2192  * subsystem), by bd_claiming the device.
2193  */
2194 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2195 {
2196 	int err = 0;
2197 	struct block_device *bdev;
2198 	char b[BDEVNAME_SIZE];
2199 
2200 	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2201 				 shared ? (struct md_rdev *)lock_rdev : rdev);
2202 	if (IS_ERR(bdev)) {
2203 		pr_warn("md: could not open %s.\n", __bdevname(dev, b));
2204 		return PTR_ERR(bdev);
2205 	}
2206 	rdev->bdev = bdev;
2207 	return err;
2208 }
2209 
2210 static void unlock_rdev(struct md_rdev *rdev)
2211 {
2212 	struct block_device *bdev = rdev->bdev;
2213 	rdev->bdev = NULL;
2214 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2215 }
2216 
2217 void md_autodetect_dev(dev_t dev);
2218 
2219 static void export_rdev(struct md_rdev *rdev)
2220 {
2221 	char b[BDEVNAME_SIZE];
2222 
2223 	pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2224 	md_rdev_clear(rdev);
2225 #ifndef MODULE
2226 	if (test_bit(AutoDetected, &rdev->flags))
2227 		md_autodetect_dev(rdev->bdev->bd_dev);
2228 #endif
2229 	unlock_rdev(rdev);
2230 	kobject_put(&rdev->kobj);
2231 }
2232 
2233 void md_kick_rdev_from_array(struct md_rdev *rdev)
2234 {
2235 	unbind_rdev_from_array(rdev);
2236 	export_rdev(rdev);
2237 }
2238 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2239 
2240 static void export_array(struct mddev *mddev)
2241 {
2242 	struct md_rdev *rdev;
2243 
2244 	while (!list_empty(&mddev->disks)) {
2245 		rdev = list_first_entry(&mddev->disks, struct md_rdev,
2246 					same_set);
2247 		md_kick_rdev_from_array(rdev);
2248 	}
2249 	mddev->raid_disks = 0;
2250 	mddev->major_version = 0;
2251 }
2252 
2253 static void sync_sbs(struct mddev *mddev, int nospares)
2254 {
2255 	/* Update each superblock (in-memory image), but
2256 	 * if we are allowed to, skip spares which already
2257 	 * have the right event counter, or have one earlier
2258 	 * (which would mean they aren't being marked as dirty
2259 	 * with the rest of the array)
2260 	 */
2261 	struct md_rdev *rdev;
2262 	rdev_for_each(rdev, mddev) {
2263 		if (rdev->sb_events == mddev->events ||
2264 		    (nospares &&
2265 		     rdev->raid_disk < 0 &&
2266 		     rdev->sb_events+1 == mddev->events)) {
2267 			/* Don't update this superblock */
2268 			rdev->sb_loaded = 2;
2269 		} else {
2270 			sync_super(mddev, rdev);
2271 			rdev->sb_loaded = 1;
2272 		}
2273 	}
2274 }
2275 
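/*
 * Decide whether the on-disk superblock really needs rewriting by comparing
 * the recorded device roles and the main array parameters with the current
 * state.  Used on clustered arrays to skip redundant metadata updates.
 */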
2276 static bool does_sb_need_changing(struct mddev *mddev)
2277 {
2278 	struct md_rdev *rdev;
2279 	struct mdp_superblock_1 *sb;
2280 	int role;
2281 
2282 	/* Find a good rdev */
2283 	rdev_for_each(rdev, mddev)
2284 		if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2285 			break;
2286 
2287 	/* No good device found (the loop ran to completion) */
2288 	if (&rdev->same_set == &mddev->disks)
2289 		return false;
2290 
2291 	sb = page_address(rdev->sb_page);
2292 	/* Check if a device has become faulty or a spare become active */
2293 	rdev_for_each(rdev, mddev) {
2294 		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2295 		/* Device activated? */
2296 		if (role == 0xffff && rdev->raid_disk >=0 &&
2297 		    !test_bit(Faulty, &rdev->flags))
2298 			return true;
2299 		/* Device turned faulty? */
2300 		if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2301 			return true;
2302 	}
2303 
2304 	/* Check if any mddev parameters have changed */
2305 	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2306 	    (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2307 	    (mddev->layout != le32_to_cpu(sb->layout)) ||
2308 	    (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2309 	    (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2310 		return true;
2311 
2312 	return false;
2313 }
2314 
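/*
 * Write out the array metadata: bump (or roll back) the event count, sync
 * the in-memory superblocks and write them, together with any changed
 * bad-block lists, to every member that needs it, retrying until the
 * writes complete.
 */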
2315 void md_update_sb(struct mddev *mddev, int force_change)
2316 {
2317 	struct md_rdev *rdev;
2318 	int sync_req;
2319 	int nospares = 0;
2320 	int any_badblocks_changed = 0;
2321 	int ret = -1;
2322 
2323 	if (mddev->ro) {
2324 		if (force_change)
2325 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2326 		return;
2327 	}
2328 
2329 repeat:
2330 	if (mddev_is_clustered(mddev)) {
2331 		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2332 			force_change = 1;
2333 		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2334 			nospares = 1;
2335 		ret = md_cluster_ops->metadata_update_start(mddev);
2336 		/* Has someone else updated the sb? */
2337 		if (!does_sb_need_changing(mddev)) {
2338 			if (ret == 0)
2339 				md_cluster_ops->metadata_update_cancel(mddev);
2340 			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2341 							 BIT(MD_SB_CHANGE_DEVS) |
2342 							 BIT(MD_SB_CHANGE_CLEAN));
2343 			return;
2344 		}
2345 	}
2346 
2347 	/* First make sure individual recovery_offsets are correct */
2348 	rdev_for_each(rdev, mddev) {
2349 		if (rdev->raid_disk >= 0 &&
2350 		    mddev->delta_disks >= 0 &&
2351 		    !test_bit(Journal, &rdev->flags) &&
2352 		    !test_bit(In_sync, &rdev->flags) &&
2353 		    mddev->curr_resync_completed > rdev->recovery_offset)
2354 				rdev->recovery_offset = mddev->curr_resync_completed;
2355 
2356 	}
2357 	if (!mddev->persistent) {
2358 		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2359 		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2360 		if (!mddev->external) {
2361 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2362 			rdev_for_each(rdev, mddev) {
2363 				if (rdev->badblocks.changed) {
2364 					rdev->badblocks.changed = 0;
2365 					ack_all_badblocks(&rdev->badblocks);
2366 					md_error(mddev, rdev);
2367 				}
2368 				clear_bit(Blocked, &rdev->flags);
2369 				clear_bit(BlockedBadBlocks, &rdev->flags);
2370 				wake_up(&rdev->blocked_wait);
2371 			}
2372 		}
2373 		wake_up(&mddev->sb_wait);
2374 		return;
2375 	}
2376 
2377 	spin_lock(&mddev->lock);
2378 
2379 	mddev->utime = ktime_get_real_seconds();
2380 
2381 	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2382 		force_change = 1;
2383 	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2384 		/* just a clean <-> dirty transition, possibly leave spares alone,
2385 		 * though if events isn't the right even/odd, we will have to do
2386 		 * spares after all
2387 		 */
2388 		nospares = 1;
2389 	if (force_change)
2390 		nospares = 0;
2391 	if (mddev->degraded)
2392 		/* If the array is degraded, then skipping spares is both
2393 		 * dangerous and fairly pointless.
2394 		 * Dangerous because a device that was removed from the array
2395 		 * might have an event_count that still looks up-to-date,
2396 		 * so it can be re-added without a resync.
2397 		 * Pointless because if there are any spares to skip,
2398 		 * then a recovery will happen and soon that array won't
2399 		 * be degraded any more and the spare can go back to sleep then.
2400 		 */
2401 		nospares = 0;
2402 
2403 	sync_req = mddev->in_sync;
2404 
2405 	/* If this is just a dirty<->clean transition, and the array is clean
2406 	 * and 'events' is odd, we can roll back to the previous clean state */
2407 	if (nospares
2408 	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2409 	    && mddev->can_decrease_events
2410 	    && mddev->events != 1) {
2411 		mddev->events--;
2412 		mddev->can_decrease_events = 0;
2413 	} else {
2414 		/* otherwise we have to go forward and ... */
2415 		mddev->events++;
2416 		mddev->can_decrease_events = nospares;
2417 	}
2418 
2419 	/*
2420 	 * This 64-bit counter should never wrap.
2421 	 * Either we are in around ~1 trillion A.C., assuming
2422 	 * 1 reboot per second, or we have a bug...
2423 	 */
2424 	WARN_ON(mddev->events == 0);
2425 
2426 	rdev_for_each(rdev, mddev) {
2427 		if (rdev->badblocks.changed)
2428 			any_badblocks_changed++;
2429 		if (test_bit(Faulty, &rdev->flags))
2430 			set_bit(FaultRecorded, &rdev->flags);
2431 	}
2432 
2433 	sync_sbs(mddev, nospares);
2434 	spin_unlock(&mddev->lock);
2435 
2436 	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2437 		 mdname(mddev), mddev->in_sync);
2438 
2439 	if (mddev->queue)
2440 		blk_add_trace_msg(mddev->queue, "md md_update_sb");
2441 rewrite:
2442 	bitmap_update_sb(mddev->bitmap);
2443 	rdev_for_each(rdev, mddev) {
2444 		char b[BDEVNAME_SIZE];
2445 
2446 		if (rdev->sb_loaded != 1)
2447 			continue; /* no noise on spare devices */
2448 
2449 		if (!test_bit(Faulty, &rdev->flags)) {
2450 			md_super_write(mddev,rdev,
2451 				       rdev->sb_start, rdev->sb_size,
2452 				       rdev->sb_page);
2453 			pr_debug("md: (write) %s's sb offset: %llu\n",
2454 				 bdevname(rdev->bdev, b),
2455 				 (unsigned long long)rdev->sb_start);
2456 			rdev->sb_events = mddev->events;
2457 			if (rdev->badblocks.size) {
2458 				md_super_write(mddev, rdev,
2459 					       rdev->badblocks.sector,
2460 					       rdev->badblocks.size << 9,
2461 					       rdev->bb_page);
2462 				rdev->badblocks.size = 0;
2463 			}
2464 
2465 		} else
2466 			pr_debug("md: %s (skipping faulty)\n",
2467 				 bdevname(rdev->bdev, b));
2468 
2469 		if (mddev->level == LEVEL_MULTIPATH)
2470 			/* only need to write one superblock... */
2471 			break;
2472 	}
2473 	if (md_super_wait(mddev) < 0)
2474 		goto rewrite;
2475 	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2476 
2477 	if (mddev_is_clustered(mddev) && ret == 0)
2478 		md_cluster_ops->metadata_update_finish(mddev);
2479 
2480 	if (mddev->in_sync != sync_req ||
2481 	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2482 			       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2483 		/* have to write it out again */
2484 		goto repeat;
2485 	wake_up(&mddev->sb_wait);
2486 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2487 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2488 
2489 	rdev_for_each(rdev, mddev) {
2490 		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2491 			clear_bit(Blocked, &rdev->flags);
2492 
2493 		if (any_badblocks_changed)
2494 			ack_all_badblocks(&rdev->badblocks);
2495 		clear_bit(BlockedBadBlocks, &rdev->flags);
2496 		wake_up(&rdev->blocked_wait);
2497 	}
2498 }
2499 EXPORT_SYMBOL(md_update_sb);
2500 
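/*
 * Hand an already-bound rdev to the running personality via hot_add_disk()
 * (journal devices are added with the array suspended), then mark the
 * superblock dirty and poke the recovery thread.
 */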
2501 static int add_bound_rdev(struct md_rdev *rdev)
2502 {
2503 	struct mddev *mddev = rdev->mddev;
2504 	int err = 0;
2505 	bool add_journal = test_bit(Journal, &rdev->flags);
2506 
2507 	if (!mddev->pers->hot_remove_disk || add_journal) {
2508 		/* If there is hot_add_disk but no hot_remove_disk
2509 		 * then added disks are for geometry changes
2510 		 * and should be added immediately.
2511 		 */
2512 		super_types[mddev->major_version].
2513 			validate_super(mddev, rdev);
2514 		if (add_journal)
2515 			mddev_suspend(mddev);
2516 		err = mddev->pers->hot_add_disk(mddev, rdev);
2517 		if (add_journal)
2518 			mddev_resume(mddev);
2519 		if (err) {
2520 			md_kick_rdev_from_array(rdev);
2521 			return err;
2522 		}
2523 	}
2524 	sysfs_notify_dirent_safe(rdev->sysfs_state);
2525 
2526 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2527 	if (mddev->degraded)
2528 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2529 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2530 	md_new_event(mddev);
2531 	md_wakeup_thread(mddev->thread);
2532 	return 0;
2533 }
2534 
2535 /* words written to sysfs files may, or may not, be \n terminated.
2536  * We want to accept either case. For this we use cmd_match.
2537  */
2538 static int cmd_match(const char *cmd, const char *str)
2539 {
2540 	/* See if cmd, written into a sysfs file, matches
2541 	 * str.  They must either be the same, or cmd can
2542 	 * have a trailing newline
2543 	 */
2544 	while (*cmd && *str && *cmd == *str) {
2545 		cmd++;
2546 		str++;
2547 	}
2548 	if (*cmd == '\n')
2549 		cmd++;
2550 	if (*str || *cmd)
2551 		return 0;
2552 	return 1;
2553 }
2554 
2555 struct rdev_sysfs_entry {
2556 	struct attribute attr;
2557 	ssize_t (*show)(struct md_rdev *, char *);
2558 	ssize_t (*store)(struct md_rdev *, const char *, size_t);
2559 };
2560 
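/*
 * 'state' attribute: show the rdev flags as a comma-separated list, for
 * example "in_sync,write_mostly" (illustrative - the exact contents depend
 * on which flags are set).
 */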
2561 static ssize_t
2562 state_show(struct md_rdev *rdev, char *page)
2563 {
2564 	char *sep = ",";
2565 	size_t len = 0;
2566 	unsigned long flags = ACCESS_ONCE(rdev->flags);
2567 
2568 	if (test_bit(Faulty, &flags) ||
2569 	    (!test_bit(ExternalBbl, &flags) &&
2570 	    rdev->badblocks.unacked_exist))
2571 		len += sprintf(page+len, "faulty%s", sep);
2572 	if (test_bit(In_sync, &flags))
2573 		len += sprintf(page+len, "in_sync%s", sep);
2574 	if (test_bit(Journal, &flags))
2575 		len += sprintf(page+len, "journal%s", sep);
2576 	if (test_bit(WriteMostly, &flags))
2577 		len += sprintf(page+len, "write_mostly%s", sep);
2578 	if (test_bit(Blocked, &flags) ||
2579 	    (rdev->badblocks.unacked_exist
2580 	     && !test_bit(Faulty, &flags)))
2581 		len += sprintf(page+len, "blocked%s", sep);
2582 	if (!test_bit(Faulty, &flags) &&
2583 	    !test_bit(Journal, &flags) &&
2584 	    !test_bit(In_sync, &flags))
2585 		len += sprintf(page+len, "spare%s", sep);
2586 	if (test_bit(WriteErrorSeen, &flags))
2587 		len += sprintf(page+len, "write_error%s", sep);
2588 	if (test_bit(WantReplacement, &flags))
2589 		len += sprintf(page+len, "want_replacement%s", sep);
2590 	if (test_bit(Replacement, &flags))
2591 		len += sprintf(page+len, "replacement%s", sep);
2592 	if (test_bit(ExternalBbl, &flags))
2593 		len += sprintf(page+len, "external_bbl%s", sep);
2594 	if (test_bit(FailFast, &flags))
2595 		len += sprintf(page+len, "failfast%s", sep);
2596 
2597 	if (len)
2598 		len -= strlen(sep);
2599 
2600 	return len+sprintf(page+len, "\n");
2601 }
2602 
2603 static ssize_t
2604 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2605 {
2606 	/* can write
2607 	 *  faulty  - simulates an error
2608 	 *  remove  - disconnects the device
2609 	 *  writemostly - sets write_mostly
2610 	 *  -writemostly - clears write_mostly
2611 	 *  blocked - sets the Blocked flag
2612 	 *  -blocked - clears the Blocked flag and possibly simulates an error
2613 	 *  insync - sets Insync providing device isn't active
2614 	 *  -insync - clear Insync for a device with a slot assigned,
2615 	 *            so that it gets rebuilt based on bitmap
2616 	 *  write_error - sets WriteErrorSeen
2617 	 *  -write_error - clears WriteErrorSeen
2618 	 *  {,-}failfast - set/clear FailFast
2619 	 */
2620 	int err = -EINVAL;
2621 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2622 		md_error(rdev->mddev, rdev);
2623 		if (test_bit(Faulty, &rdev->flags))
2624 			err = 0;
2625 		else
2626 			err = -EBUSY;
2627 	} else if (cmd_match(buf, "remove")) {
2628 		if (rdev->mddev->pers) {
2629 			clear_bit(Blocked, &rdev->flags);
2630 			remove_and_add_spares(rdev->mddev, rdev);
2631 		}
2632 		if (rdev->raid_disk >= 0)
2633 			err = -EBUSY;
2634 		else {
2635 			struct mddev *mddev = rdev->mddev;
2636 			err = 0;
2637 			if (mddev_is_clustered(mddev))
2638 				err = md_cluster_ops->remove_disk(mddev, rdev);
2639 
2640 			if (err == 0) {
2641 				md_kick_rdev_from_array(rdev);
2642 				if (mddev->pers) {
2643 					set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2644 					md_wakeup_thread(mddev->thread);
2645 				}
2646 				md_new_event(mddev);
2647 			}
2648 		}
2649 	} else if (cmd_match(buf, "writemostly")) {
2650 		set_bit(WriteMostly, &rdev->flags);
2651 		err = 0;
2652 	} else if (cmd_match(buf, "-writemostly")) {
2653 		clear_bit(WriteMostly, &rdev->flags);
2654 		err = 0;
2655 	} else if (cmd_match(buf, "blocked")) {
2656 		set_bit(Blocked, &rdev->flags);
2657 		err = 0;
2658 	} else if (cmd_match(buf, "-blocked")) {
2659 		if (!test_bit(Faulty, &rdev->flags) &&
2660 		    !test_bit(ExternalBbl, &rdev->flags) &&
2661 		    rdev->badblocks.unacked_exist) {
2662 			/* metadata handler doesn't understand badblocks,
2663 			 * so we need to fail the device
2664 			 */
2665 			md_error(rdev->mddev, rdev);
2666 		}
2667 		clear_bit(Blocked, &rdev->flags);
2668 		clear_bit(BlockedBadBlocks, &rdev->flags);
2669 		wake_up(&rdev->blocked_wait);
2670 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2671 		md_wakeup_thread(rdev->mddev->thread);
2672 
2673 		err = 0;
2674 	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2675 		set_bit(In_sync, &rdev->flags);
2676 		err = 0;
2677 	} else if (cmd_match(buf, "failfast")) {
2678 		set_bit(FailFast, &rdev->flags);
2679 		err = 0;
2680 	} else if (cmd_match(buf, "-failfast")) {
2681 		clear_bit(FailFast, &rdev->flags);
2682 		err = 0;
2683 	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2684 		   !test_bit(Journal, &rdev->flags)) {
2685 		if (rdev->mddev->pers == NULL) {
2686 			clear_bit(In_sync, &rdev->flags);
2687 			rdev->saved_raid_disk = rdev->raid_disk;
2688 			rdev->raid_disk = -1;
2689 			err = 0;
2690 		}
2691 	} else if (cmd_match(buf, "write_error")) {
2692 		set_bit(WriteErrorSeen, &rdev->flags);
2693 		err = 0;
2694 	} else if (cmd_match(buf, "-write_error")) {
2695 		clear_bit(WriteErrorSeen, &rdev->flags);
2696 		err = 0;
2697 	} else if (cmd_match(buf, "want_replacement")) {
2698 		/* Any non-spare device that is not a replacement can
2699 		 * become want_replacement at any time, but we then need to
2700 		 * check if recovery is needed.
2701 		 */
2702 		if (rdev->raid_disk >= 0 &&
2703 		    !test_bit(Journal, &rdev->flags) &&
2704 		    !test_bit(Replacement, &rdev->flags))
2705 			set_bit(WantReplacement, &rdev->flags);
2706 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2707 		md_wakeup_thread(rdev->mddev->thread);
2708 		err = 0;
2709 	} else if (cmd_match(buf, "-want_replacement")) {
2710 		/* Clearing 'want_replacement' is always allowed.
2711 		 * Once replacement starts it is too late though.
2712 		 */
2713 		err = 0;
2714 		clear_bit(WantReplacement, &rdev->flags);
2715 	} else if (cmd_match(buf, "replacement")) {
2716 		/* Can only set a device as a replacement when array has not
2717 		 * yet been started.  Once running, replacement is automatic
2718 		 * from spares, or by assigning 'slot'.
2719 		 */
2720 		if (rdev->mddev->pers)
2721 			err = -EBUSY;
2722 		else {
2723 			set_bit(Replacement, &rdev->flags);
2724 			err = 0;
2725 		}
2726 	} else if (cmd_match(buf, "-replacement")) {
2727 		/* Similarly, can only clear Replacement before start */
2728 		if (rdev->mddev->pers)
2729 			err = -EBUSY;
2730 		else {
2731 			clear_bit(Replacement, &rdev->flags);
2732 			err = 0;
2733 		}
2734 	} else if (cmd_match(buf, "re-add")) {
2735 		if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2736 			/* clear_bit is performed _after_ all the devices
2737 			 * have their local Faulty bit cleared. If any writes
2738 			 * happen in the meantime in the local node, they
2739 			 * will land in the local bitmap, which will be synced
2740 			 * by this node eventually
2741 			 */
2742 			if (!mddev_is_clustered(rdev->mddev) ||
2743 			    (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2744 				clear_bit(Faulty, &rdev->flags);
2745 				err = add_bound_rdev(rdev);
2746 			}
2747 		} else
2748 			err = -EBUSY;
2749 	} else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
2750 		set_bit(ExternalBbl, &rdev->flags);
2751 		rdev->badblocks.shift = 0;
2752 		err = 0;
2753 	} else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
2754 		clear_bit(ExternalBbl, &rdev->flags);
2755 		err = 0;
2756 	}
2757 	if (!err)
2758 		sysfs_notify_dirent_safe(rdev->sysfs_state);
2759 	return err ? err : len;
2760 }
2761 static struct rdev_sysfs_entry rdev_state =
2762 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
2763 
2764 static ssize_t
2765 errors_show(struct md_rdev *rdev, char *page)
2766 {
2767 	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2768 }
2769 
2770 static ssize_t
2771 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2772 {
2773 	unsigned int n;
2774 	int rv;
2775 
2776 	rv = kstrtouint(buf, 10, &n);
2777 	if (rv < 0)
2778 		return rv;
2779 	atomic_set(&rdev->corrected_errors, n);
2780 	return len;
2781 }
2782 static struct rdev_sysfs_entry rdev_errors =
2783 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2784 
2785 static ssize_t
2786 slot_show(struct md_rdev *rdev, char *page)
2787 {
2788 	if (test_bit(Journal, &rdev->flags))
2789 		return sprintf(page, "journal\n");
2790 	else if (rdev->raid_disk < 0)
2791 		return sprintf(page, "none\n");
2792 	else
2793 		return sprintf(page, "%d\n", rdev->raid_disk);
2794 }
2795 
2796 static ssize_t
2797 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2798 {
2799 	int slot;
2800 	int err;
2801 
2802 	if (test_bit(Journal, &rdev->flags))
2803 		return -EBUSY;
2804 	if (strncmp(buf, "none", 4)==0)
2805 		slot = -1;
2806 	else {
2807 		err = kstrtouint(buf, 10, (unsigned int *)&slot);
2808 		if (err < 0)
2809 			return err;
2810 	}
2811 	if (rdev->mddev->pers && slot == -1) {
2812 		/* Setting 'slot' on an active array requires also
2813 		 * updating the 'rd%d' link, and communicating
2814 		 * with the personality with ->hot_*_disk.
2815 		 * For now we only support removing
2816 		 * failed/spare devices.  This normally happens automatically,
2817 		 * but not when the metadata is externally managed.
2818 		 */
2819 		if (rdev->raid_disk == -1)
2820 			return -EEXIST;
2821 		/* personality does all needed checks */
2822 		if (rdev->mddev->pers->hot_remove_disk == NULL)
2823 			return -EINVAL;
2824 		clear_bit(Blocked, &rdev->flags);
2825 		remove_and_add_spares(rdev->mddev, rdev);
2826 		if (rdev->raid_disk >= 0)
2827 			return -EBUSY;
2828 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2829 		md_wakeup_thread(rdev->mddev->thread);
2830 	} else if (rdev->mddev->pers) {
2831 		/* Activating a spare .. or possibly reactivating
2832 		 * if we ever get bitmaps working here.
2833 		 */
2834 		int err;
2835 
2836 		if (rdev->raid_disk != -1)
2837 			return -EBUSY;
2838 
2839 		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2840 			return -EBUSY;
2841 
2842 		if (rdev->mddev->pers->hot_add_disk == NULL)
2843 			return -EINVAL;
2844 
2845 		if (slot >= rdev->mddev->raid_disks &&
2846 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2847 			return -ENOSPC;
2848 
2849 		rdev->raid_disk = slot;
2850 		if (test_bit(In_sync, &rdev->flags))
2851 			rdev->saved_raid_disk = slot;
2852 		else
2853 			rdev->saved_raid_disk = -1;
2854 		clear_bit(In_sync, &rdev->flags);
2855 		clear_bit(Bitmap_sync, &rdev->flags);
2856 		err = rdev->mddev->pers->
2857 			hot_add_disk(rdev->mddev, rdev);
2858 		if (err) {
2859 			rdev->raid_disk = -1;
2860 			return err;
2861 		} else
2862 			sysfs_notify_dirent_safe(rdev->sysfs_state);
2863 		if (sysfs_link_rdev(rdev->mddev, rdev))
2864 			/* failure here is OK */;
2865 		/* don't wakeup anyone, leave that to userspace. */
2866 	} else {
2867 		if (slot >= rdev->mddev->raid_disks &&
2868 		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2869 			return -ENOSPC;
2870 		rdev->raid_disk = slot;
2871 		/* assume it is working */
2872 		clear_bit(Faulty, &rdev->flags);
2873 		clear_bit(WriteMostly, &rdev->flags);
2874 		set_bit(In_sync, &rdev->flags);
2875 		sysfs_notify_dirent_safe(rdev->sysfs_state);
2876 	}
2877 	return len;
2878 }
2879 
2880 static struct rdev_sysfs_entry rdev_slot =
2881 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2882 
2883 static ssize_t
2884 offset_show(struct md_rdev *rdev, char *page)
2885 {
2886 	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2887 }
2888 
2889 static ssize_t
2890 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2891 {
2892 	unsigned long long offset;
2893 	if (kstrtoull(buf, 10, &offset) < 0)
2894 		return -EINVAL;
2895 	if (rdev->mddev->pers && rdev->raid_disk >= 0)
2896 		return -EBUSY;
2897 	if (rdev->sectors && rdev->mddev->external)
2898 		/* Must set offset before size, so overlap checks
2899 		 * can be sane */
2900 		return -EBUSY;
2901 	rdev->data_offset = offset;
2902 	rdev->new_data_offset = offset;
2903 	return len;
2904 }
2905 
2906 static struct rdev_sysfs_entry rdev_offset =
2907 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2908 
2909 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2910 {
2911 	return sprintf(page, "%llu\n",
2912 		       (unsigned long long)rdev->new_data_offset);
2913 }
2914 
2915 static ssize_t new_offset_store(struct md_rdev *rdev,
2916 				const char *buf, size_t len)
2917 {
2918 	unsigned long long new_offset;
2919 	struct mddev *mddev = rdev->mddev;
2920 
2921 	if (kstrtoull(buf, 10, &new_offset) < 0)
2922 		return -EINVAL;
2923 
2924 	if (mddev->sync_thread ||
2925 	    test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2926 		return -EBUSY;
2927 	if (new_offset == rdev->data_offset)
2928 		/* reset is always permitted */
2929 		;
2930 	else if (new_offset > rdev->data_offset) {
2931 		/* must not push array size beyond rdev_sectors */
2932 		if (new_offset - rdev->data_offset
2933 		    + mddev->dev_sectors > rdev->sectors)
2934 				return -E2BIG;
2935 	}
2936 	/* Metadata worries about other space details. */
2937 
2938 	/* decreasing the offset is inconsistent with a backwards
2939 	 * reshape.
2940 	 */
2941 	if (new_offset < rdev->data_offset &&
2942 	    mddev->reshape_backwards)
2943 		return -EINVAL;
2944 	/* Increasing offset is inconsistent with forwards
2945 	 * reshape.  reshape_direction should be set to
2946 	 * 'backwards' first.
2947 	 */
2948 	if (new_offset > rdev->data_offset &&
2949 	    !mddev->reshape_backwards)
2950 		return -EINVAL;
2951 
2952 	if (mddev->pers && mddev->persistent &&
2953 	    !super_types[mddev->major_version]
2954 	    .allow_new_offset(rdev, new_offset))
2955 		return -E2BIG;
2956 	rdev->new_data_offset = new_offset;
2957 	if (new_offset > rdev->data_offset)
2958 		mddev->reshape_backwards = 1;
2959 	else if (new_offset < rdev->data_offset)
2960 		mddev->reshape_backwards = 0;
2961 
2962 	return len;
2963 }
2964 static struct rdev_sysfs_entry rdev_new_offset =
2965 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
2966 
2967 static ssize_t
2968 rdev_size_show(struct md_rdev *rdev, char *page)
2969 {
2970 	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2971 }
2972 
2973 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2974 {
2975 	/* check if two start/length pairs overlap */
2976 	if (s1+l1 <= s2)
2977 		return 0;
2978 	if (s2+l2 <= s1)
2979 		return 0;
2980 	return 1;
2981 }
2982 
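/*
 * Parse a decimal number of 1K blocks from 'buf' and return it in 'sectors'
 * as 512-byte sectors, rejecting values that would overflow the conversion.
 */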
2983 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2984 {
2985 	unsigned long long blocks;
2986 	sector_t new;
2987 
2988 	if (kstrtoull(buf, 10, &blocks) < 0)
2989 		return -EINVAL;
2990 
2991 	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2992 		return -EINVAL; /* sector conversion overflow */
2993 
2994 	new = blocks * 2;
2995 	if (new != blocks * 2)
2996 		return -EINVAL; /* unsigned long long to sector_t overflow */
2997 
2998 	*sectors = new;
2999 	return 0;
3000 }
3001 
3002 static ssize_t
3003 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3004 {
3005 	struct mddev *my_mddev = rdev->mddev;
3006 	sector_t oldsectors = rdev->sectors;
3007 	sector_t sectors;
3008 
3009 	if (test_bit(Journal, &rdev->flags))
3010 		return -EBUSY;
3011 	if (strict_blocks_to_sectors(buf, &sectors) < 0)
3012 		return -EINVAL;
3013 	if (rdev->data_offset != rdev->new_data_offset)
3014 		return -EINVAL; /* too confusing */
3015 	if (my_mddev->pers && rdev->raid_disk >= 0) {
3016 		if (my_mddev->persistent) {
3017 			sectors = super_types[my_mddev->major_version].
3018 				rdev_size_change(rdev, sectors);
3019 			if (!sectors)
3020 				return -EBUSY;
3021 		} else if (!sectors)
3022 			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
3023 				rdev->data_offset;
3024 		if (!my_mddev->pers->resize)
3025 			/* Cannot change size for RAID0 or Linear etc */
3026 			return -EINVAL;
3027 	}
3028 	if (sectors < my_mddev->dev_sectors)
3029 		return -EINVAL; /* component must fit device */
3030 
3031 	rdev->sectors = sectors;
3032 	if (sectors > oldsectors && my_mddev->external) {
3033 		/* Need to check that all other rdevs with the same
3034 		 * ->bdev do not overlap.  'rcu' is sufficient to walk
3035 		 * the rdev lists safely.
3036 		 * This check does not provide a hard guarantee, it
3037 		 * just helps avoid dangerous mistakes.
3038 		 */
3039 		struct mddev *mddev;
3040 		int overlap = 0;
3041 		struct list_head *tmp;
3042 
3043 		rcu_read_lock();
3044 		for_each_mddev(mddev, tmp) {
3045 			struct md_rdev *rdev2;
3046 
3047 			rdev_for_each(rdev2, mddev)
3048 				if (rdev->bdev == rdev2->bdev &&
3049 				    rdev != rdev2 &&
3050 				    overlaps(rdev->data_offset, rdev->sectors,
3051 					     rdev2->data_offset,
3052 					     rdev2->sectors)) {
3053 					overlap = 1;
3054 					break;
3055 				}
3056 			if (overlap) {
3057 				mddev_put(mddev);
3058 				break;
3059 			}
3060 		}
3061 		rcu_read_unlock();
3062 		if (overlap) {
3063 			/* Someone else could have slipped in a size
3064 			 * change here, but doing so is just silly.
3065 			 * We put oldsectors back because we *know* it is
3066 			 * safe, and trust userspace not to race with
3067 			 * itself
3068 			 */
3069 			rdev->sectors = oldsectors;
3070 			return -EBUSY;
3071 		}
3072 	}
3073 	return len;
3074 }
3075 
3076 static struct rdev_sysfs_entry rdev_size =
3077 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3078 
3079 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3080 {
3081 	unsigned long long recovery_start = rdev->recovery_offset;
3082 
3083 	if (test_bit(In_sync, &rdev->flags) ||
3084 	    recovery_start == MaxSector)
3085 		return sprintf(page, "none\n");
3086 
3087 	return sprintf(page, "%llu\n", recovery_start);
3088 }
3089 
3090 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3091 {
3092 	unsigned long long recovery_start;
3093 
3094 	if (cmd_match(buf, "none"))
3095 		recovery_start = MaxSector;
3096 	else if (kstrtoull(buf, 10, &recovery_start))
3097 		return -EINVAL;
3098 
3099 	if (rdev->mddev->pers &&
3100 	    rdev->raid_disk >= 0)
3101 		return -EBUSY;
3102 
3103 	rdev->recovery_offset = recovery_start;
3104 	if (recovery_start == MaxSector)
3105 		set_bit(In_sync, &rdev->flags);
3106 	else
3107 		clear_bit(In_sync, &rdev->flags);
3108 	return len;
3109 }
3110 
3111 static struct rdev_sysfs_entry rdev_recovery_start =
3112 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3113 
3114 /* sysfs access to bad-blocks list.
3115  * We present two files.
3116  * 'bad_blocks' lists sector numbers and lengths of ranges that
3117  *    are recorded as bad.  The list is truncated to fit within
3118  *    the one-page limit of sysfs.
3119  *    Writing "sector length" to this file adds an acknowledged
3120  *    bad block to the list.
3121  * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3122  *    been acknowledged.  Writing to this file adds bad blocks
3123  *    without acknowledging them.  This is largely for testing.
3124  */
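/* For example (illustrative path):
 *   echo "4096 64" > /sys/block/md0/md/dev-sda1/bad_blocks
 * records a 64-sector bad range starting at sector 4096 on that member.
 */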
3125 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3126 {
3127 	return badblocks_show(&rdev->badblocks, page, 0);
3128 }
3129 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3130 {
3131 	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3132 	/* Maybe that ack was all we needed */
3133 	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3134 		wake_up(&rdev->blocked_wait);
3135 	return rv;
3136 }
3137 static struct rdev_sysfs_entry rdev_bad_blocks =
3138 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3139 
3140 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3141 {
3142 	return badblocks_show(&rdev->badblocks, page, 1);
3143 }
3144 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3145 {
3146 	return badblocks_store(&rdev->badblocks, page, len, 1);
3147 }
3148 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3149 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3150 
3151 static struct attribute *rdev_default_attrs[] = {
3152 	&rdev_state.attr,
3153 	&rdev_errors.attr,
3154 	&rdev_slot.attr,
3155 	&rdev_offset.attr,
3156 	&rdev_new_offset.attr,
3157 	&rdev_size.attr,
3158 	&rdev_recovery_start.attr,
3159 	&rdev_bad_blocks.attr,
3160 	&rdev_unack_bad_blocks.attr,
3161 	NULL,
3162 };
3163 static ssize_t
3164 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3165 {
3166 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3167 	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3168 
3169 	if (!entry->show)
3170 		return -EIO;
3171 	if (!rdev->mddev)
3172 		return -EBUSY;
3173 	return entry->show(rdev, page);
3174 }
3175 
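/*
 * sysfs ->store for rdev attributes: take the owning array's lock and
 * re-check that the rdev is still attached before calling the handler.
 */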
3176 static ssize_t
3177 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3178 	      const char *page, size_t length)
3179 {
3180 	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3181 	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3182 	ssize_t rv;
3183 	struct mddev *mddev = rdev->mddev;
3184 
3185 	if (!entry->store)
3186 		return -EIO;
3187 	if (!capable(CAP_SYS_ADMIN))
3188 		return -EACCES;
3189 	rv = mddev ? mddev_lock(mddev): -EBUSY;
3190 	if (!rv) {
3191 		if (rdev->mddev == NULL)
3192 			rv = -EBUSY;
3193 		else
3194 			rv = entry->store(rdev, page, length);
3195 		mddev_unlock(mddev);
3196 	}
3197 	return rv;
3198 }
3199 
3200 static void rdev_free(struct kobject *ko)
3201 {
3202 	struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3203 	kfree(rdev);
3204 }
3205 static const struct sysfs_ops rdev_sysfs_ops = {
3206 	.show		= rdev_attr_show,
3207 	.store		= rdev_attr_store,
3208 };
3209 static struct kobj_type rdev_ktype = {
3210 	.release	= rdev_free,
3211 	.sysfs_ops	= &rdev_sysfs_ops,
3212 	.default_attrs	= rdev_default_attrs,
3213 };
3214 
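/*
 * Initialise the generic fields of a freshly allocated md_rdev and reserve
 * space for its bad-block list.
 */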
3215 int md_rdev_init(struct md_rdev *rdev)
3216 {
3217 	rdev->desc_nr = -1;
3218 	rdev->saved_raid_disk = -1;
3219 	rdev->raid_disk = -1;
3220 	rdev->flags = 0;
3221 	rdev->data_offset = 0;
3222 	rdev->new_data_offset = 0;
3223 	rdev->sb_events = 0;
3224 	rdev->last_read_error = 0;
3225 	rdev->sb_loaded = 0;
3226 	rdev->bb_page = NULL;
3227 	atomic_set(&rdev->nr_pending, 0);
3228 	atomic_set(&rdev->read_errors, 0);
3229 	atomic_set(&rdev->corrected_errors, 0);
3230 
3231 	INIT_LIST_HEAD(&rdev->same_set);
3232 	init_waitqueue_head(&rdev->blocked_wait);
3233 
3234 	/* Add space to store bad block list.
3235 	 * This reserves the space even on arrays where it cannot
3236 	 * be used - I wonder if that matters
3237 	 */
3238 	return badblocks_init(&rdev->badblocks, 0);
3239 }
3240 EXPORT_SYMBOL_GPL(md_rdev_init);
3241 /*
3242  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3243  *
3244  * mark the device faulty if:
3245  *
3246  *   - the device is nonexistent (zero size)
3247  *   - the device has no valid superblock
3248  *
3249  * a faulty rdev _never_ has rdev->sb set.
3250  */
3251 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3252 {
3253 	char b[BDEVNAME_SIZE];
3254 	int err;
3255 	struct md_rdev *rdev;
3256 	sector_t size;
3257 
3258 	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3259 	if (!rdev)
3260 		return ERR_PTR(-ENOMEM);
3261 
3262 	err = md_rdev_init(rdev);
3263 	if (err)
3264 		goto abort_free;
3265 	err = alloc_disk_sb(rdev);
3266 	if (err)
3267 		goto abort_free;
3268 
3269 	err = lock_rdev(rdev, newdev, super_format == -2);
3270 	if (err)
3271 		goto abort_free;
3272 
3273 	kobject_init(&rdev->kobj, &rdev_ktype);
3274 
3275 	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3276 	if (!size) {
3277 		pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3278 			bdevname(rdev->bdev,b));
3279 		err = -EINVAL;
3280 		goto abort_free;
3281 	}
3282 
3283 	if (super_format >= 0) {
3284 		err = super_types[super_format].
3285 			load_super(rdev, NULL, super_minor);
3286 		if (err == -EINVAL) {
3287 			pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3288 				bdevname(rdev->bdev,b),
3289 				super_format, super_minor);
3290 			goto abort_free;
3291 		}
3292 		if (err < 0) {
3293 			pr_warn("md: could not read %s's sb, not importing!\n",
3294 				bdevname(rdev->bdev,b));
3295 			goto abort_free;
3296 		}
3297 	}
3298 
3299 	return rdev;
3300 
3301 abort_free:
3302 	if (rdev->bdev)
3303 		unlock_rdev(rdev);
3304 	md_rdev_clear(rdev);
3305 	kfree(rdev);
3306 	return ERR_PTR(err);
3307 }
3308 
3309 /*
3310  * Check a full RAID array for plausibility
3311  */
3312 
3313 static void analyze_sbs(struct mddev *mddev)
3314 {
3315 	int i;
3316 	struct md_rdev *rdev, *freshest, *tmp;
3317 	char b[BDEVNAME_SIZE];
3318 
3319 	freshest = NULL;
3320 	rdev_for_each_safe(rdev, tmp, mddev)
3321 		switch (super_types[mddev->major_version].
3322 			load_super(rdev, freshest, mddev->minor_version)) {
3323 		case 1:
3324 			freshest = rdev;
3325 			break;
3326 		case 0:
3327 			break;
3328 		default:
3329 			pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3330 				bdevname(rdev->bdev,b));
3331 			md_kick_rdev_from_array(rdev);
3332 		}
3333 
	/* If every superblock failed to load, freshest is still NULL; bail
	 * out rather than handing a NULL rdev to validate_super(). */
	if (!freshest)
		return;

3334 	super_types[mddev->major_version].
3335 		validate_super(mddev, freshest);
3336 
3337 	i = 0;
3338 	rdev_for_each_safe(rdev, tmp, mddev) {
3339 		if (mddev->max_disks &&
3340 		    (rdev->desc_nr >= mddev->max_disks ||
3341 		     i > mddev->max_disks)) {
3342 			pr_warn("md: %s: %s: only %d devices permitted\n",
3343 				mdname(mddev), bdevname(rdev->bdev, b),
3344 				mddev->max_disks);
3345 			md_kick_rdev_from_array(rdev);
3346 			continue;
3347 		}
3348 		if (rdev != freshest) {
3349 			if (super_types[mddev->major_version].
3350 			    validate_super(mddev, rdev)) {
3351 				pr_warn("md: kicking non-fresh %s from array!\n",
3352 					bdevname(rdev->bdev,b));
3353 				md_kick_rdev_from_array(rdev);
3354 				continue;
3355 			}
3356 		}
3357 		if (mddev->level == LEVEL_MULTIPATH) {
3358 			rdev->desc_nr = i++;
3359 			rdev->raid_disk = rdev->desc_nr;
3360 			set_bit(In_sync, &rdev->flags);
3361 		} else if (rdev->raid_disk >=
3362 			    (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3363 			   !test_bit(Journal, &rdev->flags)) {
3364 			rdev->raid_disk = -1;
3365 			clear_bit(In_sync, &rdev->flags);
3366 		}
3367 	}
3368 }
3369 
3370 /* Read a fixed-point number.
3371  * Numbers in sysfs attributes should be in "standard" units where
3372  * possible, so time should be in seconds.
3373  * However we internally use a much smaller unit such as
3374  * milliseconds or jiffies.
3375  * This function takes a decimal number with a possible fractional
3376  * component, and produces an integer which is the result of
3377  * multiplying that number by 10^'scale'.
3378  * all without any floating-point arithmetic.
3379  */
3380 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3381 {
3382 	unsigned long result = 0;
3383 	long decimals = -1;
3384 	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3385 		if (*cp == '.')
3386 			decimals = 0;
3387 		else if (decimals < scale) {
3388 			unsigned int value;
3389 			value = *cp - '0';
3390 			result = result * 10 + value;
3391 			if (decimals >= 0)
3392 				decimals++;
3393 		}
3394 		cp++;
3395 	}
3396 	if (*cp == '\n')
3397 		cp++;
3398 	if (*cp)
3399 		return -EINVAL;
3400 	if (decimals < 0)
3401 		decimals = 0;
3402 	while (decimals < scale) {
3403 		result *= 10;
3404 		decimals ++;
3405 	}
3406 	*res = result;
3407 	return 0;
3408 }
3409 
3410 static ssize_t
3411 safe_delay_show(struct mddev *mddev, char *page)
3412 {
3413 	int msec = (mddev->safemode_delay*1000)/HZ;
3414 	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3415 }
3416 static ssize_t
3417 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3418 {
3419 	unsigned long msec;
3420 
3421 	if (mddev_is_clustered(mddev)) {
3422 		pr_warn("md: Safemode is disabled for clustered mode\n");
3423 		return -EINVAL;
3424 	}
3425 
3426 	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3427 		return -EINVAL;
3428 	if (msec == 0)
3429 		mddev->safemode_delay = 0;
3430 	else {
3431 		unsigned long old_delay = mddev->safemode_delay;
3432 		unsigned long new_delay = (msec*HZ)/1000;
3433 
3434 		if (new_delay == 0)
3435 			new_delay = 1;
3436 		mddev->safemode_delay = new_delay;
3437 		if (new_delay < old_delay || old_delay == 0)
3438 			mod_timer(&mddev->safemode_timer, jiffies+1);
3439 	}
3440 	return len;
3441 }
3442 static struct md_sysfs_entry md_safe_delay =
3443 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
3444 
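/*
 * 'level' attribute: show the active personality name (or the configured
 * level if the array is not running); writing it requests an online
 * personality takeover, handled by level_store() below.
 */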
3445 static ssize_t
3446 level_show(struct mddev *mddev, char *page)
3447 {
3448 	struct md_personality *p;
3449 	int ret;
3450 	spin_lock(&mddev->lock);
3451 	p = mddev->pers;
3452 	if (p)
3453 		ret = sprintf(page, "%s\n", p->name);
3454 	else if (mddev->clevel[0])
3455 		ret = sprintf(page, "%s\n", mddev->clevel);
3456 	else if (mddev->level != LEVEL_NONE)
3457 		ret = sprintf(page, "%d\n", mddev->level);
3458 	else
3459 		ret = 0;
3460 	spin_unlock(&mddev->lock);
3461 	return ret;
3462 }
3463 
3464 static ssize_t
3465 level_store(struct mddev *mddev, const char *buf, size_t len)
3466 {
3467 	char clevel[16];
3468 	ssize_t rv;
3469 	size_t slen = len;
3470 	struct md_personality *pers, *oldpers;
3471 	long level;
3472 	void *priv, *oldpriv;
3473 	struct md_rdev *rdev;
3474 
3475 	if (slen == 0 || slen >= sizeof(clevel))
3476 		return -EINVAL;
3477 
3478 	rv = mddev_lock(mddev);
3479 	if (rv)
3480 		return rv;
3481 
3482 	if (mddev->pers == NULL) {
3483 		strncpy(mddev->clevel, buf, slen);
3484 		if (mddev->clevel[slen-1] == '\n')
3485 			slen--;
3486 		mddev->clevel[slen] = 0;
3487 		mddev->level = LEVEL_NONE;
3488 		rv = len;
3489 		goto out_unlock;
3490 	}
3491 	rv = -EROFS;
3492 	if (mddev->ro)
3493 		goto out_unlock;
3494 
3495 	/* request to change the personality.  Need to ensure:
3496 	 *  - array is not engaged in resync/recovery/reshape
3497 	 *  - old personality can be suspended
3498 	 *  - new personality will accept the array (takeover).
3499 	 */
3500 
3501 	rv = -EBUSY;
3502 	if (mddev->sync_thread ||
3503 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3504 	    mddev->reshape_position != MaxSector ||
3505 	    mddev->sysfs_active)
3506 		goto out_unlock;
3507 
3508 	rv = -EINVAL;
3509 	if (!mddev->pers->quiesce) {
3510 		pr_warn("md: %s: %s does not support online personality change\n",
3511 			mdname(mddev), mddev->pers->name);
3512 		goto out_unlock;
3513 	}
3514 
3515 	/* Now find the new personality */
3516 	strncpy(clevel, buf, slen);
3517 	if (clevel[slen-1] == '\n')
3518 		slen--;
3519 	clevel[slen] = 0;
3520 	if (kstrtol(clevel, 10, &level))
3521 		level = LEVEL_NONE;
3522 
3523 	if (request_module("md-%s", clevel) != 0)
3524 		request_module("md-level-%s", clevel);
3525 	spin_lock(&pers_lock);
3526 	pers = find_pers(level, clevel);
3527 	if (!pers || !try_module_get(pers->owner)) {
3528 		spin_unlock(&pers_lock);
3529 		pr_warn("md: personality %s not loaded\n", clevel);
3530 		rv = -EINVAL;
3531 		goto out_unlock;
3532 	}
3533 	spin_unlock(&pers_lock);
3534 
3535 	if (pers == mddev->pers) {
3536 		/* Nothing to do! */
3537 		module_put(pers->owner);
3538 		rv = len;
3539 		goto out_unlock;
3540 	}
3541 	if (!pers->takeover) {
3542 		module_put(pers->owner);
3543 		pr_warn("md: %s: %s does not support personality takeover\n",
3544 			mdname(mddev), clevel);
3545 		rv = -EINVAL;
3546 		goto out_unlock;
3547 	}
3548 
3549 	rdev_for_each(rdev, mddev)
3550 		rdev->new_raid_disk = rdev->raid_disk;
3551 
3552 	/* ->takeover must set new_* and/or delta_disks
3553 	 * if it succeeds, and may set them when it fails.
3554 	 */
3555 	priv = pers->takeover(mddev);
3556 	if (IS_ERR(priv)) {
3557 		mddev->new_level = mddev->level;
3558 		mddev->new_layout = mddev->layout;
3559 		mddev->new_chunk_sectors = mddev->chunk_sectors;
3560 		mddev->raid_disks -= mddev->delta_disks;
3561 		mddev->delta_disks = 0;
3562 		mddev->reshape_backwards = 0;
3563 		module_put(pers->owner);
3564 		pr_warn("md: %s: %s would not accept array\n",
3565 			mdname(mddev), clevel);
3566 		rv = PTR_ERR(priv);
3567 		goto out_unlock;
3568 	}
3569 
3570 	/* Looks like we have a winner */
3571 	mddev_suspend(mddev);
3572 	mddev_detach(mddev);
3573 
3574 	spin_lock(&mddev->lock);
3575 	oldpers = mddev->pers;
3576 	oldpriv = mddev->private;
3577 	mddev->pers = pers;
3578 	mddev->private = priv;
3579 	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3580 	mddev->level = mddev->new_level;
3581 	mddev->layout = mddev->new_layout;
3582 	mddev->chunk_sectors = mddev->new_chunk_sectors;
3583 	mddev->delta_disks = 0;
3584 	mddev->reshape_backwards = 0;
3585 	mddev->degraded = 0;
3586 	spin_unlock(&mddev->lock);
3587 
3588 	if (oldpers->sync_request == NULL &&
3589 	    mddev->external) {
3590 		/* We are converting from a no-redundancy array
3591 		 * to a redundancy array and metadata is managed
3592 		 * externally so we need to be sure that writes
3593 		 * won't block due to a need to transition
3594 		 *      clean->dirty
3595 		 * until external management is started.
3596 		 */
3597 		mddev->in_sync = 0;
3598 		mddev->safemode_delay = 0;
3599 		mddev->safemode = 0;
3600 	}
3601 
3602 	oldpers->free(mddev, oldpriv);
3603 
3604 	if (oldpers->sync_request == NULL &&
3605 	    pers->sync_request != NULL) {
3606 		/* need to add the md_redundancy_group */
3607 		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3608 			pr_warn("md: cannot register extra attributes for %s\n",
3609 				mdname(mddev));
3610 		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3611 	}
3612 	if (oldpers->sync_request != NULL &&
3613 	    pers->sync_request == NULL) {
3614 		/* need to remove the md_redundancy_group */
3615 		if (mddev->to_remove == NULL)
3616 			mddev->to_remove = &md_redundancy_group;
3617 	}
3618 
3619 	module_put(oldpers->owner);
3620 
3621 	rdev_for_each(rdev, mddev) {
3622 		if (rdev->raid_disk < 0)
3623 			continue;
3624 		if (rdev->new_raid_disk >= mddev->raid_disks)
3625 			rdev->new_raid_disk = -1;
3626 		if (rdev->new_raid_disk == rdev->raid_disk)
3627 			continue;
3628 		sysfs_unlink_rdev(mddev, rdev);
3629 	}
3630 	rdev_for_each(rdev, mddev) {
3631 		if (rdev->raid_disk < 0)
3632 			continue;
3633 		if (rdev->new_raid_disk == rdev->raid_disk)
3634 			continue;
3635 		rdev->raid_disk = rdev->new_raid_disk;
3636 		if (rdev->raid_disk < 0)
3637 			clear_bit(In_sync, &rdev->flags);
3638 		else {
3639 			if (sysfs_link_rdev(mddev, rdev))
3640 				pr_warn("md: cannot register rd%d for %s after level change\n",
3641 					rdev->raid_disk, mdname(mddev));
3642 		}
3643 	}
3644 
3645 	if (pers->sync_request == NULL) {
3646 		/* this is now an array without redundancy, so
3647 		 * it must always be in_sync
3648 		 */
3649 		mddev->in_sync = 1;
3650 		del_timer_sync(&mddev->safemode_timer);
3651 	}
3652 	blk_set_stacking_limits(&mddev->queue->limits);
3653 	pers->run(mddev);
3654 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3655 	mddev_resume(mddev);
3656 	if (!mddev->thread)
3657 		md_update_sb(mddev, 1);
3658 	sysfs_notify(&mddev->kobj, NULL, "level");
3659 	md_new_event(mddev);
3660 	rv = len;
3661 out_unlock:
3662 	mddev_unlock(mddev);
3663 	return rv;
3664 }
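/*
 * Example (editor's sketch): if the array is not running yet (mddev->pers
 * == NULL), writing "raid6\n" merely records the request: clevel becomes
 * "raid6" (newline stripped) and level is reset to LEVEL_NONE, for use
 * when the array is eventually run.  On a running array the string must
 * name a personality whose ->takeover() accepts the current geometry;
 * otherwise the error returned by ->takeover() is propagated and the
 * array is left untouched.
 */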
3665 
3666 static struct md_sysfs_entry md_level =
3667 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
3668 
3669 static ssize_t
3670 layout_show(struct mddev *mddev, char *page)
3671 {
3672 	/* just a number, not meaningful for all levels */
3673 	if (mddev->reshape_position != MaxSector &&
3674 	    mddev->layout != mddev->new_layout)
3675 		return sprintf(page, "%d (%d)\n",
3676 			       mddev->new_layout, mddev->layout);
3677 	return sprintf(page, "%d\n", mddev->layout);
3678 }
3679 
3680 static ssize_t
3681 layout_store(struct mddev *mddev, const char *buf, size_t len)
3682 {
3683 	unsigned int n;
3684 	int err;
3685 
3686 	err = kstrtouint(buf, 10, &n);
3687 	if (err < 0)
3688 		return err;
3689 	err = mddev_lock(mddev);
3690 	if (err)
3691 		return err;
3692 
3693 	if (mddev->pers) {
3694 		if (mddev->pers->check_reshape == NULL)
3695 			err = -EBUSY;
3696 		else if (mddev->ro)
3697 			err = -EROFS;
3698 		else {
3699 			mddev->new_layout = n;
3700 			err = mddev->pers->check_reshape(mddev);
3701 			if (err)
3702 				mddev->new_layout = mddev->layout;
3703 		}
3704 	} else {
3705 		mddev->new_layout = n;
3706 		if (mddev->reshape_position == MaxSector)
3707 			mddev->layout = n;
3708 	}
3709 	mddev_unlock(mddev);
3710 	return err ?: len;
3711 }
3712 static struct md_sysfs_entry md_layout =
3713 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3714 
3715 static ssize_t
3716 raid_disks_show(struct mddev *mddev, char *page)
3717 {
3718 	if (mddev->raid_disks == 0)
3719 		return 0;
3720 	if (mddev->reshape_position != MaxSector &&
3721 	    mddev->delta_disks != 0)
3722 		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3723 			       mddev->raid_disks - mddev->delta_disks);
3724 	return sprintf(page, "%d\n", mddev->raid_disks);
3725 }
3726 
3727 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3728 
3729 static ssize_t
3730 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3731 {
3732 	unsigned int n;
3733 	int err;
3734 
3735 	err = kstrtouint(buf, 10, &n);
3736 	if (err < 0)
3737 		return err;
3738 
3739 	err = mddev_lock(mddev);
3740 	if (err)
3741 		return err;
3742 	if (mddev->pers)
3743 		err = update_raid_disks(mddev, n);
3744 	else if (mddev->reshape_position != MaxSector) {
3745 		struct md_rdev *rdev;
3746 		int olddisks = mddev->raid_disks - mddev->delta_disks;
3747 
3748 		err = -EINVAL;
3749 		rdev_for_each(rdev, mddev) {
3750 			if (olddisks < n &&
3751 			    rdev->data_offset < rdev->new_data_offset)
3752 				goto out_unlock;
3753 			if (olddisks > n &&
3754 			    rdev->data_offset > rdev->new_data_offset)
3755 				goto out_unlock;
3756 		}
3757 		err = 0;
3758 		mddev->delta_disks = n - olddisks;
3759 		mddev->raid_disks = n;
3760 		mddev->reshape_backwards = (mddev->delta_disks < 0);
3761 	} else
3762 		mddev->raid_disks = n;
3763 out_unlock:
3764 	mddev_unlock(mddev);
3765 	return err ? err : len;
3766 }
3767 static struct md_sysfs_entry md_raid_disks =
3768 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3769 
3770 static ssize_t
3771 chunk_size_show(struct mddev *mddev, char *page)
3772 {
3773 	if (mddev->reshape_position != MaxSector &&
3774 	    mddev->chunk_sectors != mddev->new_chunk_sectors)
3775 		return sprintf(page, "%d (%d)\n",
3776 			       mddev->new_chunk_sectors << 9,
3777 			       mddev->chunk_sectors << 9);
3778 	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3779 }
3780 
3781 static ssize_t
3782 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3783 {
3784 	unsigned long n;
3785 	int err;
3786 
3787 	err = kstrtoul(buf, 10, &n);
3788 	if (err < 0)
3789 		return err;
3790 
3791 	err = mddev_lock(mddev);
3792 	if (err)
3793 		return err;
3794 	if (mddev->pers) {
3795 		if (mddev->pers->check_reshape == NULL)
3796 			err = -EBUSY;
3797 		else if (mddev->ro)
3798 			err = -EROFS;
3799 		else {
3800 			mddev->new_chunk_sectors = n >> 9;
3801 			err = mddev->pers->check_reshape(mddev);
3802 			if (err)
3803 				mddev->new_chunk_sectors = mddev->chunk_sectors;
3804 		}
3805 	} else {
3806 		mddev->new_chunk_sectors = n >> 9;
3807 		if (mddev->reshape_position == MaxSector)
3808 			mddev->chunk_sectors = n >> 9;
3809 	}
3810 	mddev_unlock(mddev);
3811 	return err ?: len;
3812 }
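/*
 * Worked example (editor's sketch): chunk_size is exposed in bytes but
 * stored in 512-byte sectors, so a write of "65536" is stored as
 *
 *	mddev->new_chunk_sectors = 65536 >> 9;	(== 128 sectors)
 *
 * and chunk_size_show() prints 128 << 9 == 65536 again.  A value that is
 * not a multiple of 512 silently loses its low nine bits in the shift.
 */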
3813 static struct md_sysfs_entry md_chunk_size =
3814 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
3815 
3816 static ssize_t
3817 resync_start_show(struct mddev *mddev, char *page)
3818 {
3819 	if (mddev->recovery_cp == MaxSector)
3820 		return sprintf(page, "none\n");
3821 	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3822 }
3823 
3824 static ssize_t
3825 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3826 {
3827 	unsigned long long n;
3828 	int err;
3829 
3830 	if (cmd_match(buf, "none"))
3831 		n = MaxSector;
3832 	else {
3833 		err = kstrtoull(buf, 10, &n);
3834 		if (err < 0)
3835 			return err;
3836 		if (n != (sector_t)n)
3837 			return -EINVAL;
3838 	}
3839 
3840 	err = mddev_lock(mddev);
3841 	if (err)
3842 		return err;
3843 	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3844 		err = -EBUSY;
3845 
3846 	if (!err) {
3847 		mddev->recovery_cp = n;
3848 		if (mddev->pers)
3849 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
3850 	}
3851 	mddev_unlock(mddev);
3852 	return err ?: len;
3853 }
3854 static struct md_sysfs_entry md_resync_start =
3855 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3856 		resync_start_show, resync_start_store);
3857 
3858 /*
3859  * The array state can be:
3860  *
3861  * clear
3862  *     No devices, no size, no level
3863  *     Equivalent to STOP_ARRAY ioctl
3864  * inactive
3865  *     May have some settings, but array is not active
3866  *        all IO results in error
3867  *     When written, doesn't tear down array, but just stops it
3868  * suspended (not supported yet)
3869  *     All IO requests will block. The array can be reconfigured.
3870  *     Writing this, if accepted, will block until array is quiescent
3871  * readonly
3872  *     no resync can happen.  no superblocks get written.
3873  *     write requests fail
3874  * read-auto
3875  *     like readonly, but behaves like 'clean' on a write request.
3876  *
3877  * clean - no pending writes, but otherwise active.
3878  *     When written to inactive array, starts without resync
3879  *     If a write request arrives then
3880  *       if metadata is known, mark 'dirty' and switch to 'active'.
3881  *       if not known, block and switch to write-pending
3882  *     If written to an active array that has pending writes, then fails.
3883  * active
3884  *     fully active: IO and resync can be happening.
3885  *     When written to inactive array, starts with resync
3886  *
3887  * write-pending
3888  *     clean, but writes are blocked waiting for 'active' to be written.
3889  *
3890  * active-idle
3891  *     like active, but no writes have been seen for a while (100msec).
3892  *
3893  */
3894 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3895 		   write_pending, active_idle, bad_word};
3896 static char *array_states[] = {
3897 	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3898 	"write-pending", "active-idle", NULL };
3899 
3900 static int match_word(const char *word, char **list)
3901 {
3902 	int n;
3903 	for (n=0; list[n]; n++)
3904 		if (cmd_match(word, list[n]))
3905 			break;
3906 	return n;
3907 }
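/*
 * Example (editor's sketch): match_word() returns the index of the first
 * matching entry, or the index of the terminating NULL when nothing
 * matches, which lines up with bad_word in the enum above:
 *
 *	match_word("clean\n", array_states)	returns 5 (clean)
 *	match_word("bogus", array_states)	returns 9 (bad_word)
 *
 * cmd_match() tolerates a trailing newline, so values written through
 * sysfs with a newline appended still match.
 */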
3908 
3909 static ssize_t
3910 array_state_show(struct mddev *mddev, char *page)
3911 {
3912 	enum array_state st = inactive;
3913 
3914 	if (mddev->pers)
3915 		switch(mddev->ro) {
3916 		case 1:
3917 			st = readonly;
3918 			break;
3919 		case 2:
3920 			st = read_auto;
3921 			break;
3922 		case 0:
3923 			if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
3924 				st = write_pending;
3925 			else if (mddev->in_sync)
3926 				st = clean;
3927 			else if (mddev->safemode)
3928 				st = active_idle;
3929 			else
3930 				st = active;
3931 		}
3932 	else {
3933 		if (list_empty(&mddev->disks) &&
3934 		    mddev->raid_disks == 0 &&
3935 		    mddev->dev_sectors == 0)
3936 			st = clear;
3937 		else
3938 			st = inactive;
3939 	}
3940 	return sprintf(page, "%s\n", array_states[st]);
3941 }
3942 
3943 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3944 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3945 static int do_md_run(struct mddev *mddev);
3946 static int restart_array(struct mddev *mddev);
3947 
3948 static ssize_t
3949 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3950 {
3951 	int err;
3952 	enum array_state st = match_word(buf, array_states);
3953 
3954 	if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
3955 		/* don't take reconfig_mutex when toggling between
3956 		 * clean and active
3957 		 */
3958 		spin_lock(&mddev->lock);
3959 		if (st == active) {
3960 			restart_array(mddev);
3961 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
3962 			md_wakeup_thread(mddev->thread);
3963 			wake_up(&mddev->sb_wait);
3964 			err = 0;
3965 		} else /* st == clean */ {
3966 			restart_array(mddev);
3967 			if (atomic_read(&mddev->writes_pending) == 0) {
3968 				if (mddev->in_sync == 0) {
3969 					mddev->in_sync = 1;
3970 					if (mddev->safemode == 1)
3971 						mddev->safemode = 0;
3972 					set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
3973 				}
3974 				err = 0;
3975 			} else
3976 				err = -EBUSY;
3977 		}
3978 		if (!err)
3979 			sysfs_notify_dirent_safe(mddev->sysfs_state);
3980 		spin_unlock(&mddev->lock);
3981 		return err ?: len;
3982 	}
3983 	err = mddev_lock(mddev);
3984 	if (err)
3985 		return err;
3986 	err = -EINVAL;
3987 	switch(st) {
3988 	case bad_word:
3989 		break;
3990 	case clear:
3991 		/* stopping an active array */
3992 		err = do_md_stop(mddev, 0, NULL);
3993 		break;
3994 	case inactive:
3995 		/* stopping an active array */
3996 		if (mddev->pers)
3997 			err = do_md_stop(mddev, 2, NULL);
3998 		else
3999 			err = 0; /* already inactive */
4000 		break;
4001 	case suspended:
4002 		break; /* not supported yet */
4003 	case readonly:
4004 		if (mddev->pers)
4005 			err = md_set_readonly(mddev, NULL);
4006 		else {
4007 			mddev->ro = 1;
4008 			set_disk_ro(mddev->gendisk, 1);
4009 			err = do_md_run(mddev);
4010 		}
4011 		break;
4012 	case read_auto:
4013 		if (mddev->pers) {
4014 			if (mddev->ro == 0)
4015 				err = md_set_readonly(mddev, NULL);
4016 			else if (mddev->ro == 1)
4017 				err = restart_array(mddev);
4018 			if (err == 0) {
4019 				mddev->ro = 2;
4020 				set_disk_ro(mddev->gendisk, 0);
4021 			}
4022 		} else {
4023 			mddev->ro = 2;
4024 			err = do_md_run(mddev);
4025 		}
4026 		break;
4027 	case clean:
4028 		if (mddev->pers) {
4029 			err = restart_array(mddev);
4030 			if (err)
4031 				break;
4032 			spin_lock(&mddev->lock);
4033 			if (atomic_read(&mddev->writes_pending) == 0) {
4034 				if (mddev->in_sync == 0) {
4035 					mddev->in_sync = 1;
4036 					if (mddev->safemode == 1)
4037 						mddev->safemode = 0;
4038 					set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4039 				}
4040 				err = 0;
4041 			} else
4042 				err = -EBUSY;
4043 			spin_unlock(&mddev->lock);
4044 		} else
4045 			err = -EINVAL;
4046 		break;
4047 	case active:
4048 		if (mddev->pers) {
4049 			err = restart_array(mddev);
4050 			if (err)
4051 				break;
4052 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4053 			wake_up(&mddev->sb_wait);
4054 			err = 0;
4055 		} else {
4056 			mddev->ro = 0;
4057 			set_disk_ro(mddev->gendisk, 0);
4058 			err = do_md_run(mddev);
4059 		}
4060 		break;
4061 	case write_pending:
4062 	case active_idle:
4063 		/* these cannot be set */
4064 		break;
4065 	}
4066 
4067 	if (!err) {
4068 		if (mddev->hold_active == UNTIL_IOCTL)
4069 			mddev->hold_active = 0;
4070 		sysfs_notify_dirent_safe(mddev->sysfs_state);
4071 	}
4072 	mddev_unlock(mddev);
4073 	return err ?: len;
4074 }
4075 static struct md_sysfs_entry md_array_state =
4076 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4077 
4078 static ssize_t
4079 max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
4080 	return sprintf(page, "%d\n",
4081 		       atomic_read(&mddev->max_corr_read_errors));
4082 }
4083 
4084 static ssize_t
4085 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4086 {
4087 	unsigned int n;
4088 	int rv;
4089 
4090 	rv = kstrtouint(buf, 10, &n);
4091 	if (rv < 0)
4092 		return rv;
4093 	atomic_set(&mddev->max_corr_read_errors, n);
4094 	return len;
4095 }
4096 
4097 static struct md_sysfs_entry max_corr_read_errors =
4098 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4099 	max_corrected_read_errors_store);
4100 
4101 static ssize_t
4102 null_show(struct mddev *mddev, char *page)
4103 {
4104 	return -EINVAL;
4105 }
4106 
4107 static ssize_t
4108 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4109 {
4110 	/* buf must be "%d:%d" (major:minor), optionally followed by a newline */
4111 	/* The new device is added to the array.
4112 	 * If the array has a persistent superblock, we read the
4113 	 * superblock to initialise info and check validity.
4114 	 * Otherwise, only checking done is that in bind_rdev_to_array,
4115 	 * which mainly checks size.
4116 	 */
4117 	char *e;
4118 	int major = simple_strtoul(buf, &e, 10);
4119 	int minor;
4120 	dev_t dev;
4121 	struct md_rdev *rdev;
4122 	int err;
4123 
4124 	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4125 		return -EINVAL;
4126 	minor = simple_strtoul(e+1, &e, 10);
4127 	if (*e && *e != '\n')
4128 		return -EINVAL;
4129 	dev = MKDEV(major, minor);
4130 	if (major != MAJOR(dev) ||
4131 	    minor != MINOR(dev))
4132 		return -EOVERFLOW;
4133 
4134 	flush_workqueue(md_misc_wq);
4135 
4136 	err = mddev_lock(mddev);
4137 	if (err)
4138 		return err;
4139 	if (mddev->persistent) {
4140 		rdev = md_import_device(dev, mddev->major_version,
4141 					mddev->minor_version);
4142 		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4143 			struct md_rdev *rdev0
4144 				= list_entry(mddev->disks.next,
4145 					     struct md_rdev, same_set);
4146 			err = super_types[mddev->major_version]
4147 				.load_super(rdev, rdev0, mddev->minor_version);
4148 			if (err < 0)
4149 				goto out;
4150 		}
4151 	} else if (mddev->external)
4152 		rdev = md_import_device(dev, -2, -1);
4153 	else
4154 		rdev = md_import_device(dev, -1, -1);
4155 
4156 	if (IS_ERR(rdev)) {
4157 		mddev_unlock(mddev);
4158 		return PTR_ERR(rdev);
4159 	}
4160 	err = bind_rdev_to_array(rdev, mddev);
4161  out:
4162 	if (err)
4163 		export_rdev(rdev);
4164 	mddev_unlock(mddev);
4165 	return err ? err : len;
4166 }
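/*
 * Example (editor's sketch): the buffer must be "<major>:<minor>" with an
 * optional trailing newline, so writing "8:32\n" adds the device with
 * dev_t MKDEV(8, 32) (typically /dev/sdc, though that mapping is only an
 * assumption of this example) to the array.  Malformed strings such as
 * "sdc" or "8:" are rejected with -EINVAL before any device lookup.
 */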
4167 
4168 static struct md_sysfs_entry md_new_device =
4169 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
4170 
4171 static ssize_t
4172 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4173 {
4174 	char *end;
4175 	unsigned long chunk, end_chunk;
4176 	int err;
4177 
4178 	err = mddev_lock(mddev);
4179 	if (err)
4180 		return err;
4181 	if (!mddev->bitmap)
4182 		goto out;
4183 	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4184 	while (*buf) {
4185 		chunk = end_chunk = simple_strtoul(buf, &end, 0);
4186 		if (buf == end) break;
4187 		if (*end == '-') { /* range */
4188 			buf = end + 1;
4189 			end_chunk = simple_strtoul(buf, &end, 0);
4190 			if (buf == end) break;
4191 		}
4192 		if (*end && !isspace(*end)) break;
4193 		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4194 		buf = skip_spaces(end);
4195 	}
4196 	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4197 out:
4198 	mddev_unlock(mddev);
4199 	return len;
4200 }
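/*
 * Example (editor's sketch): bitmap_set_bits takes a whitespace-separated
 * list of chunk numbers and/or inclusive <first>-<last> ranges, so writing
 *
 *	"0 5 10-15\n"
 *
 * marks chunk 0, chunk 5 and chunks 10 through 15 dirty in the
 * write-intent bitmap.  The numbers are parsed with base 0, so a "0x"
 * prefix selects hexadecimal (e.g. "0x10" is chunk 16).
 */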
4201 
4202 static struct md_sysfs_entry md_bitmap =
4203 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
4204 
4205 static ssize_t
4206 size_show(struct mddev *mddev, char *page)
4207 {
4208 	return sprintf(page, "%llu\n",
4209 		(unsigned long long)mddev->dev_sectors / 2);
4210 }
4211 
4212 static int update_size(struct mddev *mddev, sector_t num_sectors);
4213 
4214 static ssize_t
4215 size_store(struct mddev *mddev, const char *buf, size_t len)
4216 {
4217 	/* If array is inactive, we can reduce the component size, but
4218 	 * not increase it (except from 0).
4219 	 * If array is active, we can try an on-line resize
4220 	 */
4221 	sector_t sectors;
4222 	int err = strict_blocks_to_sectors(buf, &sectors);
4223 
4224 	if (err < 0)
4225 		return err;
4226 	err = mddev_lock(mddev);
4227 	if (err)
4228 		return err;
4229 	if (mddev->pers) {
4230 		err = update_size(mddev, sectors);
4231 		if (err == 0)
4232 			md_update_sb(mddev, 1);
4233 	} else {
4234 		if (mddev->dev_sectors == 0 ||
4235 		    mddev->dev_sectors > sectors)
4236 			mddev->dev_sectors = sectors;
4237 		else
4238 			err = -ENOSPC;
4239 	}
4240 	mddev_unlock(mddev);
4241 	return err ? err : len;
4242 }
4243 
4244 static struct md_sysfs_entry md_size =
4245 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4246 
4247 /* Metadata version.
4248  * This is one of
4249  *   'none' for arrays with no metadata (good luck...)
4250  *   'external' for arrays with externally managed metadata,
4251  * or N.M for internally known formats
4252  */
4253 static ssize_t
4254 metadata_show(struct mddev *mddev, char *page)
4255 {
4256 	if (mddev->persistent)
4257 		return sprintf(page, "%d.%d\n",
4258 			       mddev->major_version, mddev->minor_version);
4259 	else if (mddev->external)
4260 		return sprintf(page, "external:%s\n", mddev->metadata_type);
4261 	else
4262 		return sprintf(page, "none\n");
4263 }
4264 
4265 static ssize_t
4266 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4267 {
4268 	int major, minor;
4269 	char *e;
4270 	int err;
4271 	/* Changing the details of 'external' metadata is
4272 	 * always permitted.  Otherwise there must be
4273 	 * no devices attached to the array.
4274 	 */
4275 
4276 	err = mddev_lock(mddev);
4277 	if (err)
4278 		return err;
4279 	err = -EBUSY;
4280 	if (mddev->external && strncmp(buf, "external:", 9) == 0)
4281 		;
4282 	else if (!list_empty(&mddev->disks))
4283 		goto out_unlock;
4284 
4285 	err = 0;
4286 	if (cmd_match(buf, "none")) {
4287 		mddev->persistent = 0;
4288 		mddev->external = 0;
4289 		mddev->major_version = 0;
4290 		mddev->minor_version = 90;
4291 		goto out_unlock;
4292 	}
4293 	if (strncmp(buf, "external:", 9) == 0) {
4294 		size_t namelen = len-9;
4295 		if (namelen >= sizeof(mddev->metadata_type))
4296 			namelen = sizeof(mddev->metadata_type)-1;
4297 		strncpy(mddev->metadata_type, buf+9, namelen);
4298 		mddev->metadata_type[namelen] = 0;
4299 		if (namelen && mddev->metadata_type[namelen-1] == '\n')
4300 			mddev->metadata_type[--namelen] = 0;
4301 		mddev->persistent = 0;
4302 		mddev->external = 1;
4303 		mddev->major_version = 0;
4304 		mddev->minor_version = 90;
4305 		goto out_unlock;
4306 	}
4307 	major = simple_strtoul(buf, &e, 10);
4308 	err = -EINVAL;
4309 	if (e==buf || *e != '.')
4310 		goto out_unlock;
4311 	buf = e+1;
4312 	minor = simple_strtoul(buf, &e, 10);
4313 	if (e==buf || (*e && *e != '\n') )
4314 		goto out_unlock;
4315 	err = -ENOENT;
4316 	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4317 		goto out_unlock;
4318 	mddev->major_version = major;
4319 	mddev->minor_version = minor;
4320 	mddev->persistent = 1;
4321 	mddev->external = 0;
4322 	err = 0;
4323 out_unlock:
4324 	mddev_unlock(mddev);
4325 	return err ?: len;
4326 }
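/*
 * Example (editor's sketch): with no member devices attached, writing
 * "1.2\n" selects the in-kernel v1.2 superblock format (major_version 1,
 * minor_version 2), "external:imsm" marks the metadata as managed from
 * user space with that type string, and "none" selects no persistent
 * metadata at all.  Once devices are present, only updates to an already
 * external metadata type are accepted; everything else returns -EBUSY.
 */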
4327 
4328 static struct md_sysfs_entry md_metadata =
4329 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
4330 
4331 static ssize_t
4332 action_show(struct mddev *mddev, char *page)
4333 {
4334 	char *type = "idle";
4335 	unsigned long recovery = mddev->recovery;
4336 	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4337 		type = "frozen";
4338 	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4339 	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4340 		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4341 			type = "reshape";
4342 		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4343 			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4344 				type = "resync";
4345 			else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4346 				type = "check";
4347 			else
4348 				type = "repair";
4349 		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4350 			type = "recover";
4351 		else if (mddev->reshape_position != MaxSector)
4352 			type = "reshape";
4353 	}
4354 	return sprintf(page, "%s\n", type);
4355 }
4356 
4357 static ssize_t
4358 action_store(struct mddev *mddev, const char *page, size_t len)
4359 {
4360 	if (!mddev->pers || !mddev->pers->sync_request)
4361 		return -EINVAL;
4362 
4363 
4364 	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4365 		if (cmd_match(page, "frozen"))
4366 			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4367 		else
4368 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4369 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4370 		    mddev_lock(mddev) == 0) {
4371 			flush_workqueue(md_misc_wq);
4372 			if (mddev->sync_thread) {
4373 				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4374 				md_reap_sync_thread(mddev);
4375 			}
4376 			mddev_unlock(mddev);
4377 		}
4378 	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4379 		return -EBUSY;
4380 	else if (cmd_match(page, "resync"))
4381 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4382 	else if (cmd_match(page, "recover")) {
4383 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4384 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4385 	} else if (cmd_match(page, "reshape")) {
4386 		int err;
4387 		if (mddev->pers->start_reshape == NULL)
4388 			return -EINVAL;
4389 		err = mddev_lock(mddev);
4390 		if (!err) {
4391 			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4392 				err = -EBUSY;
4393 			else {
4394 				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4395 				err = mddev->pers->start_reshape(mddev);
4396 			}
4397 			mddev_unlock(mddev);
4398 		}
4399 		if (err)
4400 			return err;
4401 		sysfs_notify(&mddev->kobj, NULL, "degraded");
4402 	} else {
4403 		if (cmd_match(page, "check"))
4404 			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4405 		else if (!cmd_match(page, "repair"))
4406 			return -EINVAL;
4407 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4408 		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4409 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4410 	}
4411 	if (mddev->ro == 2) {
4412 		/* A write to sync_action is enough to justify
4413 		 * canceling read-auto mode
4414 		 */
4415 		mddev->ro = 0;
4416 		md_wakeup_thread(mddev->sync_thread);
4417 	}
4418 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4419 	md_wakeup_thread(mddev->thread);
4420 	sysfs_notify_dirent_safe(mddev->sysfs_action);
4421 	return len;
4422 }
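/*
 * Summary (editor's sketch) of how the accepted sync_action strings map
 * onto recovery flags in the code above:
 *
 *	"check"		MD_RECOVERY_SYNC + MD_RECOVERY_REQUESTED + MD_RECOVERY_CHECK
 *	"repair"	MD_RECOVERY_SYNC + MD_RECOVERY_REQUESTED
 *	"resync"	just clears MD_RECOVERY_FROZEN
 *	"recover"	MD_RECOVERY_RECOVER
 *	"reshape"	calls ->start_reshape() under the reconfig mutex
 *	"idle"/"frozen"	interrupt and reap any running sync thread
 *
 * On success MD_RECOVERY_NEEDED is set and the md thread is woken to act
 * on the request.
 */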
4423 
4424 static struct md_sysfs_entry md_scan_mode =
4425 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
4426 
4427 static ssize_t
4428 last_sync_action_show(struct mddev *mddev, char *page)
4429 {
4430 	return sprintf(page, "%s\n", mddev->last_sync_action);
4431 }
4432 
4433 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4434 
4435 static ssize_t
4436 mismatch_cnt_show(struct mddev *mddev, char *page)
4437 {
4438 	return sprintf(page, "%llu\n",
4439 		       (unsigned long long)
4440 		       atomic64_read(&mddev->resync_mismatches));
4441 }
4442 
4443 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4444 
4445 static ssize_t
4446 sync_min_show(struct mddev *mddev, char *page)
4447 {
4448 	return sprintf(page, "%d (%s)\n", speed_min(mddev),
4449 		       mddev->sync_speed_min ? "local": "system");
4450 }
4451 
4452 static ssize_t
4453 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4454 {
4455 	unsigned int min;
4456 	int rv;
4457 
4458 	if (strncmp(buf, "system", 6)==0) {
4459 		min = 0;
4460 	} else {
4461 		rv = kstrtouint(buf, 10, &min);
4462 		if (rv < 0)
4463 			return rv;
4464 		if (min == 0)
4465 			return -EINVAL;
4466 	}
4467 	mddev->sync_speed_min = min;
4468 	return len;
4469 }
4470 
4471 static struct md_sysfs_entry md_sync_min =
4472 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4473 
4474 static ssize_t
4475 sync_max_show(struct mddev *mddev, char *page)
4476 {
4477 	return sprintf(page, "%d (%s)\n", speed_max(mddev),
4478 		       mddev->sync_speed_max ? "local": "system");
4479 }
4480 
4481 static ssize_t
4482 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4483 {
4484 	unsigned int max;
4485 	int rv;
4486 
4487 	if (strncmp(buf, "system", 6)==0) {
4488 		max = 0;
4489 	} else {
4490 		rv = kstrtouint(buf, 10, &max);
4491 		if (rv < 0)
4492 			return rv;
4493 		if (max == 0)
4494 			return -EINVAL;
4495 	}
4496 	mddev->sync_speed_max = max;
4497 	return len;
4498 }
4499 
4500 static struct md_sysfs_entry md_sync_max =
4501 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
4502 
4503 static ssize_t
4504 degraded_show(struct mddev *mddev, char *page)
4505 {
4506 	return sprintf(page, "%d\n", mddev->degraded);
4507 }
4508 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4509 
4510 static ssize_t
4511 sync_force_parallel_show(struct mddev *mddev, char *page)
4512 {
4513 	return sprintf(page, "%d\n", mddev->parallel_resync);
4514 }
4515 
4516 static ssize_t
4517 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4518 {
4519 	long n;
4520 
4521 	if (kstrtol(buf, 10, &n))
4522 		return -EINVAL;
4523 
4524 	if (n != 0 && n != 1)
4525 		return -EINVAL;
4526 
4527 	mddev->parallel_resync = n;
4528 
4529 	if (mddev->sync_thread)
4530 		wake_up(&resync_wait);
4531 
4532 	return len;
4533 }
4534 
4535 /* force parallel resync, even with shared block devices */
4536 static struct md_sysfs_entry md_sync_force_parallel =
4537 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4538        sync_force_parallel_show, sync_force_parallel_store);
4539 
4540 static ssize_t
4541 sync_speed_show(struct mddev *mddev, char *page)
4542 {
4543 	unsigned long resync, dt, db;
4544 	if (mddev->curr_resync == 0)
4545 		return sprintf(page, "none\n");
4546 	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4547 	dt = (jiffies - mddev->resync_mark) / HZ;
4548 	if (!dt) dt++;
4549 	db = resync - mddev->resync_mark_cnt;
4550 	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4551 }
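/*
 * Worked example (editor's sketch): the reported rate is the number of
 * sectors covered since the last mark, divided by the elapsed seconds and
 * halved to convert 512-byte sectors to KiB/s.  If 204800 sectors were
 * covered in 2 seconds, the file reports 204800 / 2 / 2 == 51200, i.e.
 * roughly 50 MiB/s.
 */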
4552 
4553 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4554 
4555 static ssize_t
4556 sync_completed_show(struct mddev *mddev, char *page)
4557 {
4558 	unsigned long long max_sectors, resync;
4559 
4560 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4561 		return sprintf(page, "none\n");
4562 
4563 	if (mddev->curr_resync == 1 ||
4564 	    mddev->curr_resync == 2)
4565 		return sprintf(page, "delayed\n");
4566 
4567 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4568 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4569 		max_sectors = mddev->resync_max_sectors;
4570 	else
4571 		max_sectors = mddev->dev_sectors;
4572 
4573 	resync = mddev->curr_resync_completed;
4574 	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4575 }
4576 
4577 static struct md_sysfs_entry md_sync_completed =
4578 	__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4579 
4580 static ssize_t
4581 min_sync_show(struct mddev *mddev, char *page)
4582 {
4583 	return sprintf(page, "%llu\n",
4584 		       (unsigned long long)mddev->resync_min);
4585 }
4586 static ssize_t
4587 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4588 {
4589 	unsigned long long min;
4590 	int err;
4591 
4592 	if (kstrtoull(buf, 10, &min))
4593 		return -EINVAL;
4594 
4595 	spin_lock(&mddev->lock);
4596 	err = -EINVAL;
4597 	if (min > mddev->resync_max)
4598 		goto out_unlock;
4599 
4600 	err = -EBUSY;
4601 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4602 		goto out_unlock;
4603 
4604 	/* Round down to multiple of 4K for safety */
4605 	mddev->resync_min = round_down(min, 8);
4606 	err = 0;
4607 
4608 out_unlock:
4609 	spin_unlock(&mddev->lock);
4610 	return err ?: len;
4611 }
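/*
 * Worked example (editor's sketch): sync_min is rounded down to a
 * multiple of eight 512-byte sectors (4KiB), so writing "1234" stores
 *
 *	mddev->resync_min = round_down(1234, 8);	(== 1232)
 *
 * A value above resync_max is rejected with -EINVAL, and any write is
 * refused with -EBUSY while a resync is running.
 */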
4612 
4613 static struct md_sysfs_entry md_min_sync =
4614 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4615 
4616 static ssize_t
4617 max_sync_show(struct mddev *mddev, char *page)
4618 {
4619 	if (mddev->resync_max == MaxSector)
4620 		return sprintf(page, "max\n");
4621 	else
4622 		return sprintf(page, "%llu\n",
4623 			       (unsigned long long)mddev->resync_max);
4624 }
4625 static ssize_t
4626 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4627 {
4628 	int err;
4629 	spin_lock(&mddev->lock);
4630 	if (strncmp(buf, "max", 3) == 0)
4631 		mddev->resync_max = MaxSector;
4632 	else {
4633 		unsigned long long max;
4634 		int chunk;
4635 
4636 		err = -EINVAL;
4637 		if (kstrtoull(buf, 10, &max))
4638 			goto out_unlock;
4639 		if (max < mddev->resync_min)
4640 			goto out_unlock;
4641 
4642 		err = -EBUSY;
4643 		if (max < mddev->resync_max &&
4644 		    mddev->ro == 0 &&
4645 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4646 			goto out_unlock;
4647 
4648 		/* Must be a multiple of chunk_size */
4649 		chunk = mddev->chunk_sectors;
4650 		if (chunk) {
4651 			sector_t temp = max;
4652 
4653 			err = -EINVAL;
4654 			if (sector_div(temp, chunk))
4655 				goto out_unlock;
4656 		}
4657 		mddev->resync_max = max;
4658 	}
4659 	wake_up(&mddev->recovery_wait);
4660 	err = 0;
4661 out_unlock:
4662 	spin_unlock(&mddev->lock);
4663 	return err ?: len;
4664 }
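/*
 * Worked example (editor's sketch): a value other than "max" must be a
 * multiple of the chunk size.  With 64KiB chunks (chunk_sectors == 128),
 * writing "131072" passes because sector_div() leaves no remainder,
 * while "131000" fails with -EINVAL.  Lowering sync_max below resync_min,
 * or below its current value while a resync runs on a writable array, is
 * also refused.
 */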
4665 
4666 static struct md_sysfs_entry md_max_sync =
4667 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
4668 
4669 static ssize_t
4670 suspend_lo_show(struct mddev *mddev, char *page)
4671 {
4672 	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4673 }
4674 
4675 static ssize_t
4676 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4677 {
4678 	unsigned long long old, new;
4679 	int err;
4680 
4681 	err = kstrtoull(buf, 10, &new);
4682 	if (err < 0)
4683 		return err;
4684 	if (new != (sector_t)new)
4685 		return -EINVAL;
4686 
4687 	err = mddev_lock(mddev);
4688 	if (err)
4689 		return err;
4690 	err = -EINVAL;
4691 	if (mddev->pers == NULL ||
4692 	    mddev->pers->quiesce == NULL)
4693 		goto unlock;
4694 	old = mddev->suspend_lo;
4695 	mddev->suspend_lo = new;
4696 	if (new >= old)
4697 		/* Shrinking suspended region */
4698 		mddev->pers->quiesce(mddev, 2);
4699 	else {
4700 		/* Expanding suspended region - need to wait */
4701 		mddev->pers->quiesce(mddev, 1);
4702 		mddev->pers->quiesce(mddev, 0);
4703 	}
4704 	err = 0;
4705 unlock:
4706 	mddev_unlock(mddev);
4707 	return err ?: len;
4708 }
4709 static struct md_sysfs_entry md_suspend_lo =
4710 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4711 
4712 static ssize_t
4713 suspend_hi_show(struct mddev *mddev, char *page)
4714 {
4715 	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4716 }
4717 
4718 static ssize_t
4719 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4720 {
4721 	unsigned long long old, new;
4722 	int err;
4723 
4724 	err = kstrtoull(buf, 10, &new);
4725 	if (err < 0)
4726 		return err;
4727 	if (new != (sector_t)new)
4728 		return -EINVAL;
4729 
4730 	err = mddev_lock(mddev);
4731 	if (err)
4732 		return err;
4733 	err = -EINVAL;
4734 	if (mddev->pers == NULL ||
4735 	    mddev->pers->quiesce == NULL)
4736 		goto unlock;
4737 	old = mddev->suspend_hi;
4738 	mddev->suspend_hi = new;
4739 	if (new <= old)
4740 		/* Shrinking suspended region */
4741 		mddev->pers->quiesce(mddev, 2);
4742 	else {
4743 		/* Expanding suspended region - need to wait */
4744 		mddev->pers->quiesce(mddev, 1);
4745 		mddev->pers->quiesce(mddev, 0);
4746 	}
4747 	err = 0;
4748 unlock:
4749 	mddev_unlock(mddev);
4750 	return err ?: len;
4751 }
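/*
 * Example (editor's sketch): suspend_lo and suspend_hi bound a region of
 * the array, in sectors, in which the personality holds off I/O.  With
 * suspend_lo at its default of 0, writing 8192 to suspend_hi suspends
 * the first 4MiB.  Growing the window quiesces the personality fully
 * (quiesce(1) then quiesce(0)) so in-flight requests drain first, while
 * shrinking it only needs the lighter quiesce(mddev, 2) notification.
 */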
4752 static struct md_sysfs_entry md_suspend_hi =
4753 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
4754 
4755 static ssize_t
4756 reshape_position_show(struct mddev *mddev, char *page)
4757 {
4758 	if (mddev->reshape_position != MaxSector)
4759 		return sprintf(page, "%llu\n",
4760 			       (unsigned long long)mddev->reshape_position);
4761 	strcpy(page, "none\n");
4762 	return 5;
4763 }
4764 
4765 static ssize_t
4766 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4767 {
4768 	struct md_rdev *rdev;
4769 	unsigned long long new;
4770 	int err;
4771 
4772 	err = kstrtoull(buf, 10, &new);
4773 	if (err < 0)
4774 		return err;
4775 	if (new != (sector_t)new)
4776 		return -EINVAL;
4777 	err = mddev_lock(mddev);
4778 	if (err)
4779 		return err;
4780 	err = -EBUSY;
4781 	if (mddev->pers)
4782 		goto unlock;
4783 	mddev->reshape_position = new;
4784 	mddev->delta_disks = 0;
4785 	mddev->reshape_backwards = 0;
4786 	mddev->new_level = mddev->level;
4787 	mddev->new_layout = mddev->layout;
4788 	mddev->new_chunk_sectors = mddev->chunk_sectors;
4789 	rdev_for_each(rdev, mddev)
4790 		rdev->new_data_offset = rdev->data_offset;
4791 	err = 0;
4792 unlock:
4793 	mddev_unlock(mddev);
4794 	return err ?: len;
4795 }
4796 
4797 static struct md_sysfs_entry md_reshape_position =
4798 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4799        reshape_position_store);
4800 
4801 static ssize_t
4802 reshape_direction_show(struct mddev *mddev, char *page)
4803 {
4804 	return sprintf(page, "%s\n",
4805 		       mddev->reshape_backwards ? "backwards" : "forwards");
4806 }
4807 
4808 static ssize_t
4809 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4810 {
4811 	int backwards = 0;
4812 	int err;
4813 
4814 	if (cmd_match(buf, "forwards"))
4815 		backwards = 0;
4816 	else if (cmd_match(buf, "backwards"))
4817 		backwards = 1;
4818 	else
4819 		return -EINVAL;
4820 	if (mddev->reshape_backwards == backwards)
4821 		return len;
4822 
4823 	err = mddev_lock(mddev);
4824 	if (err)
4825 		return err;
4826 	/* check if we are allowed to change */
4827 	if (mddev->delta_disks)
4828 		err = -EBUSY;
4829 	else if (mddev->persistent &&
4830 	    mddev->major_version == 0)
4831 		err = -EINVAL;
4832 	else
4833 		mddev->reshape_backwards = backwards;
4834 	mddev_unlock(mddev);
4835 	return err ?: len;
4836 }
4837 
4838 static struct md_sysfs_entry md_reshape_direction =
4839 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4840        reshape_direction_store);
4841 
4842 static ssize_t
4843 array_size_show(struct mddev *mddev, char *page)
4844 {
4845 	if (mddev->external_size)
4846 		return sprintf(page, "%llu\n",
4847 			       (unsigned long long)mddev->array_sectors/2);
4848 	else
4849 		return sprintf(page, "default\n");
4850 }
4851 
4852 static ssize_t
4853 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4854 {
4855 	sector_t sectors;
4856 	int err;
4857 
4858 	err = mddev_lock(mddev);
4859 	if (err)
4860 		return err;
4861 
4862 	/* cluster raid doesn't support change array_sectors */
4863 	if (mddev_is_clustered(mddev))
4864 		return -EINVAL;
4865 
4866 	if (strncmp(buf, "default", 7) == 0) {
4867 		if (mddev->pers)
4868 			sectors = mddev->pers->size(mddev, 0, 0);
4869 		else
4870 			sectors = mddev->array_sectors;
4871 
4872 		mddev->external_size = 0;
4873 	} else {
4874 		if (strict_blocks_to_sectors(buf, &sectors) < 0)
4875 			err = -EINVAL;
4876 		else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4877 			err = -E2BIG;
4878 		else
4879 			mddev->external_size = 1;
4880 	}
4881 
4882 	if (!err) {
4883 		mddev->array_sectors = sectors;
4884 		if (mddev->pers) {
4885 			set_capacity(mddev->gendisk, mddev->array_sectors);
4886 			revalidate_disk(mddev->gendisk);
4887 		}
4888 	}
4889 	mddev_unlock(mddev);
4890 	return err ?: len;
4891 }
4892 
4893 static struct md_sysfs_entry md_array_size =
4894 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4895        array_size_store);
4896 
4897 static struct attribute *md_default_attrs[] = {
4898 	&md_level.attr,
4899 	&md_layout.attr,
4900 	&md_raid_disks.attr,
4901 	&md_chunk_size.attr,
4902 	&md_size.attr,
4903 	&md_resync_start.attr,
4904 	&md_metadata.attr,
4905 	&md_new_device.attr,
4906 	&md_safe_delay.attr,
4907 	&md_array_state.attr,
4908 	&md_reshape_position.attr,
4909 	&md_reshape_direction.attr,
4910 	&md_array_size.attr,
4911 	&max_corr_read_errors.attr,
4912 	NULL,
4913 };
4914 
4915 static struct attribute *md_redundancy_attrs[] = {
4916 	&md_scan_mode.attr,
4917 	&md_last_scan_mode.attr,
4918 	&md_mismatches.attr,
4919 	&md_sync_min.attr,
4920 	&md_sync_max.attr,
4921 	&md_sync_speed.attr,
4922 	&md_sync_force_parallel.attr,
4923 	&md_sync_completed.attr,
4924 	&md_min_sync.attr,
4925 	&md_max_sync.attr,
4926 	&md_suspend_lo.attr,
4927 	&md_suspend_hi.attr,
4928 	&md_bitmap.attr,
4929 	&md_degraded.attr,
4930 	NULL,
4931 };
4932 static struct attribute_group md_redundancy_group = {
4933 	.name = NULL,
4934 	.attrs = md_redundancy_attrs,
4935 };
4936 
4937 static ssize_t
4938 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4939 {
4940 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4941 	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4942 	ssize_t rv;
4943 
4944 	if (!entry->show)
4945 		return -EIO;
4946 	spin_lock(&all_mddevs_lock);
4947 	if (list_empty(&mddev->all_mddevs)) {
4948 		spin_unlock(&all_mddevs_lock);
4949 		return -EBUSY;
4950 	}
4951 	mddev_get(mddev);
4952 	spin_unlock(&all_mddevs_lock);
4953 
4954 	rv = entry->show(mddev, page);
4955 	mddev_put(mddev);
4956 	return rv;
4957 }
4958 
4959 static ssize_t
4960 md_attr_store(struct kobject *kobj, struct attribute *attr,
4961 	      const char *page, size_t length)
4962 {
4963 	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4964 	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4965 	ssize_t rv;
4966 
4967 	if (!entry->store)
4968 		return -EIO;
4969 	if (!capable(CAP_SYS_ADMIN))
4970 		return -EACCES;
4971 	spin_lock(&all_mddevs_lock);
4972 	if (list_empty(&mddev->all_mddevs)) {
4973 		spin_unlock(&all_mddevs_lock);
4974 		return -EBUSY;
4975 	}
4976 	mddev_get(mddev);
4977 	spin_unlock(&all_mddevs_lock);
4978 	rv = entry->store(mddev, page, length);
4979 	mddev_put(mddev);
4980 	return rv;
4981 }
4982 
4983 static void md_free(struct kobject *ko)
4984 {
4985 	struct mddev *mddev = container_of(ko, struct mddev, kobj);
4986 
4987 	if (mddev->sysfs_state)
4988 		sysfs_put(mddev->sysfs_state);
4989 
4990 	if (mddev->queue)
4991 		blk_cleanup_queue(mddev->queue);
4992 	if (mddev->gendisk) {
4993 		del_gendisk(mddev->gendisk);
4994 		put_disk(mddev->gendisk);
4995 	}
4996 
4997 	kfree(mddev);
4998 }
4999 
5000 static const struct sysfs_ops md_sysfs_ops = {
5001 	.show	= md_attr_show,
5002 	.store	= md_attr_store,
5003 };
5004 static struct kobj_type md_ktype = {
5005 	.release	= md_free,
5006 	.sysfs_ops	= &md_sysfs_ops,
5007 	.default_attrs	= md_default_attrs,
5008 };
5009 
5010 int mdp_major = 0;
5011 
5012 static void mddev_delayed_delete(struct work_struct *ws)
5013 {
5014 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
5015 
5016 	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
5017 	kobject_del(&mddev->kobj);
5018 	kobject_put(&mddev->kobj);
5019 }
5020 
5021 static int md_alloc(dev_t dev, char *name)
5022 {
5023 	static DEFINE_MUTEX(disks_mutex);
5024 	struct mddev *mddev = mddev_find(dev);
5025 	struct gendisk *disk;
5026 	int partitioned;
5027 	int shift;
5028 	int unit;
5029 	int error;
5030 
5031 	if (!mddev)
5032 		return -ENODEV;
5033 
5034 	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5035 	shift = partitioned ? MdpMinorShift : 0;
5036 	unit = MINOR(mddev->unit) >> shift;
5037 
5038 	/* wait for any previous instance of this device to be
5039 	 * completely removed (mddev_delayed_delete).
5040 	 */
5041 	flush_workqueue(md_misc_wq);
5042 
5043 	mutex_lock(&disks_mutex);
5044 	error = -EEXIST;
5045 	if (mddev->gendisk)
5046 		goto abort;
5047 
5048 	if (name) {
5049 		/* Need to ensure that 'name' is not a duplicate.
5050 		 */
5051 		struct mddev *mddev2;
5052 		spin_lock(&all_mddevs_lock);
5053 
5054 		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5055 			if (mddev2->gendisk &&
5056 			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
5057 				spin_unlock(&all_mddevs_lock);
5058 				goto abort;
5059 			}
5060 		spin_unlock(&all_mddevs_lock);
5061 	}
5062 
5063 	error = -ENOMEM;
5064 	mddev->queue = blk_alloc_queue(GFP_KERNEL);
5065 	if (!mddev->queue)
5066 		goto abort;
5067 	mddev->queue->queuedata = mddev;
5068 
5069 	blk_queue_make_request(mddev->queue, md_make_request);
5070 	blk_set_stacking_limits(&mddev->queue->limits);
5071 
5072 	disk = alloc_disk(1 << shift);
5073 	if (!disk) {
5074 		blk_cleanup_queue(mddev->queue);
5075 		mddev->queue = NULL;
5076 		goto abort;
5077 	}
5078 	disk->major = MAJOR(mddev->unit);
5079 	disk->first_minor = unit << shift;
5080 	if (name)
5081 		strcpy(disk->disk_name, name);
5082 	else if (partitioned)
5083 		sprintf(disk->disk_name, "md_d%d", unit);
5084 	else
5085 		sprintf(disk->disk_name, "md%d", unit);
5086 	disk->fops = &md_fops;
5087 	disk->private_data = mddev;
5088 	disk->queue = mddev->queue;
5089 	blk_queue_write_cache(mddev->queue, true, true);
5090 	/* Allow extended partitions.  This makes the
5091 	 * 'mdp' device redundant, but we can't really
5092 	 * remove it now.
5093 	 */
5094 	disk->flags |= GENHD_FL_EXT_DEVT;
5095 	mddev->gendisk = disk;
5096 	/* As soon as we call add_disk(), another thread could get
5097 	 * through to md_open, so make sure it doesn't get too far
5098 	 */
5099 	mutex_lock(&mddev->open_mutex);
5100 	add_disk(disk);
5101 
5102 	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
5103 				     &disk_to_dev(disk)->kobj, "%s", "md");
5104 	if (error) {
5105 		/* This isn't possible, but as kobject_init_and_add is marked
5106 		 * __must_check, we must do something with the result
5107 		 */
5108 		pr_debug("md: cannot register %s/md - name in use\n",
5109 			 disk->disk_name);
5110 		error = 0;
5111 	}
5112 	if (mddev->kobj.sd &&
5113 	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5114 		pr_debug("pointless warning\n");
5115 	mutex_unlock(&mddev->open_mutex);
5116  abort:
5117 	mutex_unlock(&disks_mutex);
5118 	if (!error && mddev->kobj.sd) {
5119 		kobject_uevent(&mddev->kobj, KOBJ_ADD);
5120 		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5121 	}
5122 	mddev_put(mddev);
5123 	return error;
5124 }
5125 
5126 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5127 {
5128 	md_alloc(dev, NULL);
5129 	return NULL;
5130 }
5131 
5132 static int add_named_array(const char *val, struct kernel_param *kp)
5133 {
5134 	/* val must be "md_*" where * is not all digits.
5135 	 * We allocate an array with a large free minor number, and
5136 	 * set the name to val.  val must not already be an active name.
5137 	 */
5138 	int len = strlen(val);
5139 	char buf[DISK_NAME_LEN];
5140 
5141 	while (len && val[len-1] == '\n')
5142 		len--;
5143 	if (len >= DISK_NAME_LEN)
5144 		return -E2BIG;
5145 	strlcpy(buf, val, len+1);
5146 	if (strncmp(buf, "md_", 3) != 0)
5147 		return -EINVAL;
5148 	return md_alloc(0, buf);
5149 }
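/*
 * Example (editor's sketch): names passed to this handler must begin with
 * "md_", so "md_home\n" (trailing newlines are stripped) allocates an
 * array whose gendisk is literally named md_home, while "home" is
 * rejected with -EINVAL and an over-long name with -E2BIG.
 */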
5150 
5151 static void md_safemode_timeout(unsigned long data)
5152 {
5153 	struct mddev *mddev = (struct mddev *) data;
5154 
5155 	if (!atomic_read(&mddev->writes_pending)) {
5156 		mddev->safemode = 1;
5157 		if (mddev->external)
5158 			sysfs_notify_dirent_safe(mddev->sysfs_state);
5159 	}
5160 	md_wakeup_thread(mddev->thread);
5161 }
5162 
5163 static int start_dirty_degraded;
5164 
5165 int md_run(struct mddev *mddev)
5166 {
5167 	int err;
5168 	struct md_rdev *rdev;
5169 	struct md_personality *pers;
5170 
5171 	if (list_empty(&mddev->disks))
5172 		/* cannot run an array with no devices.. */
5173 		return -EINVAL;
5174 
5175 	if (mddev->pers)
5176 		return -EBUSY;
5177 	/* Cannot run until previous stop completes properly */
5178 	if (mddev->sysfs_active)
5179 		return -EBUSY;
5180 
5181 	/*
5182 	 * Analyze all RAID superblock(s)
5183 	 */
5184 	if (!mddev->raid_disks) {
5185 		if (!mddev->persistent)
5186 			return -EINVAL;
5187 		analyze_sbs(mddev);
5188 	}
5189 
5190 	if (mddev->level != LEVEL_NONE)
5191 		request_module("md-level-%d", mddev->level);
5192 	else if (mddev->clevel[0])
5193 		request_module("md-%s", mddev->clevel);
5194 
5195 	/*
5196 	 * Drop all container device buffers, from now on
5197 	 * the only valid external interface is through the md
5198 	 * device.
5199 	 */
5200 	rdev_for_each(rdev, mddev) {
5201 		if (test_bit(Faulty, &rdev->flags))
5202 			continue;
5203 		sync_blockdev(rdev->bdev);
5204 		invalidate_bdev(rdev->bdev);
5205 
5206 		/* perform some consistency tests on the device.
5207 		 * We don't want the data to overlap the metadata,
5208 		 * Internal Bitmap issues have been handled elsewhere.
5209 		 */
5210 		if (rdev->meta_bdev) {
5211 			/* Nothing to check */;
5212 		} else if (rdev->data_offset < rdev->sb_start) {
5213 			if (mddev->dev_sectors &&
5214 			    rdev->data_offset + mddev->dev_sectors
5215 			    > rdev->sb_start) {
5216 				pr_warn("md: %s: data overlaps metadata\n",
5217 					mdname(mddev));
5218 				return -EINVAL;
5219 			}
5220 		} else {
5221 			if (rdev->sb_start + rdev->sb_size/512
5222 			    > rdev->data_offset) {
5223 				pr_warn("md: %s: metadata overlaps data\n",
5224 					mdname(mddev));
5225 				return -EINVAL;
5226 			}
5227 		}
5228 		sysfs_notify_dirent_safe(rdev->sysfs_state);
5229 	}
5230 
5231 	if (mddev->bio_set == NULL)
5232 		mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5233 
5234 	spin_lock(&pers_lock);
5235 	pers = find_pers(mddev->level, mddev->clevel);
5236 	if (!pers || !try_module_get(pers->owner)) {
5237 		spin_unlock(&pers_lock);
5238 		if (mddev->level != LEVEL_NONE)
5239 			pr_warn("md: personality for level %d is not loaded!\n",
5240 				mddev->level);
5241 		else
5242 			pr_warn("md: personality for level %s is not loaded!\n",
5243 				mddev->clevel);
5244 		return -EINVAL;
5245 	}
5246 	spin_unlock(&pers_lock);
5247 	if (mddev->level != pers->level) {
5248 		mddev->level = pers->level;
5249 		mddev->new_level = pers->level;
5250 	}
5251 	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5252 
5253 	if (mddev->reshape_position != MaxSector &&
5254 	    pers->start_reshape == NULL) {
5255 		/* This personality cannot handle reshaping... */
5256 		module_put(pers->owner);
5257 		return -EINVAL;
5258 	}
5259 
5260 	if (pers->sync_request) {
5261 		/* Warn if this is a potentially silly
5262 		 * configuration.
5263 		 */
5264 		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5265 		struct md_rdev *rdev2;
5266 		int warned = 0;
5267 
5268 		rdev_for_each(rdev, mddev)
5269 			rdev_for_each(rdev2, mddev) {
5270 				if (rdev < rdev2 &&
5271 				    rdev->bdev->bd_contains ==
5272 				    rdev2->bdev->bd_contains) {
5273 					pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5274 						mdname(mddev),
5275 						bdevname(rdev->bdev,b),
5276 						bdevname(rdev2->bdev,b2));
5277 					warned = 1;
5278 				}
5279 			}
5280 
5281 		if (warned)
5282 			pr_warn("True protection against single-disk failure might be compromised.\n");
5283 	}
5284 
5285 	mddev->recovery = 0;
5286 	/* may be over-ridden by personality */
5287 	mddev->resync_max_sectors = mddev->dev_sectors;
5288 
5289 	mddev->ok_start_degraded = start_dirty_degraded;
5290 
5291 	if (start_readonly && mddev->ro == 0)
5292 		mddev->ro = 2; /* read-only, but switch on first write */
5293 
5294 	err = pers->run(mddev);
5295 	if (err)
5296 		pr_warn("md: pers->run() failed ...\n");
5297 	else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5298 		WARN_ONCE(!mddev->external_size,
5299 			  "%s: default size too small, but 'external_size' not in effect?\n",
5300 			  __func__);
5301 		pr_warn("md: invalid array_size %llu > default size %llu\n",
5302 			(unsigned long long)mddev->array_sectors / 2,
5303 			(unsigned long long)pers->size(mddev, 0, 0) / 2);
5304 		err = -EINVAL;
5305 	}
5306 	if (err == 0 && pers->sync_request &&
5307 	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5308 		struct bitmap *bitmap;
5309 
5310 		bitmap = bitmap_create(mddev, -1);
5311 		if (IS_ERR(bitmap)) {
5312 			err = PTR_ERR(bitmap);
5313 			pr_warn("%s: failed to create bitmap (%d)\n",
5314 				mdname(mddev), err);
5315 		} else
5316 			mddev->bitmap = bitmap;
5317 
5318 	}
5319 	if (err) {
5320 		mddev_detach(mddev);
5321 		if (mddev->private)
5322 			pers->free(mddev, mddev->private);
5323 		mddev->private = NULL;
5324 		module_put(pers->owner);
5325 		bitmap_destroy(mddev);
5326 		return err;
5327 	}
5328 	if (mddev->queue) {
5329 		bool nonrot = true;
5330 
5331 		rdev_for_each(rdev, mddev) {
5332 			if (rdev->raid_disk >= 0 &&
5333 			    !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5334 				nonrot = false;
5335 				break;
5336 			}
5337 		}
5338 		if (mddev->degraded)
5339 			nonrot = false;
5340 		if (nonrot)
5341 			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5342 		else
5343 			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5344 		mddev->queue->backing_dev_info.congested_data = mddev;
5345 		mddev->queue->backing_dev_info.congested_fn = md_congested;
5346 	}
5347 	if (pers->sync_request) {
5348 		if (mddev->kobj.sd &&
5349 		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5350 			pr_warn("md: cannot register extra attributes for %s\n",
5351 				mdname(mddev));
5352 		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5353 	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
5354 		mddev->ro = 0;
5355 
5356 	atomic_set(&mddev->writes_pending,0);
5357 	atomic_set(&mddev->max_corr_read_errors,
5358 		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5359 	mddev->safemode = 0;
5360 	if (mddev_is_clustered(mddev))
5361 		mddev->safemode_delay = 0;
5362 	else
5363 		mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5364 	mddev->in_sync = 1;
5365 	smp_wmb();
5366 	spin_lock(&mddev->lock);
5367 	mddev->pers = pers;
5368 	spin_unlock(&mddev->lock);
5369 	rdev_for_each(rdev, mddev)
5370 		if (rdev->raid_disk >= 0)
5371 			if (sysfs_link_rdev(mddev, rdev))
5372 				/* failure here is OK */;
5373 
5374 	if (mddev->degraded && !mddev->ro)
5375 		/* This ensures that recovering status is reported immediately
5376 		 * via sysfs - until a lack of spares is confirmed.
5377 		 */
5378 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5379 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5380 
5381 	if (mddev->sb_flags)
5382 		md_update_sb(mddev, 0);
5383 
5384 	md_new_event(mddev);
5385 	sysfs_notify_dirent_safe(mddev->sysfs_state);
5386 	sysfs_notify_dirent_safe(mddev->sysfs_action);
5387 	sysfs_notify(&mddev->kobj, NULL, "degraded");
5388 	return 0;
5389 }
5390 EXPORT_SYMBOL_GPL(md_run);
5391 
5392 static int do_md_run(struct mddev *mddev)
5393 {
5394 	int err;
5395 
5396 	err = md_run(mddev);
5397 	if (err)
5398 		goto out;
5399 	err = bitmap_load(mddev);
5400 	if (err) {
5401 		bitmap_destroy(mddev);
5402 		goto out;
5403 	}
5404 
5405 	if (mddev_is_clustered(mddev))
5406 		md_allow_write(mddev);
5407 
5408 	md_wakeup_thread(mddev->thread);
5409 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5410 
5411 	set_capacity(mddev->gendisk, mddev->array_sectors);
5412 	revalidate_disk(mddev->gendisk);
5413 	mddev->changed = 1;
5414 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5415 out:
5416 	return err;
5417 }
5418 
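/*
 * restart_array(): switch a started, read-only array back to read-write.
 * Fails unless the array is currently read-only, and refuses to go rw
 * when a configured journal device is missing or faulty.
 */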
5419 static int restart_array(struct mddev *mddev)
5420 {
5421 	struct gendisk *disk = mddev->gendisk;
5422 
5423 	/* Complain if it has no devices */
5424 	if (list_empty(&mddev->disks))
5425 		return -ENXIO;
5426 	if (!mddev->pers)
5427 		return -EINVAL;
5428 	if (!mddev->ro)
5429 		return -EBUSY;
5430 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5431 		struct md_rdev *rdev;
5432 		bool has_journal = false;
5433 
5434 		rcu_read_lock();
5435 		rdev_for_each_rcu(rdev, mddev) {
5436 			if (test_bit(Journal, &rdev->flags) &&
5437 			    !test_bit(Faulty, &rdev->flags)) {
5438 				has_journal = true;
5439 				break;
5440 			}
5441 		}
5442 		rcu_read_unlock();
5443 
5444 		/* Don't restart rw with journal missing/faulty */
5445 		if (!has_journal)
5446 			return -EINVAL;
5447 	}
5448 
5449 	mddev->safemode = 0;
5450 	mddev->ro = 0;
5451 	set_disk_ro(disk, 0);
5452 	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
5453 	/* Kick recovery or resync if necessary */
5454 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5455 	md_wakeup_thread(mddev->thread);
5456 	md_wakeup_thread(mddev->sync_thread);
5457 	sysfs_notify_dirent_safe(mddev->sysfs_state);
5458 	return 0;
5459 }
5460 
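/*
 * md_clean(): reset (nearly) all mddev fields to their defaults so the
 * device can be reused for a new array after a final stop.
 */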
5461 static void md_clean(struct mddev *mddev)
5462 {
5463 	mddev->array_sectors = 0;
5464 	mddev->external_size = 0;
5465 	mddev->dev_sectors = 0;
5466 	mddev->raid_disks = 0;
5467 	mddev->recovery_cp = 0;
5468 	mddev->resync_min = 0;
5469 	mddev->resync_max = MaxSector;
5470 	mddev->reshape_position = MaxSector;
5471 	mddev->external = 0;
5472 	mddev->persistent = 0;
5473 	mddev->level = LEVEL_NONE;
5474 	mddev->clevel[0] = 0;
5475 	mddev->flags = 0;
5476 	mddev->sb_flags = 0;
5477 	mddev->ro = 0;
5478 	mddev->metadata_type[0] = 0;
5479 	mddev->chunk_sectors = 0;
5480 	mddev->ctime = mddev->utime = 0;
5481 	mddev->layout = 0;
5482 	mddev->max_disks = 0;
5483 	mddev->events = 0;
5484 	mddev->can_decrease_events = 0;
5485 	mddev->delta_disks = 0;
5486 	mddev->reshape_backwards = 0;
5487 	mddev->new_level = LEVEL_NONE;
5488 	mddev->new_layout = 0;
5489 	mddev->new_chunk_sectors = 0;
5490 	mddev->curr_resync = 0;
5491 	atomic64_set(&mddev->resync_mismatches, 0);
5492 	mddev->suspend_lo = mddev->suspend_hi = 0;
5493 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
5494 	mddev->recovery = 0;
5495 	mddev->in_sync = 0;
5496 	mddev->changed = 0;
5497 	mddev->degraded = 0;
5498 	mddev->safemode = 0;
5499 	mddev->private = NULL;
5500 	mddev->cluster_info = NULL;
5501 	mddev->bitmap_info.offset = 0;
5502 	mddev->bitmap_info.default_offset = 0;
5503 	mddev->bitmap_info.default_space = 0;
5504 	mddev->bitmap_info.chunksize = 0;
5505 	mddev->bitmap_info.daemon_sleep = 0;
5506 	mddev->bitmap_info.max_write_behind = 0;
5507 	mddev->bitmap_info.nodes = 0;
5508 }
5509 
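/*
 * __md_stop_writes(): freeze recovery, reap any sync thread, flush the
 * bitmap and, if the array was writable and dirty, mark the superblock
 * clean.  Must be called with the mddev locked.
 */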
5510 static void __md_stop_writes(struct mddev *mddev)
5511 {
5512 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5513 	flush_workqueue(md_misc_wq);
5514 	if (mddev->sync_thread) {
5515 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5516 		md_reap_sync_thread(mddev);
5517 	}
5518 
5519 	del_timer_sync(&mddev->safemode_timer);
5520 
5521 	if (mddev->pers && mddev->pers->quiesce) {
5522 		mddev->pers->quiesce(mddev, 1);
5523 		mddev->pers->quiesce(mddev, 0);
5524 	}
5525 	bitmap_flush(mddev);
5526 
5527 	if (mddev->ro == 0 &&
5528 	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
5529 	     mddev->sb_flags)) {
5530 		/* mark array as shutdown cleanly */
5531 		if (!mddev_is_clustered(mddev))
5532 			mddev->in_sync = 1;
5533 		md_update_sb(mddev, 1);
5534 	}
5535 }
5536 
5537 void md_stop_writes(struct mddev *mddev)
5538 {
5539 	mddev_lock_nointr(mddev);
5540 	__md_stop_writes(mddev);
5541 	mddev_unlock(mddev);
5542 }
5543 EXPORT_SYMBOL_GPL(md_stop_writes);
5544 
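/*
 * mddev_detach(): wait for outstanding write-behind I/O, quiesce the
 * personality and stop the per-array thread; ->pers itself is left alone.
 */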
5545 static void mddev_detach(struct mddev *mddev)
5546 {
5547 	struct bitmap *bitmap = mddev->bitmap;
5548 	/* wait for behind writes to complete */
5549 	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
5550 		pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
5551 			 mdname(mddev));
5552 		/* need to kick something here to make sure I/O goes? */
5553 		wait_event(bitmap->behind_wait,
5554 			   atomic_read(&bitmap->behind_writes) == 0);
5555 	}
5556 	if (mddev->pers && mddev->pers->quiesce) {
5557 		mddev->pers->quiesce(mddev, 1);
5558 		mddev->pers->quiesce(mddev, 0);
5559 	}
5560 	md_unregister_thread(&mddev->thread);
5561 	if (mddev->queue)
5562 		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
5563 }
5564 
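/*
 * __md_stop(): detach the array and release the personality.  ->pers is
 * cleared under mddev->lock before the personality's free() is called.
 */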
5565 static void __md_stop(struct mddev *mddev)
5566 {
5567 	struct md_personality *pers = mddev->pers;
5568 	mddev_detach(mddev);
5569 	/* Ensure ->event_work is done */
5570 	flush_workqueue(md_misc_wq);
5571 	spin_lock(&mddev->lock);
5572 	mddev->pers = NULL;
5573 	spin_unlock(&mddev->lock);
5574 	pers->free(mddev, mddev->private);
5575 	mddev->private = NULL;
5576 	if (pers->sync_request && mddev->to_remove == NULL)
5577 		mddev->to_remove = &md_redundancy_group;
5578 	module_put(pers->owner);
5579 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5580 }
5581 
5582 void md_stop(struct mddev *mddev)
5583 {
5584 	/* stop the array and free any attached data structures.
5585 	 * This is called from dm-raid
5586 	 */
5587 	__md_stop(mddev);
5588 	bitmap_destroy(mddev);
5589 	if (mddev->bio_set)
5590 		bioset_free(mddev->bio_set);
5591 }
5592 
5593 EXPORT_SYMBOL_GPL(md_stop);
5594 
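/*
 * md_set_readonly(): stop writes and mark the array (and its gendisk)
 * read-only.  Returns -EBUSY if someone else holds the device open or a
 * resync/recovery is still running, -ENXIO if it is already read-only.
 */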
5595 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5596 {
5597 	int err = 0;
5598 	int did_freeze = 0;
5599 
5600 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5601 		did_freeze = 1;
5602 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5603 		md_wakeup_thread(mddev->thread);
5604 	}
5605 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5606 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5607 	if (mddev->sync_thread)
5608 		/* Thread might be blocked waiting for metadata update
5609 		 * which will now never happen */
5610 		wake_up_process(mddev->sync_thread->tsk);
5611 
5612 	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
5613 		return -EBUSY;
5614 	mddev_unlock(mddev);
5615 	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5616 					  &mddev->recovery));
5617 	wait_event(mddev->sb_wait,
5618 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
5619 	mddev_lock_nointr(mddev);
5620 
5621 	mutex_lock(&mddev->open_mutex);
5622 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5623 	    mddev->sync_thread ||
5624 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5625 		pr_warn("md: %s still in use.\n",mdname(mddev));
5626 		if (did_freeze) {
5627 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5628 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5629 			md_wakeup_thread(mddev->thread);
5630 		}
5631 		err = -EBUSY;
5632 		goto out;
5633 	}
5634 	if (mddev->pers) {
5635 		__md_stop_writes(mddev);
5636 
5637 		err  = -ENXIO;
5638 		if (mddev->ro==1)
5639 			goto out;
5640 		mddev->ro = 1;
5641 		set_disk_ro(mddev->gendisk, 1);
5642 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5643 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5644 		md_wakeup_thread(mddev->thread);
5645 		sysfs_notify_dirent_safe(mddev->sysfs_state);
5646 		err = 0;
5647 	}
5648 out:
5649 	mutex_unlock(&mddev->open_mutex);
5650 	return err;
5651 }
5652 
5653 /* mode:
5654  *   0 - completely stop and disassemble array
5655  *   2 - stop but do not disassemble array
5656  */
5657 static int do_md_stop(struct mddev *mddev, int mode,
5658 		      struct block_device *bdev)
5659 {
5660 	struct gendisk *disk = mddev->gendisk;
5661 	struct md_rdev *rdev;
5662 	int did_freeze = 0;
5663 
5664 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5665 		did_freeze = 1;
5666 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5667 		md_wakeup_thread(mddev->thread);
5668 	}
5669 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5670 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5671 	if (mddev->sync_thread)
5672 		/* Thread might be blocked waiting for metadata update
5673 		 * which will now never happen */
5674 		wake_up_process(mddev->sync_thread->tsk);
5675 
5676 	mddev_unlock(mddev);
5677 	wait_event(resync_wait, (mddev->sync_thread == NULL &&
5678 				 !test_bit(MD_RECOVERY_RUNNING,
5679 					   &mddev->recovery)));
5680 	mddev_lock_nointr(mddev);
5681 
5682 	mutex_lock(&mddev->open_mutex);
5683 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5684 	    mddev->sysfs_active ||
5685 	    mddev->sync_thread ||
5686 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5687 		pr_warn("md: %s still in use.\n",mdname(mddev));
5688 		mutex_unlock(&mddev->open_mutex);
5689 		if (did_freeze) {
5690 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5691 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5692 			md_wakeup_thread(mddev->thread);
5693 		}
5694 		return -EBUSY;
5695 	}
5696 	if (mddev->pers) {
5697 		if (mddev->ro)
5698 			set_disk_ro(disk, 0);
5699 
5700 		__md_stop_writes(mddev);
5701 		__md_stop(mddev);
5702 		mddev->queue->backing_dev_info.congested_fn = NULL;
5703 
5704 		/* tell userspace to handle 'inactive' */
5705 		sysfs_notify_dirent_safe(mddev->sysfs_state);
5706 
5707 		rdev_for_each(rdev, mddev)
5708 			if (rdev->raid_disk >= 0)
5709 				sysfs_unlink_rdev(mddev, rdev);
5710 
5711 		set_capacity(disk, 0);
5712 		mutex_unlock(&mddev->open_mutex);
5713 		mddev->changed = 1;
5714 		revalidate_disk(disk);
5715 
5716 		if (mddev->ro)
5717 			mddev->ro = 0;
5718 	} else
5719 		mutex_unlock(&mddev->open_mutex);
5720 	/*
5721 	 * Free resources if final stop
5722 	 */
5723 	if (mode == 0) {
5724 		pr_info("md: %s stopped.\n", mdname(mddev));
5725 
5726 		bitmap_destroy(mddev);
5727 		if (mddev->bitmap_info.file) {
5728 			struct file *f = mddev->bitmap_info.file;
5729 			spin_lock(&mddev->lock);
5730 			mddev->bitmap_info.file = NULL;
5731 			spin_unlock(&mddev->lock);
5732 			fput(f);
5733 		}
5734 		mddev->bitmap_info.offset = 0;
5735 
5736 		export_array(mddev);
5737 
5738 		md_clean(mddev);
5739 		if (mddev->hold_active == UNTIL_STOP)
5740 			mddev->hold_active = 0;
5741 	}
5742 	md_new_event(mddev);
5743 	sysfs_notify_dirent_safe(mddev->sysfs_state);
5744 	return 0;
5745 }
5746 
5747 #ifndef MODULE
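/*
 * autorun_array(): try to start an auto-assembled array; if do_md_run()
 * fails, the array is stopped and disassembled again.
 */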
5748 static void autorun_array(struct mddev *mddev)
5749 {
5750 	struct md_rdev *rdev;
5751 	int err;
5752 
5753 	if (list_empty(&mddev->disks))
5754 		return;
5755 
5756 	pr_info("md: running: ");
5757 
5758 	rdev_for_each(rdev, mddev) {
5759 		char b[BDEVNAME_SIZE];
5760 		pr_cont("<%s>", bdevname(rdev->bdev,b));
5761 	}
5762 	pr_cont("\n");
5763 
5764 	err = do_md_run(mddev);
5765 	if (err) {
5766 		pr_warn("md: do_md_run() returned %d\n", err);
5767 		do_md_stop(mddev, 0, NULL);
5768 	}
5769 }
5770 
5771 /*
5772  * let's try to run arrays based on all disks that have arrived
5773  * until now. (those are in pending_raid_disks)
5774  *
5775  * the method: pick the first pending disk, collect all disks with
5776  * the same UUID, remove all from the pending list and put them into
5777  * the 'same_array' list. Then order this list based on superblock
5778  * update time (freshest comes first), kick out 'old' disks and
5779  * compare superblocks. If everything's fine then run it.
5780  *
5781  * If "unit" is allocated, then bump its reference count
5782  */
5783 static void autorun_devices(int part)
5784 {
5785 	struct md_rdev *rdev0, *rdev, *tmp;
5786 	struct mddev *mddev;
5787 	char b[BDEVNAME_SIZE];
5788 
5789 	pr_info("md: autorun ...\n");
5790 	while (!list_empty(&pending_raid_disks)) {
5791 		int unit;
5792 		dev_t dev;
5793 		LIST_HEAD(candidates);
5794 		rdev0 = list_entry(pending_raid_disks.next,
5795 					 struct md_rdev, same_set);
5796 
5797 		pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
5798 		INIT_LIST_HEAD(&candidates);
5799 		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5800 			if (super_90_load(rdev, rdev0, 0) >= 0) {
5801 				pr_debug("md:  adding %s ...\n",
5802 					 bdevname(rdev->bdev,b));
5803 				list_move(&rdev->same_set, &candidates);
5804 			}
5805 		/*
5806 		 * now we have a set of devices, with all of them having
5807 		 * mostly sane superblocks. It's time to allocate the
5808 		 * mddev.
5809 		 */
5810 		if (part) {
5811 			dev = MKDEV(mdp_major,
5812 				    rdev0->preferred_minor << MdpMinorShift);
5813 			unit = MINOR(dev) >> MdpMinorShift;
5814 		} else {
5815 			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5816 			unit = MINOR(dev);
5817 		}
5818 		if (rdev0->preferred_minor != unit) {
5819 			pr_warn("md: unit number in %s is bad: %d\n",
5820 				bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5821 			break;
5822 		}
5823 
5824 		md_probe(dev, NULL, NULL);
5825 		mddev = mddev_find(dev);
5826 		if (!mddev || !mddev->gendisk) {
5827 			if (mddev)
5828 				mddev_put(mddev);
5829 			break;
5830 		}
5831 		if (mddev_lock(mddev))
5832 			pr_warn("md: %s locked, cannot run\n", mdname(mddev));
5833 		else if (mddev->raid_disks || mddev->major_version
5834 			 || !list_empty(&mddev->disks)) {
5835 			pr_warn("md: %s already running, cannot run %s\n",
5836 				mdname(mddev), bdevname(rdev0->bdev,b));
5837 			mddev_unlock(mddev);
5838 		} else {
5839 			pr_debug("md: created %s\n", mdname(mddev));
5840 			mddev->persistent = 1;
5841 			rdev_for_each_list(rdev, tmp, &candidates) {
5842 				list_del_init(&rdev->same_set);
5843 				if (bind_rdev_to_array(rdev, mddev))
5844 					export_rdev(rdev);
5845 			}
5846 			autorun_array(mddev);
5847 			mddev_unlock(mddev);
5848 		}
5849 		/* on success, candidates will be empty, on error
5850 		/* on success, candidates will be empty; on error
5851 		 * it won't be...
5852 		rdev_for_each_list(rdev, tmp, &candidates) {
5853 			list_del_init(&rdev->same_set);
5854 			export_rdev(rdev);
5855 		}
5856 		mddev_put(mddev);
5857 	}
5858 	pr_info("md: ... autorun DONE.\n");
5859 }
5860 #endif /* !MODULE */
5861 
5862 static int get_version(void __user *arg)
5863 {
5864 	mdu_version_t ver;
5865 
5866 	ver.major = MD_MAJOR_VERSION;
5867 	ver.minor = MD_MINOR_VERSION;
5868 	ver.patchlevel = MD_PATCHLEVEL_VERSION;
5869 
5870 	if (copy_to_user(arg, &ver, sizeof(ver)))
5871 		return -EFAULT;
5872 
5873 	return 0;
5874 }
5875 
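/*
 * GET_ARRAY_INFO ioctl: fill an mdu_array_info_t with the array geometry
 * and the per-state disk counts (active, working, failed, spare) and copy
 * it to userspace.
 */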
5876 static int get_array_info(struct mddev *mddev, void __user *arg)
5877 {
5878 	mdu_array_info_t info;
5879 	int nr,working,insync,failed,spare;
5880 	struct md_rdev *rdev;
5881 
5882 	nr = working = insync = failed = spare = 0;
5883 	rcu_read_lock();
5884 	rdev_for_each_rcu(rdev, mddev) {
5885 		nr++;
5886 		if (test_bit(Faulty, &rdev->flags))
5887 			failed++;
5888 		else {
5889 			working++;
5890 			if (test_bit(In_sync, &rdev->flags))
5891 				insync++;
5892 			else if (test_bit(Journal, &rdev->flags))
5893 				/* TODO: add journal count to md_u.h */
5894 				;
5895 			else
5896 				spare++;
5897 		}
5898 	}
5899 	rcu_read_unlock();
5900 
5901 	info.major_version = mddev->major_version;
5902 	info.minor_version = mddev->minor_version;
5903 	info.patch_version = MD_PATCHLEVEL_VERSION;
5904 	info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
5905 	info.level         = mddev->level;
5906 	info.size          = mddev->dev_sectors / 2;
5907 	if (info.size != mddev->dev_sectors / 2) /* overflow */
5908 		info.size = -1;
5909 	info.nr_disks      = nr;
5910 	info.raid_disks    = mddev->raid_disks;
5911 	info.md_minor      = mddev->md_minor;
5912 	info.not_persistent= !mddev->persistent;
5913 
5914 	info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
5915 	info.state         = 0;
5916 	if (mddev->in_sync)
5917 		info.state = (1<<MD_SB_CLEAN);
5918 	if (mddev->bitmap && mddev->bitmap_info.offset)
5919 		info.state |= (1<<MD_SB_BITMAP_PRESENT);
5920 	if (mddev_is_clustered(mddev))
5921 		info.state |= (1<<MD_SB_CLUSTERED);
5922 	info.active_disks  = insync;
5923 	info.working_disks = working;
5924 	info.failed_disks  = failed;
5925 	info.spare_disks   = spare;
5926 
5927 	info.layout        = mddev->layout;
5928 	info.chunk_size    = mddev->chunk_sectors << 9;
5929 
5930 	if (copy_to_user(arg, &info, sizeof(info)))
5931 		return -EFAULT;
5932 
5933 	return 0;
5934 }
5935 
5936 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5937 {
5938 	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5939 	char *ptr;
5940 	int err;
5941 
5942 	file = kzalloc(sizeof(*file), GFP_NOIO);
5943 	if (!file)
5944 		return -ENOMEM;
5945 
5946 	err = 0;
5947 	spin_lock(&mddev->lock);
5948 	/* bitmap enabled */
5949 	if (mddev->bitmap_info.file) {
5950 		ptr = file_path(mddev->bitmap_info.file, file->pathname,
5951 				sizeof(file->pathname));
5952 		if (IS_ERR(ptr))
5953 			err = PTR_ERR(ptr);
5954 		else
5955 			memmove(file->pathname, ptr,
5956 				sizeof(file->pathname)-(ptr-file->pathname));
5957 	}
5958 	spin_unlock(&mddev->lock);
5959 
5960 	if (err == 0 &&
5961 	    copy_to_user(arg, file, sizeof(*file)))
5962 		err = -EFAULT;
5963 
5964 	kfree(file);
5965 	return err;
5966 }
5967 
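/*
 * GET_DISK_INFO ioctl: look up the rdev with the requested number and
 * report its device numbers, role and state flags; a missing slot is
 * reported as MD_DISK_REMOVED.
 */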
5968 static int get_disk_info(struct mddev *mddev, void __user * arg)
5969 {
5970 	mdu_disk_info_t info;
5971 	struct md_rdev *rdev;
5972 
5973 	if (copy_from_user(&info, arg, sizeof(info)))
5974 		return -EFAULT;
5975 
5976 	rcu_read_lock();
5977 	rdev = md_find_rdev_nr_rcu(mddev, info.number);
5978 	if (rdev) {
5979 		info.major = MAJOR(rdev->bdev->bd_dev);
5980 		info.minor = MINOR(rdev->bdev->bd_dev);
5981 		info.raid_disk = rdev->raid_disk;
5982 		info.state = 0;
5983 		if (test_bit(Faulty, &rdev->flags))
5984 			info.state |= (1<<MD_DISK_FAULTY);
5985 		else if (test_bit(In_sync, &rdev->flags)) {
5986 			info.state |= (1<<MD_DISK_ACTIVE);
5987 			info.state |= (1<<MD_DISK_SYNC);
5988 		}
5989 		if (test_bit(Journal, &rdev->flags))
5990 			info.state |= (1<<MD_DISK_JOURNAL);
5991 		if (test_bit(WriteMostly, &rdev->flags))
5992 			info.state |= (1<<MD_DISK_WRITEMOSTLY);
5993 		if (test_bit(FailFast, &rdev->flags))
5994 			info.state |= (1<<MD_DISK_FAILFAST);
5995 	} else {
5996 		info.major = info.minor = 0;
5997 		info.raid_disk = -1;
5998 		info.state = (1<<MD_DISK_REMOVED);
5999 	}
6000 	rcu_read_unlock();
6001 
6002 	if (copy_to_user(arg, &info, sizeof(info)))
6003 		return -EFAULT;
6004 
6005 	return 0;
6006 }
6007 
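/*
 * ADD_NEW_DISK ioctl: covers three cases - adding a device while the
 * array is still being assembled (->raid_disks not yet set), hot-adding
 * a spare/journal to a running array, and populating a freshly configured
 * version-0.90 array.
 */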
6008 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
6009 {
6010 	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
6011 	struct md_rdev *rdev;
6012 	dev_t dev = MKDEV(info->major,info->minor);
6013 
6014 	if (mddev_is_clustered(mddev) &&
6015 		!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6016 		pr_warn("%s: Cannot add to clustered mddev.\n",
6017 			mdname(mddev));
6018 		return -EINVAL;
6019 	}
6020 
6021 	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6022 		return -EOVERFLOW;
6023 
6024 	if (!mddev->raid_disks) {
6025 		int err;
6026 		/* expecting a device which has a superblock */
6027 		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6028 		if (IS_ERR(rdev)) {
6029 			pr_warn("md: md_import_device returned %ld\n",
6030 				PTR_ERR(rdev));
6031 			return PTR_ERR(rdev);
6032 		}
6033 		if (!list_empty(&mddev->disks)) {
6034 			struct md_rdev *rdev0
6035 				= list_entry(mddev->disks.next,
6036 					     struct md_rdev, same_set);
6037 			err = super_types[mddev->major_version]
6038 				.load_super(rdev, rdev0, mddev->minor_version);
6039 			if (err < 0) {
6040 				pr_warn("md: %s has different UUID to %s\n",
6041 					bdevname(rdev->bdev,b),
6042 					bdevname(rdev0->bdev,b2));
6043 				export_rdev(rdev);
6044 				return -EINVAL;
6045 			}
6046 		}
6047 		err = bind_rdev_to_array(rdev, mddev);
6048 		if (err)
6049 			export_rdev(rdev);
6050 		return err;
6051 	}
6052 
6053 	/*
6054 	 * add_new_disk can be used once the array is assembled
6055 	 * to add "hot spares".  They must already have a superblock
6056 	 * written
6057 	 */
6058 	if (mddev->pers) {
6059 		int err;
6060 		if (!mddev->pers->hot_add_disk) {
6061 			pr_warn("%s: personality does not support diskops!\n",
6062 				mdname(mddev));
6063 			return -EINVAL;
6064 		}
6065 		if (mddev->persistent)
6066 			rdev = md_import_device(dev, mddev->major_version,
6067 						mddev->minor_version);
6068 		else
6069 			rdev = md_import_device(dev, -1, -1);
6070 		if (IS_ERR(rdev)) {
6071 			pr_warn("md: md_import_device returned %ld\n",
6072 				PTR_ERR(rdev));
6073 			return PTR_ERR(rdev);
6074 		}
6075 		/* set saved_raid_disk if appropriate */
6076 		if (!mddev->persistent) {
6077 			if (info->state & (1<<MD_DISK_SYNC)  &&
6078 			    info->raid_disk < mddev->raid_disks) {
6079 				rdev->raid_disk = info->raid_disk;
6080 				set_bit(In_sync, &rdev->flags);
6081 				clear_bit(Bitmap_sync, &rdev->flags);
6082 			} else
6083 				rdev->raid_disk = -1;
6084 			rdev->saved_raid_disk = rdev->raid_disk;
6085 		} else
6086 			super_types[mddev->major_version].
6087 				validate_super(mddev, rdev);
6088 		if ((info->state & (1<<MD_DISK_SYNC)) &&
6089 		     rdev->raid_disk != info->raid_disk) {
6090 			/* This was a hot-add request, but events doesn't
6091 			/* This was a hot-add request, but the events count doesn't
6092 			 */
6093 			export_rdev(rdev);
6094 			return -EINVAL;
6095 		}
6096 
6097 		clear_bit(In_sync, &rdev->flags); /* just to be sure */
6098 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6099 			set_bit(WriteMostly, &rdev->flags);
6100 		else
6101 			clear_bit(WriteMostly, &rdev->flags);
6102 		if (info->state & (1<<MD_DISK_FAILFAST))
6103 			set_bit(FailFast, &rdev->flags);
6104 		else
6105 			clear_bit(FailFast, &rdev->flags);
6106 
6107 		if (info->state & (1<<MD_DISK_JOURNAL)) {
6108 			struct md_rdev *rdev2;
6109 			bool has_journal = false;
6110 
6111 			/* make sure there is no existing journal disk */
6112 			rdev_for_each(rdev2, mddev) {
6113 				if (test_bit(Journal, &rdev2->flags)) {
6114 					has_journal = true;
6115 					break;
6116 				}
6117 			}
6118 			if (has_journal) {
6119 				export_rdev(rdev);
6120 				return -EBUSY;
6121 			}
6122 			set_bit(Journal, &rdev->flags);
6123 		}
6124 		/*
6125 		 * check whether the device shows up in other nodes
6126 		 */
6127 		if (mddev_is_clustered(mddev)) {
6128 			if (info->state & (1 << MD_DISK_CANDIDATE))
6129 				set_bit(Candidate, &rdev->flags);
6130 			else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6131 				/* --add initiated by this node */
6132 				err = md_cluster_ops->add_new_disk(mddev, rdev);
6133 				if (err) {
6134 					export_rdev(rdev);
6135 					return err;
6136 				}
6137 			}
6138 		}
6139 
6140 		rdev->raid_disk = -1;
6141 		err = bind_rdev_to_array(rdev, mddev);
6142 
6143 		if (err)
6144 			export_rdev(rdev);
6145 
6146 		if (mddev_is_clustered(mddev)) {
6147 			if (info->state & (1 << MD_DISK_CANDIDATE)) {
6148 				if (!err) {
6149 					err = md_cluster_ops->new_disk_ack(mddev,
6150 						err == 0);
6151 					if (err)
6152 						md_kick_rdev_from_array(rdev);
6153 				}
6154 			} else {
6155 				if (err)
6156 					md_cluster_ops->add_new_disk_cancel(mddev);
6157 				else
6158 					err = add_bound_rdev(rdev);
6159 			}
6160 
6161 		} else if (!err)
6162 			err = add_bound_rdev(rdev);
6163 
6164 		return err;
6165 	}
6166 
6167 	/* otherwise, add_new_disk is only allowed
6168 	 * for major_version==0 superblocks
6169 	 */
6170 	if (mddev->major_version != 0) {
6171 		pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6172 		return -EINVAL;
6173 	}
6174 
6175 	if (!(info->state & (1<<MD_DISK_FAULTY))) {
6176 		int err;
6177 		rdev = md_import_device(dev, -1, 0);
6178 		if (IS_ERR(rdev)) {
6179 			pr_warn("md: error, md_import_device() returned %ld\n",
6180 				PTR_ERR(rdev));
6181 			return PTR_ERR(rdev);
6182 		}
6183 		rdev->desc_nr = info->number;
6184 		if (info->raid_disk < mddev->raid_disks)
6185 			rdev->raid_disk = info->raid_disk;
6186 		else
6187 			rdev->raid_disk = -1;
6188 
6189 		if (rdev->raid_disk < mddev->raid_disks)
6190 			if (info->state & (1<<MD_DISK_SYNC))
6191 				set_bit(In_sync, &rdev->flags);
6192 
6193 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6194 			set_bit(WriteMostly, &rdev->flags);
6195 		if (info->state & (1<<MD_DISK_FAILFAST))
6196 			set_bit(FailFast, &rdev->flags);
6197 
6198 		if (!mddev->persistent) {
6199 			pr_debug("md: nonpersistent superblock ...\n");
6200 			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6201 		} else
6202 			rdev->sb_start = calc_dev_sboffset(rdev);
6203 		rdev->sectors = rdev->sb_start;
6204 
6205 		err = bind_rdev_to_array(rdev, mddev);
6206 		if (err) {
6207 			export_rdev(rdev);
6208 			return err;
6209 		}
6210 	}
6211 
6212 	return 0;
6213 }
6214 
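/*
 * HOT_REMOVE_DISK ioctl: detach the given device from the array.  The
 * device must not still hold an active slot, otherwise -EBUSY is returned.
 */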
6215 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6216 {
6217 	char b[BDEVNAME_SIZE];
6218 	struct md_rdev *rdev;
6219 
6220 	rdev = find_rdev(mddev, dev);
6221 	if (!rdev)
6222 		return -ENXIO;
6223 
6224 	if (rdev->raid_disk < 0)
6225 		goto kick_rdev;
6226 
6227 	clear_bit(Blocked, &rdev->flags);
6228 	remove_and_add_spares(mddev, rdev);
6229 
6230 	if (rdev->raid_disk >= 0)
6231 		goto busy;
6232 
6233 kick_rdev:
6234 	if (mddev_is_clustered(mddev))
6235 		md_cluster_ops->remove_disk(mddev, rdev);
6236 
6237 	md_kick_rdev_from_array(rdev);
6238 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6239 	if (mddev->thread)
6240 		md_wakeup_thread(mddev->thread);
6241 	else
6242 		md_update_sb(mddev, 1);
6243 	md_new_event(mddev);
6244 
6245 	return 0;
6246 busy:
6247 	pr_debug("md: cannot remove active disk %s from %s ...\n",
6248 		 bdevname(rdev->bdev,b), mdname(mddev));
6249 	return -EBUSY;
6250 }
6251 
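/*
 * HOT_ADD_DISK ioctl: add a raw device as a spare to a running
 * version-0.90 array and kick recovery so it can be rebuilt onto.
 */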
6252 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6253 {
6254 	char b[BDEVNAME_SIZE];
6255 	int err;
6256 	struct md_rdev *rdev;
6257 
6258 	if (!mddev->pers)
6259 		return -ENODEV;
6260 
6261 	if (mddev->major_version != 0) {
6262 		pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6263 			mdname(mddev));
6264 		return -EINVAL;
6265 	}
6266 	if (!mddev->pers->hot_add_disk) {
6267 		pr_warn("%s: personality does not support diskops!\n",
6268 			mdname(mddev));
6269 		return -EINVAL;
6270 	}
6271 
6272 	rdev = md_import_device(dev, -1, 0);
6273 	if (IS_ERR(rdev)) {
6274 		pr_warn("md: error, md_import_device() returned %ld\n",
6275 			PTR_ERR(rdev));
6276 		return -EINVAL;
6277 	}
6278 
6279 	if (mddev->persistent)
6280 		rdev->sb_start = calc_dev_sboffset(rdev);
6281 	else
6282 		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6283 
6284 	rdev->sectors = rdev->sb_start;
6285 
6286 	if (test_bit(Faulty, &rdev->flags)) {
6287 		pr_warn("md: can not hot-add faulty %s disk to %s!\n",
6288 			bdevname(rdev->bdev,b), mdname(mddev));
6289 		err = -EINVAL;
6290 		goto abort_export;
6291 	}
6292 
6293 	clear_bit(In_sync, &rdev->flags);
6294 	rdev->desc_nr = -1;
6295 	rdev->saved_raid_disk = -1;
6296 	err = bind_rdev_to_array(rdev, mddev);
6297 	if (err)
6298 		goto abort_export;
6299 
6300 	/*
6301 	 * The rest had better be atomic; disk failures can be
6302 	 * noticed in interrupt context ...
6303 	 */
6304 
6305 	rdev->raid_disk = -1;
6306 
6307 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6308 	if (!mddev->thread)
6309 		md_update_sb(mddev, 1);
6310 	/*
6311 	 * Kick recovery, maybe this spare has to be added to the
6312 	 * array immediately.
6313 	 */
6314 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6315 	md_wakeup_thread(mddev->thread);
6316 	md_new_event(mddev);
6317 	return 0;
6318 
6319 abort_export:
6320 	export_rdev(rdev);
6321 	return err;
6322 }
6323 
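/*
 * SET_BITMAP_FILE ioctl: attach (fd >= 0) or remove (fd < 0) a file-backed
 * write-intent bitmap.  The file must be a regular file opened for write
 * and not in use elsewhere; on a live array the personality is quiesced
 * around the change.
 */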
6324 static int set_bitmap_file(struct mddev *mddev, int fd)
6325 {
6326 	int err = 0;
6327 
6328 	if (mddev->pers) {
6329 		if (!mddev->pers->quiesce || !mddev->thread)
6330 			return -EBUSY;
6331 		if (mddev->recovery || mddev->sync_thread)
6332 			return -EBUSY;
6333 		/* we should be able to change the bitmap.. */
6334 	}
6335 
6336 	if (fd >= 0) {
6337 		struct inode *inode;
6338 		struct file *f;
6339 
6340 		if (mddev->bitmap || mddev->bitmap_info.file)
6341 			return -EEXIST; /* cannot add when bitmap is present */
6342 		f = fget(fd);
6343 
6344 		if (f == NULL) {
6345 			pr_warn("%s: error: failed to get bitmap file\n",
6346 				mdname(mddev));
6347 			return -EBADF;
6348 		}
6349 
6350 		inode = f->f_mapping->host;
6351 		if (!S_ISREG(inode->i_mode)) {
6352 			pr_warn("%s: error: bitmap file must be a regular file\n",
6353 				mdname(mddev));
6354 			err = -EBADF;
6355 		} else if (!(f->f_mode & FMODE_WRITE)) {
6356 			pr_warn("%s: error: bitmap file must be opened for write\n",
6357 				mdname(mddev));
6358 			err = -EBADF;
6359 		} else if (atomic_read(&inode->i_writecount) != 1) {
6360 			pr_warn("%s: error: bitmap file is already in use\n",
6361 				mdname(mddev));
6362 			err = -EBUSY;
6363 		}
6364 		if (err) {
6365 			fput(f);
6366 			return err;
6367 		}
6368 		mddev->bitmap_info.file = f;
6369 		mddev->bitmap_info.offset = 0; /* file overrides offset */
6370 	} else if (mddev->bitmap == NULL)
6371 		return -ENOENT; /* cannot remove what isn't there */
6372 	err = 0;
6373 	if (mddev->pers) {
6374 		mddev->pers->quiesce(mddev, 1);
6375 		if (fd >= 0) {
6376 			struct bitmap *bitmap;
6377 
6378 			bitmap = bitmap_create(mddev, -1);
6379 			if (!IS_ERR(bitmap)) {
6380 				mddev->bitmap = bitmap;
6381 				err = bitmap_load(mddev);
6382 			} else
6383 				err = PTR_ERR(bitmap);
6384 		}
6385 		if (fd < 0 || err) {
6386 			bitmap_destroy(mddev);
6387 			fd = -1; /* make sure to put the file */
6388 		}
6389 		mddev->pers->quiesce(mddev, 0);
6390 	}
6391 	if (fd < 0) {
6392 		struct file *f = mddev->bitmap_info.file;
6393 		if (f) {
6394 			spin_lock(&mddev->lock);
6395 			mddev->bitmap_info.file = NULL;
6396 			spin_unlock(&mddev->lock);
6397 			fput(f);
6398 		}
6399 	}
6400 
6401 	return err;
6402 }
6403 
6404 /*
6405  * set_array_info is used in two different ways.
6406  * The original usage is when creating a new array.
6407  * In this usage, raid_disks is > 0 and it together with
6408  *  level, size, not_persistent, layout, chunksize determine the
6409  *  shape of the array.
6410  *  This will always create an array with a type-0.90.0 superblock.
6411  * The newer usage is when assembling an array.
6412  *  In this case raid_disks will be 0, and the major_version field is
6413  *  used to determine which style super-blocks are to be found on the devices.
6414  *  The minor and patch _version numbers are also kept in case the
6415  *  super_block handler wishes to interpret them.
6416  */
6417 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6418 {
6419 
6420 	if (info->raid_disks == 0) {
6421 		/* just setting version number for superblock loading */
6422 		if (info->major_version < 0 ||
6423 		    info->major_version >= ARRAY_SIZE(super_types) ||
6424 		    super_types[info->major_version].name == NULL) {
6425 			/* maybe try to auto-load a module? */
6426 			pr_warn("md: superblock version %d not known\n",
6427 				info->major_version);
6428 			return -EINVAL;
6429 		}
6430 		mddev->major_version = info->major_version;
6431 		mddev->minor_version = info->minor_version;
6432 		mddev->patch_version = info->patch_version;
6433 		mddev->persistent = !info->not_persistent;
6434 		/* ensure mddev_put doesn't delete this now that there
6435 		 * is some minimal configuration.
6436 		 */
6437 		mddev->ctime         = ktime_get_real_seconds();
6438 		return 0;
6439 	}
6440 	mddev->major_version = MD_MAJOR_VERSION;
6441 	mddev->minor_version = MD_MINOR_VERSION;
6442 	mddev->patch_version = MD_PATCHLEVEL_VERSION;
6443 	mddev->ctime         = ktime_get_real_seconds();
6444 
6445 	mddev->level         = info->level;
6446 	mddev->clevel[0]     = 0;
6447 	mddev->dev_sectors   = 2 * (sector_t)info->size;
6448 	mddev->raid_disks    = info->raid_disks;
6449 	/* don't set md_minor, it is determined by which /dev/md* was
6450 	 * opened
6451 	 */
6452 	if (info->state & (1<<MD_SB_CLEAN))
6453 		mddev->recovery_cp = MaxSector;
6454 	else
6455 		mddev->recovery_cp = 0;
6456 	mddev->persistent    = ! info->not_persistent;
6457 	mddev->external	     = 0;
6458 
6459 	mddev->layout        = info->layout;
6460 	mddev->chunk_sectors = info->chunk_size >> 9;
6461 
6462 	mddev->max_disks     = MD_SB_DISKS;
6463 
6464 	if (mddev->persistent) {
6465 		mddev->flags         = 0;
6466 		mddev->sb_flags         = 0;
6467 	}
6468 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6469 
6470 	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6471 	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6472 	mddev->bitmap_info.offset = 0;
6473 
6474 	mddev->reshape_position = MaxSector;
6475 
6476 	/*
6477 	 * Generate a 128 bit UUID
6478 	 */
6479 	get_random_bytes(mddev->uuid, 16);
6480 
6481 	mddev->new_level = mddev->level;
6482 	mddev->new_chunk_sectors = mddev->chunk_sectors;
6483 	mddev->new_layout = mddev->layout;
6484 	mddev->delta_disks = 0;
6485 	mddev->reshape_backwards = 0;
6486 
6487 	return 0;
6488 }
6489 
6490 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6491 {
6492 	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6493 
6494 	if (mddev->external_size)
6495 		return;
6496 
6497 	mddev->array_sectors = array_sectors;
6498 }
6499 EXPORT_SYMBOL(md_set_array_sectors);
6500 
6501 static int update_size(struct mddev *mddev, sector_t num_sectors)
6502 {
6503 	struct md_rdev *rdev;
6504 	int rv;
6505 	int fit = (num_sectors == 0);
6506 
6507 	/* cluster raid doesn't support update size */
6508 	if (mddev_is_clustered(mddev))
6509 		return -EINVAL;
6510 
6511 	if (mddev->pers->resize == NULL)
6512 		return -EINVAL;
6513 	/* The "num_sectors" is the number of sectors of each device that
6514 	 * is used.  This can only make sense for arrays with redundancy.
6515 	 * linear and raid0 always use whatever space is available. We can only
6516 	 * consider changing this number if no resync or reconstruction is
6517 	 * happening, and if the new size is acceptable. It must fit before the
6518 	 * sb_start or, if that is <data_offset, it must fit before the size
6519 	 * of each device.  If num_sectors is zero, we find the largest size
6520 	 * that fits.
6521 	 */
6522 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6523 	    mddev->sync_thread)
6524 		return -EBUSY;
6525 	if (mddev->ro)
6526 		return -EROFS;
6527 
6528 	rdev_for_each(rdev, mddev) {
6529 		sector_t avail = rdev->sectors;
6530 
6531 		if (fit && (num_sectors == 0 || num_sectors > avail))
6532 			num_sectors = avail;
6533 		if (avail < num_sectors)
6534 			return -ENOSPC;
6535 	}
6536 	rv = mddev->pers->resize(mddev, num_sectors);
6537 	if (!rv)
6538 		revalidate_disk(mddev->gendisk);
6539 	return rv;
6540 }
6541 
6542 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6543 {
6544 	int rv;
6545 	struct md_rdev *rdev;
6546 	/* change the number of raid disks */
6547 	if (mddev->pers->check_reshape == NULL)
6548 		return -EINVAL;
6549 	if (mddev->ro)
6550 		return -EROFS;
6551 	if (raid_disks <= 0 ||
6552 	    (mddev->max_disks && raid_disks >= mddev->max_disks))
6553 		return -EINVAL;
6554 	if (mddev->sync_thread ||
6555 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6556 	    mddev->reshape_position != MaxSector)
6557 		return -EBUSY;
6558 
6559 	rdev_for_each(rdev, mddev) {
6560 		if (mddev->raid_disks < raid_disks &&
6561 		    rdev->data_offset < rdev->new_data_offset)
6562 			return -EINVAL;
6563 		if (mddev->raid_disks > raid_disks &&
6564 		    rdev->data_offset > rdev->new_data_offset)
6565 			return -EINVAL;
6566 	}
6567 
6568 	mddev->delta_disks = raid_disks - mddev->raid_disks;
6569 	if (mddev->delta_disks < 0)
6570 		mddev->reshape_backwards = 1;
6571 	else if (mddev->delta_disks > 0)
6572 		mddev->reshape_backwards = 0;
6573 
6574 	rv = mddev->pers->check_reshape(mddev);
6575 	if (rv < 0) {
6576 		mddev->delta_disks = 0;
6577 		mddev->reshape_backwards = 0;
6578 	}
6579 	return rv;
6580 }
6581 
6582 /*
6583  * update_array_info is used to change the configuration of an
6584  * on-line array.
6585  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
6586  * fields in the info are checked against the array.
6587  * Any differences that cannot be handled will cause an error.
6588  * Normally, only one change can be managed at a time.
6589  */
6590 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6591 {
6592 	int rv = 0;
6593 	int cnt = 0;
6594 	int state = 0;
6595 
6596 	/* calculate expected state, ignoring low bits */
6597 	if (mddev->bitmap && mddev->bitmap_info.offset)
6598 		state |= (1 << MD_SB_BITMAP_PRESENT);
6599 
6600 	if (mddev->major_version != info->major_version ||
6601 	    mddev->minor_version != info->minor_version ||
6602 /*	    mddev->patch_version != info->patch_version || */
6603 	    mddev->ctime         != info->ctime         ||
6604 	    mddev->level         != info->level         ||
6605 /*	    mddev->layout        != info->layout        || */
6606 	    mddev->persistent	 != !info->not_persistent ||
6607 	    mddev->chunk_sectors != info->chunk_size >> 9 ||
6608 	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6609 	    ((state^info->state) & 0xfffffe00)
6610 		)
6611 		return -EINVAL;
6612 	/* Check there is only one change */
6613 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6614 		cnt++;
6615 	if (mddev->raid_disks != info->raid_disks)
6616 		cnt++;
6617 	if (mddev->layout != info->layout)
6618 		cnt++;
6619 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6620 		cnt++;
6621 	if (cnt == 0)
6622 		return 0;
6623 	if (cnt > 1)
6624 		return -EINVAL;
6625 
6626 	if (mddev->layout != info->layout) {
6627 		/* Change layout
6628 		 * we don't need to do anything at the md level, the
6629 		 * personality will take care of it all.
6630 		 */
6631 		if (mddev->pers->check_reshape == NULL)
6632 			return -EINVAL;
6633 		else {
6634 			mddev->new_layout = info->layout;
6635 			rv = mddev->pers->check_reshape(mddev);
6636 			if (rv)
6637 				mddev->new_layout = mddev->layout;
6638 			return rv;
6639 		}
6640 	}
6641 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6642 		rv = update_size(mddev, (sector_t)info->size * 2);
6643 
6644 	if (mddev->raid_disks    != info->raid_disks)
6645 		rv = update_raid_disks(mddev, info->raid_disks);
6646 
6647 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6648 		if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
6649 			rv = -EINVAL;
6650 			goto err;
6651 		}
6652 		if (mddev->recovery || mddev->sync_thread) {
6653 			rv = -EBUSY;
6654 			goto err;
6655 		}
6656 		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6657 			struct bitmap *bitmap;
6658 			/* add the bitmap */
6659 			if (mddev->bitmap) {
6660 				rv = -EEXIST;
6661 				goto err;
6662 			}
6663 			if (mddev->bitmap_info.default_offset == 0) {
6664 				rv = -EINVAL;
6665 				goto err;
6666 			}
6667 			mddev->bitmap_info.offset =
6668 				mddev->bitmap_info.default_offset;
6669 			mddev->bitmap_info.space =
6670 				mddev->bitmap_info.default_space;
6671 			mddev->pers->quiesce(mddev, 1);
6672 			bitmap = bitmap_create(mddev, -1);
6673 			if (!IS_ERR(bitmap)) {
6674 				mddev->bitmap = bitmap;
6675 				rv = bitmap_load(mddev);
6676 			} else
6677 				rv = PTR_ERR(bitmap);
6678 			if (rv)
6679 				bitmap_destroy(mddev);
6680 			mddev->pers->quiesce(mddev, 0);
6681 		} else {
6682 			/* remove the bitmap */
6683 			if (!mddev->bitmap) {
6684 				rv = -ENOENT;
6685 				goto err;
6686 			}
6687 			if (mddev->bitmap->storage.file) {
6688 				rv = -EINVAL;
6689 				goto err;
6690 			}
6691 			if (mddev->bitmap_info.nodes) {
6692 				/* hold PW on all the bitmap locks */
6693 				if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
6694 					pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
6695 					rv = -EPERM;
6696 					md_cluster_ops->unlock_all_bitmaps(mddev);
6697 					goto err;
6698 				}
6699 
6700 				mddev->bitmap_info.nodes = 0;
6701 				md_cluster_ops->leave(mddev);
6702 			}
6703 			mddev->pers->quiesce(mddev, 1);
6704 			bitmap_destroy(mddev);
6705 			mddev->pers->quiesce(mddev, 0);
6706 			mddev->bitmap_info.offset = 0;
6707 		}
6708 	}
6709 	md_update_sb(mddev, 1);
6710 	return rv;
6711 err:
6712 	return rv;
6713 }
6714 
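/*
 * SET_DISK_FAULTY ioctl: mark the given member device as failed via
 * md_error().  Returns -EBUSY if the personality refused to fail it
 * (e.g. because it is the last working device).
 */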
6715 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6716 {
6717 	struct md_rdev *rdev;
6718 	int err = 0;
6719 
6720 	if (mddev->pers == NULL)
6721 		return -ENODEV;
6722 
6723 	rcu_read_lock();
6724 	rdev = find_rdev_rcu(mddev, dev);
6725 	if (!rdev)
6726 		err =  -ENODEV;
6727 	else {
6728 		md_error(mddev, rdev);
6729 		if (!test_bit(Faulty, &rdev->flags))
6730 			err = -EBUSY;
6731 	}
6732 	rcu_read_unlock();
6733 	return err;
6734 }
6735 
6736 /*
6737  * We have a problem here: there is no easy way to give a CHS
6738  * virtual geometry. We currently pretend that we have 2 heads and
6739  * 4 sectors (with a BIG number of cylinders...). This drives
6740  * dosfs just mad... ;-)
6741  */
6742 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6743 {
6744 	struct mddev *mddev = bdev->bd_disk->private_data;
6745 
6746 	geo->heads = 2;
6747 	geo->sectors = 4;
6748 	geo->cylinders = mddev->array_sectors / 8;
6749 	return 0;
6750 }
6751 
6752 static inline bool md_ioctl_valid(unsigned int cmd)
6753 {
6754 	switch (cmd) {
6755 	case ADD_NEW_DISK:
6756 	case BLKROSET:
6757 	case GET_ARRAY_INFO:
6758 	case GET_BITMAP_FILE:
6759 	case GET_DISK_INFO:
6760 	case HOT_ADD_DISK:
6761 	case HOT_REMOVE_DISK:
6762 	case RAID_AUTORUN:
6763 	case RAID_VERSION:
6764 	case RESTART_ARRAY_RW:
6765 	case RUN_ARRAY:
6766 	case SET_ARRAY_INFO:
6767 	case SET_BITMAP_FILE:
6768 	case SET_DISK_FAULTY:
6769 	case STOP_ARRAY:
6770 	case STOP_ARRAY_RO:
6771 	case CLUSTERED_DISK_NACK:
6772 		return true;
6773 	default:
6774 		return false;
6775 	}
6776 }
6777 
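/*
 * md_ioctl(): central ioctl dispatcher for an md block device.  It
 * validates the command, takes the mddev lock where needed and farms the
 * work out to the helpers above.  An illustrative userspace query (the
 * device node name is hypothetical) looks roughly like:
 *
 *	mdu_array_info_t info;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level %d, %d raid disks\n", info.level, info.raid_disks);
 */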
6778 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6779 			unsigned int cmd, unsigned long arg)
6780 {
6781 	int err = 0;
6782 	void __user *argp = (void __user *)arg;
6783 	struct mddev *mddev = NULL;
6784 	int ro;
6785 
6786 	if (!md_ioctl_valid(cmd))
6787 		return -ENOTTY;
6788 
6789 	switch (cmd) {
6790 	case RAID_VERSION:
6791 	case GET_ARRAY_INFO:
6792 	case GET_DISK_INFO:
6793 		break;
6794 	default:
6795 		if (!capable(CAP_SYS_ADMIN))
6796 			return -EACCES;
6797 	}
6798 
6799 	/*
6800 	 * Commands dealing with the RAID driver but not any
6801 	 * particular array:
6802 	 */
6803 	switch (cmd) {
6804 	case RAID_VERSION:
6805 		err = get_version(argp);
6806 		goto out;
6807 
6808 #ifndef MODULE
6809 	case RAID_AUTORUN:
6810 		err = 0;
6811 		autostart_arrays(arg);
6812 		goto out;
6813 #endif
6814 	default:;
6815 	}
6816 
6817 	/*
6818 	 * Commands creating/starting a new array:
6819 	 */
6820 
6821 	mddev = bdev->bd_disk->private_data;
6822 
6823 	if (!mddev) {
6824 		BUG();
6825 		goto out;
6826 	}
6827 
6828 	/* Some actions do not require the mutex */
6829 	switch (cmd) {
6830 	case GET_ARRAY_INFO:
6831 		if (!mddev->raid_disks && !mddev->external)
6832 			err = -ENODEV;
6833 		else
6834 			err = get_array_info(mddev, argp);
6835 		goto out;
6836 
6837 	case GET_DISK_INFO:
6838 		if (!mddev->raid_disks && !mddev->external)
6839 			err = -ENODEV;
6840 		else
6841 			err = get_disk_info(mddev, argp);
6842 		goto out;
6843 
6844 	case SET_DISK_FAULTY:
6845 		err = set_disk_faulty(mddev, new_decode_dev(arg));
6846 		goto out;
6847 
6848 	case GET_BITMAP_FILE:
6849 		err = get_bitmap_file(mddev, argp);
6850 		goto out;
6851 
6852 	}
6853 
6854 	if (cmd == ADD_NEW_DISK)
6855 		/* need to ensure md_delayed_delete() has completed */
6856 		flush_workqueue(md_misc_wq);
6857 
6858 	if (cmd == HOT_REMOVE_DISK)
6859 		/* need to ensure recovery thread has run */
6860 		wait_event_interruptible_timeout(mddev->sb_wait,
6861 						 !test_bit(MD_RECOVERY_NEEDED,
6862 							   &mddev->recovery),
6863 						 msecs_to_jiffies(5000));
6864 	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6865 		/* Need to flush page cache, and ensure no-one else opens
6866 		 * and writes
6867 		 */
6868 		mutex_lock(&mddev->open_mutex);
6869 		if (mddev->pers && atomic_read(&mddev->openers) > 1) {
6870 			mutex_unlock(&mddev->open_mutex);
6871 			err = -EBUSY;
6872 			goto out;
6873 		}
6874 		set_bit(MD_CLOSING, &mddev->flags);
6875 		mutex_unlock(&mddev->open_mutex);
6876 		sync_blockdev(bdev);
6877 	}
6878 	err = mddev_lock(mddev);
6879 	if (err) {
6880 		pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
6881 			 err, cmd);
6882 		goto out;
6883 	}
6884 
6885 	if (cmd == SET_ARRAY_INFO) {
6886 		mdu_array_info_t info;
6887 		if (!arg)
6888 			memset(&info, 0, sizeof(info));
6889 		else if (copy_from_user(&info, argp, sizeof(info))) {
6890 			err = -EFAULT;
6891 			goto unlock;
6892 		}
6893 		if (mddev->pers) {
6894 			err = update_array_info(mddev, &info);
6895 			if (err) {
6896 				pr_warn("md: couldn't update array info. %d\n", err);
6897 				goto unlock;
6898 			}
6899 			goto unlock;
6900 		}
6901 		if (!list_empty(&mddev->disks)) {
6902 			pr_warn("md: array %s already has disks!\n", mdname(mddev));
6903 			err = -EBUSY;
6904 			goto unlock;
6905 		}
6906 		if (mddev->raid_disks) {
6907 			pr_warn("md: array %s already initialised!\n", mdname(mddev));
6908 			err = -EBUSY;
6909 			goto unlock;
6910 		}
6911 		err = set_array_info(mddev, &info);
6912 		if (err) {
6913 			pr_warn("md: couldn't set array info. %d\n", err);
6914 			goto unlock;
6915 		}
6916 		goto unlock;
6917 	}
6918 
6919 	/*
6920 	 * Commands querying/configuring an existing array:
6921 	 */
6922 	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6923 	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6924 	if ((!mddev->raid_disks && !mddev->external)
6925 	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6926 	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6927 	    && cmd != GET_BITMAP_FILE) {
6928 		err = -ENODEV;
6929 		goto unlock;
6930 	}
6931 
6932 	/*
6933 	 * Commands even a read-only array can execute:
6934 	 */
6935 	switch (cmd) {
6936 	case RESTART_ARRAY_RW:
6937 		err = restart_array(mddev);
6938 		goto unlock;
6939 
6940 	case STOP_ARRAY:
6941 		err = do_md_stop(mddev, 0, bdev);
6942 		goto unlock;
6943 
6944 	case STOP_ARRAY_RO:
6945 		err = md_set_readonly(mddev, bdev);
6946 		goto unlock;
6947 
6948 	case HOT_REMOVE_DISK:
6949 		err = hot_remove_disk(mddev, new_decode_dev(arg));
6950 		goto unlock;
6951 
6952 	case ADD_NEW_DISK:
6953 		/* We can support ADD_NEW_DISK on read-only arrays
6954 		 * only if we are re-adding a preexisting device.
6955 		 * So require mddev->pers and MD_DISK_SYNC.
6956 		 */
6957 		if (mddev->pers) {
6958 			mdu_disk_info_t info;
6959 			if (copy_from_user(&info, argp, sizeof(info)))
6960 				err = -EFAULT;
6961 			else if (!(info.state & (1<<MD_DISK_SYNC)))
6962 				/* Need to clear read-only for this */
6963 				break;
6964 			else
6965 				err = add_new_disk(mddev, &info);
6966 			goto unlock;
6967 		}
6968 		break;
6969 
6970 	case BLKROSET:
6971 		if (get_user(ro, (int __user *)(arg))) {
6972 			err = -EFAULT;
6973 			goto unlock;
6974 		}
6975 		err = -EINVAL;
6976 
6977 		/* if the bdev is going readonly the value of mddev->ro
6978 		 * does not matter, no writes are coming
6979 		 */
6980 		if (ro)
6981 			goto unlock;
6982 
6983 		/* are we already prepared for writes? */
6984 		if (mddev->ro != 1)
6985 			goto unlock;
6986 
6987 		/* transitioning to readauto need only happen for
6988 		 * arrays that call md_write_start
6989 		 */
6990 		if (mddev->pers) {
6991 			err = restart_array(mddev);
6992 			if (err == 0) {
6993 				mddev->ro = 2;
6994 				set_disk_ro(mddev->gendisk, 0);
6995 			}
6996 		}
6997 		goto unlock;
6998 	}
6999 
7000 	/*
7001 	 * The remaining ioctls are changing the state of the
7002 	 * superblock, so we do not allow them on read-only arrays.
7003 	 */
7004 	if (mddev->ro && mddev->pers) {
7005 		if (mddev->ro == 2) {
7006 			mddev->ro = 0;
7007 			sysfs_notify_dirent_safe(mddev->sysfs_state);
7008 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7009 			/* mddev_unlock will wake thread */
7010 			/* If a device failed while we were read-only, we
7011 			 * need to make sure the metadata is updated now.
7012 			 */
7013 			if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7014 				mddev_unlock(mddev);
7015 				wait_event(mddev->sb_wait,
7016 					   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7017 					   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7018 				mddev_lock_nointr(mddev);
7019 			}
7020 		} else {
7021 			err = -EROFS;
7022 			goto unlock;
7023 		}
7024 	}
7025 
7026 	switch (cmd) {
7027 	case ADD_NEW_DISK:
7028 	{
7029 		mdu_disk_info_t info;
7030 		if (copy_from_user(&info, argp, sizeof(info)))
7031 			err = -EFAULT;
7032 		else
7033 			err = add_new_disk(mddev, &info);
7034 		goto unlock;
7035 	}
7036 
7037 	case CLUSTERED_DISK_NACK:
7038 		if (mddev_is_clustered(mddev))
7039 			md_cluster_ops->new_disk_ack(mddev, false);
7040 		else
7041 			err = -EINVAL;
7042 		goto unlock;
7043 
7044 	case HOT_ADD_DISK:
7045 		err = hot_add_disk(mddev, new_decode_dev(arg));
7046 		goto unlock;
7047 
7048 	case RUN_ARRAY:
7049 		err = do_md_run(mddev);
7050 		goto unlock;
7051 
7052 	case SET_BITMAP_FILE:
7053 		err = set_bitmap_file(mddev, (int)arg);
7054 		goto unlock;
7055 
7056 	default:
7057 		err = -EINVAL;
7058 		goto unlock;
7059 	}
7060 
7061 unlock:
7062 	if (mddev->hold_active == UNTIL_IOCTL &&
7063 	    err != -EINVAL)
7064 		mddev->hold_active = 0;
7065 	mddev_unlock(mddev);
7066 out:
7067 	return err;
7068 }
7069 #ifdef CONFIG_COMPAT
7070 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7071 		    unsigned int cmd, unsigned long arg)
7072 {
7073 	switch (cmd) {
7074 	case HOT_REMOVE_DISK:
7075 	case HOT_ADD_DISK:
7076 	case SET_DISK_FAULTY:
7077 	case SET_BITMAP_FILE:
7078 		/* These take in integer arg, do not convert */
7079 		/* These take an integer arg, do not convert */
7080 	default:
7081 		arg = (unsigned long)compat_ptr(arg);
7082 		break;
7083 	}
7084 
7085 	return md_ioctl(bdev, mode, cmd, arg);
7086 }
7087 #endif /* CONFIG_COMPAT */
7088 
7089 static int md_open(struct block_device *bdev, fmode_t mode)
7090 {
7091 	/*
7092 	 * Succeed if we can lock the mddev, which confirms that
7093 	 * it isn't being stopped right now.
7094 	 */
7095 	struct mddev *mddev = mddev_find(bdev->bd_dev);
7096 	int err;
7097 
7098 	if (!mddev)
7099 		return -ENODEV;
7100 
7101 	if (mddev->gendisk != bdev->bd_disk) {
7102 		/* we are racing with mddev_put which is discarding this
7103 		 * bd_disk.
7104 		 */
7105 		mddev_put(mddev);
7106 		/* Wait until bdev->bd_disk is definitely gone */
7107 		flush_workqueue(md_misc_wq);
7108 		/* Then retry the open from the top */
7109 		return -ERESTARTSYS;
7110 	}
7111 	BUG_ON(mddev != bdev->bd_disk->private_data);
7112 
7113 	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7114 		goto out;
7115 
7116 	if (test_bit(MD_CLOSING, &mddev->flags)) {
7117 		mutex_unlock(&mddev->open_mutex);
7118 		err = -ENODEV;
7119 		goto out;
7120 	}
7121 
7122 	err = 0;
7123 	atomic_inc(&mddev->openers);
7124 	mutex_unlock(&mddev->open_mutex);
7125 
7126 	check_disk_change(bdev);
7127  out:
7128 	if (err)
7129 		mddev_put(mddev);
7130 	return err;
7131 }
7132 
7133 static void md_release(struct gendisk *disk, fmode_t mode)
7134 {
7135 	struct mddev *mddev = disk->private_data;
7136 
7137 	BUG_ON(!mddev);
7138 	atomic_dec(&mddev->openers);
7139 	mddev_put(mddev);
7140 }
7141 
7142 static int md_media_changed(struct gendisk *disk)
7143 {
7144 	struct mddev *mddev = disk->private_data;
7145 
7146 	return mddev->changed;
7147 }
7148 
7149 static int md_revalidate(struct gendisk *disk)
7150 {
7151 	struct mddev *mddev = disk->private_data;
7152 
7153 	mddev->changed = 0;
7154 	return 0;
7155 }
7156 static const struct block_device_operations md_fops =
7157 {
7158 	.owner		= THIS_MODULE,
7159 	.open		= md_open,
7160 	.release	= md_release,
7161 	.ioctl		= md_ioctl,
7162 #ifdef CONFIG_COMPAT
7163 	.compat_ioctl	= md_compat_ioctl,
7164 #endif
7165 	.getgeo		= md_getgeo,
7166 	.media_changed  = md_media_changed,
7167 	.revalidate_disk= md_revalidate,
7168 };
7169 
7170 static int md_thread(void *arg)
7171 {
7172 	struct md_thread *thread = arg;
7173 
7174 	/*
7175 	 * md_thread is a 'system-thread', its priority should be very
7176 	 * high. We avoid resource deadlocks individually in each
7177 	 * raid personality. (RAID5 does preallocation) We also use RR and
7178 	 * the very same RT priority as kswapd, thus we will never get
7179 	 * into a priority inversion deadlock.
7180 	 *
7181 	 * we definitely have to have equal or higher priority than
7182 	 * bdflush, otherwise bdflush will deadlock if there are too
7183 	 * many dirty RAID5 blocks.
7184 	 */
7185 
7186 	allow_signal(SIGKILL);
7187 	while (!kthread_should_stop()) {
7188 
7189 		/* We need to wait INTERRUPTIBLE so that
7190 		 * we don't add to the load-average.
7191 		 * That means we need to be sure no signals are
7192 		 * pending
7193 		 */
7194 		if (signal_pending(current))
7195 			flush_signals(current);
7196 
7197 		wait_event_interruptible_timeout
7198 			(thread->wqueue,
7199 			 test_bit(THREAD_WAKEUP, &thread->flags)
7200 			 || kthread_should_stop() || kthread_should_park(),
7201 			 thread->timeout);
7202 
7203 		clear_bit(THREAD_WAKEUP, &thread->flags);
7204 		if (kthread_should_park())
7205 			kthread_parkme();
7206 		if (!kthread_should_stop())
7207 			thread->run(thread);
7208 	}
7209 
7210 	return 0;
7211 }
7212 
7213 void md_wakeup_thread(struct md_thread *thread)
7214 {
7215 	if (thread) {
7216 		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7217 		set_bit(THREAD_WAKEUP, &thread->flags);
7218 		wake_up(&thread->wqueue);
7219 	}
7220 }
7221 EXPORT_SYMBOL(md_wakeup_thread);
7222 
7223 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7224 		struct mddev *mddev, const char *name)
7225 {
7226 	struct md_thread *thread;
7227 
7228 	thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7229 	if (!thread)
7230 		return NULL;
7231 
7232 	init_waitqueue_head(&thread->wqueue);
7233 
7234 	thread->run = run;
7235 	thread->mddev = mddev;
7236 	thread->timeout = MAX_SCHEDULE_TIMEOUT;
7237 	thread->tsk = kthread_run(md_thread, thread,
7238 				  "%s_%s",
7239 				  mdname(thread->mddev),
7240 				  name);
7241 	if (IS_ERR(thread->tsk)) {
7242 		kfree(thread);
7243 		return NULL;
7244 	}
7245 	return thread;
7246 }
7247 EXPORT_SYMBOL(md_register_thread);
7248 
7249 void md_unregister_thread(struct md_thread **threadp)
7250 {
7251 	struct md_thread *thread = *threadp;
7252 	if (!thread)
7253 		return;
7254 	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7255 	/* Locking ensures that mddev_unlock does not wake_up a
7256 	 * non-existent thread
7257 	 */
7258 	spin_lock(&pers_lock);
7259 	*threadp = NULL;
7260 	spin_unlock(&pers_lock);
7261 
7262 	kthread_stop(thread->tsk);
7263 	kfree(thread);
7264 }
7265 EXPORT_SYMBOL(md_unregister_thread);
7266 
7267 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7268 {
7269 	if (!rdev || test_bit(Faulty, &rdev->flags))
7270 		return;
7271 
7272 	if (!mddev->pers || !mddev->pers->error_handler)
7273 		return;
7274 	mddev->pers->error_handler(mddev,rdev);
7275 	if (mddev->degraded)
7276 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7277 	sysfs_notify_dirent_safe(rdev->sysfs_state);
7278 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7279 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7280 	md_wakeup_thread(mddev->thread);
7281 	if (mddev->event_work.func)
7282 		queue_work(md_misc_wq, &mddev->event_work);
7283 	md_new_event(mddev);
7284 }
7285 EXPORT_SYMBOL(md_error);
7286 
7287 /* seq_file implementation /proc/mdstat */
7288 
7289 static void status_unused(struct seq_file *seq)
7290 {
7291 	int i = 0;
7292 	struct md_rdev *rdev;
7293 
7294 	seq_printf(seq, "unused devices: ");
7295 
7296 	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7297 		char b[BDEVNAME_SIZE];
7298 		i++;
7299 		seq_printf(seq, "%s ",
7300 			      bdevname(rdev->bdev,b));
7301 	}
7302 	if (!i)
7303 		seq_printf(seq, "<none>");
7304 
7305 	seq_printf(seq, "\n");
7306 }
7307 
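/*
 * status_resync(): emit the progress part of a /proc/mdstat line for a
 * running resync/recovery/reshape/check, roughly of the form (numbers
 * illustrative):
 *
 *	[=====>...............]  resync = 25.0% (1310720/5242880)
 *	finish=3.2min speed=20480K/sec
 *
 * Returns 1 if a progress line was emitted, 0 otherwise.
 */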
7308 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7309 {
7310 	sector_t max_sectors, resync, res;
7311 	unsigned long dt, db;
7312 	sector_t rt;
7313 	int scale;
7314 	unsigned int per_milli;
7315 
7316 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7317 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7318 		max_sectors = mddev->resync_max_sectors;
7319 	else
7320 		max_sectors = mddev->dev_sectors;
7321 
7322 	resync = mddev->curr_resync;
7323 	if (resync <= 3) {
7324 		if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7325 			/* Still cleaning up */
7326 			resync = max_sectors;
7327 	} else
7328 		resync -= atomic_read(&mddev->recovery_active);
7329 
7330 	if (resync == 0) {
7331 		if (mddev->recovery_cp < MaxSector) {
7332 			seq_printf(seq, "\tresync=PENDING");
7333 			return 1;
7334 		}
7335 		return 0;
7336 	}
7337 	if (resync < 3) {
7338 		seq_printf(seq, "\tresync=DELAYED");
7339 		return 1;
7340 	}
7341 
7342 	WARN_ON(max_sectors == 0);
7343 	/* Pick 'scale' such that (resync>>scale)*1000 will fit
7344 	 * in a sector_t, and (max_sectors>>scale) will fit in a
7345 	 * u32, as those are the requirements for sector_div.
7346 	 * Thus 'scale' must be at least 10
7347 	 */
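	/*
	 * Worked example (illustrative numbers only): resync = 1000000000
	 * and max_sectors = 2000000000 leave scale at 10, so
	 * res = (1000000000>>10)*1000 / ((2000000000>>10)+1) = 499,
	 * i.e. per_milli = 499 and the status line shows "49.9%"
	 * (the "+1" and integer truncation round slightly down).
	 */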
7348 	scale = 10;
7349 	if (sizeof(sector_t) > sizeof(unsigned long)) {
7350 		while ( max_sectors/2 > (1ULL<<(scale+32)))
7351 			scale++;
7352 	}
7353 	res = (resync>>scale)*1000;
7354 	sector_div(res, (u32)((max_sectors>>scale)+1));
7355 
7356 	per_milli = res;
7357 	{
7358 		int i, x = per_milli/50, y = 20-x;
7359 		seq_printf(seq, "[");
7360 		for (i = 0; i < x; i++)
7361 			seq_printf(seq, "=");
7362 		seq_printf(seq, ">");
7363 		for (i = 0; i < y; i++)
7364 			seq_printf(seq, ".");
7365 		seq_printf(seq, "] ");
7366 	}
7367 	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7368 		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7369 		    "reshape" :
7370 		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7371 		     "check" :
7372 		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7373 		      "resync" : "recovery"))),
7374 		   per_milli/10, per_milli % 10,
7375 		   (unsigned long long) resync/2,
7376 		   (unsigned long long) max_sectors/2);
7377 
7378 	/*
7379 	 * dt: time from mark until now
7380 	 * db: blocks written from mark until now
7381 	 * rt: remaining time
7382 	 *
7383 	 * rt is a sector_t, so could be 32bit or 64bit.
7384 	 * So we divide before we multiply in case it is 32bit and close
7385 	 * to the limit.
7386 	 * We scale the divisor (db) by 32 to avoid losing precision
7387 	 * near the end of resync when the number of remaining sectors
7388 	 * is close to 'db'.
7389 	 * We then divide rt by 32 after multiplying by db to compensate.
7390 	 * The '+1' avoids division by zero if db is very small.
7391 	 */
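	/*
	 * Worked example (illustrative numbers only): dt = 3 seconds,
	 * db = 64000 sectors and 1000000 sectors remaining give
	 * rt = (1000000 / (64000/32 + 1)) * 3 >> 5 = 499 * 3 >> 5 = 46s,
	 * printed as "finish=0.7min", with a speed of 64000/2/3 = 10666K/sec.
	 */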
7392 	dt = ((jiffies - mddev->resync_mark) / HZ);
7393 	if (!dt) dt++;
7394 	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
7395 		- mddev->resync_mark_cnt;
7396 
7397 	rt = max_sectors - resync;    /* number of remaining sectors */
7398 	sector_div(rt, db/32+1);
7399 	rt *= dt;
7400 	rt >>= 5;
7401 
7402 	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7403 		   ((unsigned long)rt % 60)/6);
7404 
7405 	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7406 	return 1;
7407 }
7408 
7409 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7410 {
7411 	struct list_head *tmp;
7412 	loff_t l = *pos;
7413 	struct mddev *mddev;
7414 
7415 	if (l >= 0x10000)
7416 		return NULL;
7417 	if (!l--)
7418 		/* header */
7419 		return (void*)1;
7420 
7421 	spin_lock(&all_mddevs_lock);
7422 	list_for_each(tmp,&all_mddevs)
7423 		if (!l--) {
7424 			mddev = list_entry(tmp, struct mddev, all_mddevs);
7425 			mddev_get(mddev);
7426 			spin_unlock(&all_mddevs_lock);
7427 			return mddev;
7428 		}
7429 	spin_unlock(&all_mddevs_lock);
7430 	if (!l--)
7431 		return (void*)2;/* tail */
7432 	return NULL;
7433 }
7434 
7435 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7436 {
7437 	struct list_head *tmp;
7438 	struct mddev *next_mddev, *mddev = v;
7439 
7440 	++*pos;
7441 	if (v == (void*)2)
7442 		return NULL;
7443 
7444 	spin_lock(&all_mddevs_lock);
7445 	if (v == (void*)1)
7446 		tmp = all_mddevs.next;
7447 	else
7448 		tmp = mddev->all_mddevs.next;
7449 	if (tmp != &all_mddevs)
7450 		next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7451 	else {
7452 		next_mddev = (void*)2;
7453 		*pos = 0x10000;
7454 	}
7455 	spin_unlock(&all_mddevs_lock);
7456 
7457 	if (v != (void*)1)
7458 		mddev_put(mddev);
7459 	return next_mddev;
7460 
7461 }
7462 
7463 static void md_seq_stop(struct seq_file *seq, void *v)
7464 {
7465 	struct mddev *mddev = v;
7466 
7467 	if (mddev && v != (void*)1 && v != (void*)2)
7468 		mddev_put(mddev);
7469 }
7470 
7471 static int md_seq_show(struct seq_file *seq, void *v)
7472 {
7473 	struct mddev *mddev = v;
7474 	sector_t sectors;
7475 	struct md_rdev *rdev;
7476 
7477 	if (v == (void*)1) {
7478 		struct md_personality *pers;
7479 		seq_printf(seq, "Personalities : ");
7480 		spin_lock(&pers_lock);
7481 		list_for_each_entry(pers, &pers_list, list)
7482 			seq_printf(seq, "[%s] ", pers->name);
7483 
7484 		spin_unlock(&pers_lock);
7485 		seq_printf(seq, "\n");
7486 		seq->poll_event = atomic_read(&md_event_count);
7487 		return 0;
7488 	}
7489 	if (v == (void*)2) {
7490 		status_unused(seq);
7491 		return 0;
7492 	}
7493 
7494 	spin_lock(&mddev->lock);
7495 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7496 		seq_printf(seq, "%s : %sactive", mdname(mddev),
7497 						mddev->pers ? "" : "in");
7498 		if (mddev->pers) {
7499 			if (mddev->ro==1)
7500 				seq_printf(seq, " (read-only)");
7501 			if (mddev->ro==2)
7502 				seq_printf(seq, " (auto-read-only)");
7503 			seq_printf(seq, " %s", mddev->pers->name);
7504 		}
7505 
7506 		sectors = 0;
7507 		rcu_read_lock();
7508 		rdev_for_each_rcu(rdev, mddev) {
7509 			char b[BDEVNAME_SIZE];
7510 			seq_printf(seq, " %s[%d]",
7511 				bdevname(rdev->bdev,b), rdev->desc_nr);
7512 			if (test_bit(WriteMostly, &rdev->flags))
7513 				seq_printf(seq, "(W)");
7514 			if (test_bit(Journal, &rdev->flags))
7515 				seq_printf(seq, "(J)");
7516 			if (test_bit(Faulty, &rdev->flags)) {
7517 				seq_printf(seq, "(F)");
7518 				continue;
7519 			}
7520 			if (rdev->raid_disk < 0)
7521 				seq_printf(seq, "(S)"); /* spare */
7522 			if (test_bit(Replacement, &rdev->flags))
7523 				seq_printf(seq, "(R)");
7524 			sectors += rdev->sectors;
7525 		}
7526 		rcu_read_unlock();
7527 
7528 		if (!list_empty(&mddev->disks)) {
7529 			if (mddev->pers)
7530 				seq_printf(seq, "\n      %llu blocks",
7531 					   (unsigned long long)
7532 					   mddev->array_sectors / 2);
7533 			else
7534 				seq_printf(seq, "\n      %llu blocks",
7535 					   (unsigned long long)sectors / 2);
7536 		}
7537 		if (mddev->persistent) {
7538 			if (mddev->major_version != 0 ||
7539 			    mddev->minor_version != 90) {
7540 				seq_printf(seq," super %d.%d",
7541 					   mddev->major_version,
7542 					   mddev->minor_version);
7543 			}
7544 		} else if (mddev->external)
7545 			seq_printf(seq, " super external:%s",
7546 				   mddev->metadata_type);
7547 		else
7548 			seq_printf(seq, " super non-persistent");
7549 
7550 		if (mddev->pers) {
7551 			mddev->pers->status(seq, mddev);
7552 			seq_printf(seq, "\n      ");
7553 			if (mddev->pers->sync_request) {
7554 				if (status_resync(seq, mddev))
7555 					seq_printf(seq, "\n      ");
7556 			}
7557 		} else
7558 			seq_printf(seq, "\n       ");
7559 
7560 		bitmap_status(seq, mddev->bitmap);
7561 
7562 		seq_printf(seq, "\n");
7563 	}
7564 	spin_unlock(&mddev->lock);
7565 
7566 	return 0;
7567 }
7568 
7569 static const struct seq_operations md_seq_ops = {
7570 	.start  = md_seq_start,
7571 	.next   = md_seq_next,
7572 	.stop   = md_seq_stop,
7573 	.show   = md_seq_show,
7574 };
7575 
7576 static int md_seq_open(struct inode *inode, struct file *file)
7577 {
7578 	struct seq_file *seq;
7579 	int error;
7580 
7581 	error = seq_open(file, &md_seq_ops);
7582 	if (error)
7583 		return error;
7584 
7585 	seq = file->private_data;
7586 	seq->poll_event = atomic_read(&md_event_count);
7587 	return error;
7588 }
7589 
7590 static int md_unloading;
7591 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7592 {
7593 	struct seq_file *seq = filp->private_data;
7594 	int mask;
7595 
7596 	if (md_unloading)
7597 		return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7598 	poll_wait(filp, &md_event_waiters, wait);
7599 
7600 	/* always allow read */
7601 	mask = POLLIN | POLLRDNORM;
7602 
7603 	if (seq->poll_event != atomic_read(&md_event_count))
7604 		mask |= POLLERR | POLLPRI;
7605 	return mask;
7606 }
7607 
7608 static const struct file_operations md_seq_fops = {
7609 	.owner		= THIS_MODULE,
7610 	.open           = md_seq_open,
7611 	.read           = seq_read,
7612 	.llseek         = seq_lseek,
7613 	.release	= seq_release_private,
7614 	.poll		= mdstat_poll,
7615 };
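/*
 * Note (illustrative, not part of this file): this poll support is what
 * lets a monitor keep /proc/mdstat open and sleep in poll()/select()
 * rather than re-reading it periodically; when md_new_event() bumps
 * md_event_count the waiter sees POLLERR|POLLPRI and can seek back to
 * offset 0 and re-read the file for the new state.
 */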
7616 
7617 int register_md_personality(struct md_personality *p)
7618 {
7619 	pr_debug("md: %s personality registered for level %d\n",
7620 		 p->name, p->level);
7621 	spin_lock(&pers_lock);
7622 	list_add_tail(&p->list, &pers_list);
7623 	spin_unlock(&pers_lock);
7624 	return 0;
7625 }
7626 EXPORT_SYMBOL(register_md_personality);
7627 
7628 int unregister_md_personality(struct md_personality *p)
7629 {
7630 	pr_debug("md: %s personality unregistered\n", p->name);
7631 	spin_lock(&pers_lock);
7632 	list_del_init(&p->list);
7633 	spin_unlock(&pers_lock);
7634 	return 0;
7635 }
7636 EXPORT_SYMBOL(unregister_md_personality);
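/*
 * Illustrative usage (sketch only): a personality module registers and
 * unregisters itself from its init/exit hooks, e.g. with a hypothetical
 * struct md_personality myraid_personality:
 *
 *	static int __init myraid_init(void)
 *	{
 *		return register_md_personality(&myraid_personality);
 *	}
 *
 *	static void __exit myraid_exit(void)
 *	{
 *		unregister_md_personality(&myraid_personality);
 *	}
 */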
7637 
7638 int register_md_cluster_operations(struct md_cluster_operations *ops,
7639 				   struct module *module)
7640 {
7641 	int ret = 0;
7642 	spin_lock(&pers_lock);
7643 	if (md_cluster_ops != NULL)
7644 		ret = -EALREADY;
7645 	else {
7646 		md_cluster_ops = ops;
7647 		md_cluster_mod = module;
7648 	}
7649 	spin_unlock(&pers_lock);
7650 	return ret;
7651 }
7652 EXPORT_SYMBOL(register_md_cluster_operations);
7653 
7654 int unregister_md_cluster_operations(void)
7655 {
7656 	spin_lock(&pers_lock);
7657 	md_cluster_ops = NULL;
7658 	spin_unlock(&pers_lock);
7659 	return 0;
7660 }
7661 EXPORT_SYMBOL(unregister_md_cluster_operations);
7662 
7663 int md_setup_cluster(struct mddev *mddev, int nodes)
7664 {
7665 	if (!md_cluster_ops)
7666 		request_module("md-cluster");
7667 	spin_lock(&pers_lock);
7668 	/* ensure module won't be unloaded */
7669 	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
7670 		pr_warn("can't find md-cluster module or get its reference.\n");
7671 		spin_unlock(&pers_lock);
7672 		return -ENOENT;
7673 	}
7674 	spin_unlock(&pers_lock);
7675 
7676 	return md_cluster_ops->join(mddev, nodes);
7677 }
7678 
7679 void md_cluster_stop(struct mddev *mddev)
7680 {
7681 	if (!md_cluster_ops)
7682 		return;
7683 	md_cluster_ops->leave(mddev);
7684 	module_put(md_cluster_mod);
7685 }
7686 
7687 static int is_mddev_idle(struct mddev *mddev, int init)
7688 {
7689 	struct md_rdev *rdev;
7690 	int idle;
7691 	int curr_events;
7692 
7693 	idle = 1;
7694 	rcu_read_lock();
7695 	rdev_for_each_rcu(rdev, mddev) {
7696 		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7697 		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7698 			      (int)part_stat_read(&disk->part0, sectors[1]) -
7699 			      atomic_read(&disk->sync_io);
7700 		/* sync IO will cause sync_io to increase before the disk_stats
7701 		 * as sync_io is counted when a request starts, and
7702 		 * disk_stats is counted when it completes.
7703 		 * So resync activity will cause curr_events to be smaller than
7704 		 * when there was no such activity.
7705 		 * non-sync IO will cause disk_stats to increase without
7706 		 * increasing sync_io so curr_events will (eventually)
7707 		 * be larger than it was before.  Once it becomes
7708 		 * substantially larger, the test below will cause
7709 		 * the array to appear non-idle, and resync will slow
7710 		 * down.
7711 		 * If there is a lot of outstanding resync activity when
7712 		 * we set last_event to curr_events, then all that activity
7713 		 * completing might cause the array to appear non-idle
7714 		 * and resync will be slowed down even though there might
7715 		 * not have been non-resync activity.  This will only
7716 		 * happen once though.  'last_events' will soon reflect
7717 		 * the state where there is little or no outstanding
7718 		 * resync requests, and further resync activity will
7719 		 * always make curr_events less than last_events.
7720 		 *
7721 		 */
7722 		if (init || curr_events - rdev->last_events > 64) {
7723 			rdev->last_events = curr_events;
7724 			idle = 0;
7725 		}
7726 	}
7727 	rcu_read_unlock();
7728 	return idle;
7729 }
7730 
7731 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7732 {
7733 	/* another "blocks" (512byte) blocks have been synced */
7734 	atomic_sub(blocks, &mddev->recovery_active);
7735 	wake_up(&mddev->recovery_wait);
7736 	if (!ok) {
7737 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7738 		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7739 		md_wakeup_thread(mddev->thread);
7740 		/* stop recovery, signal do_sync ... */
7741 	}
7742 }
7743 EXPORT_SYMBOL(md_done_sync);
7744 
7745 /* md_write_start(mddev, bi)
7746  * If we need to update some array metadata (e.g. 'active' flag
7747  * in superblock) before writing, schedule a superblock update
7748  * and wait for it to complete.
7749  */
7750 void md_write_start(struct mddev *mddev, struct bio *bi)
7751 {
7752 	int did_change = 0;
7753 	if (bio_data_dir(bi) != WRITE)
7754 		return;
7755 
7756 	BUG_ON(mddev->ro == 1);
7757 	if (mddev->ro == 2) {
7758 		/* need to switch to read/write */
7759 		mddev->ro = 0;
7760 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7761 		md_wakeup_thread(mddev->thread);
7762 		md_wakeup_thread(mddev->sync_thread);
7763 		did_change = 1;
7764 	}
7765 	atomic_inc(&mddev->writes_pending);
7766 	if (mddev->safemode == 1)
7767 		mddev->safemode = 0;
7768 	if (mddev->in_sync) {
7769 		spin_lock(&mddev->lock);
7770 		if (mddev->in_sync) {
7771 			mddev->in_sync = 0;
7772 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
7773 			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
7774 			md_wakeup_thread(mddev->thread);
7775 			did_change = 1;
7776 		}
7777 		spin_unlock(&mddev->lock);
7778 	}
7779 	if (did_change)
7780 		sysfs_notify_dirent_safe(mddev->sysfs_state);
7781 	wait_event(mddev->sb_wait,
7782 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7783 }
7784 EXPORT_SYMBOL(md_write_start);
7785 
7786 void md_write_end(struct mddev *mddev)
7787 {
7788 	if (atomic_dec_and_test(&mddev->writes_pending)) {
7789 		if (mddev->safemode == 2)
7790 			md_wakeup_thread(mddev->thread);
7791 		else if (mddev->safemode_delay)
7792 			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7793 	}
7794 }
7795 EXPORT_SYMBOL(md_write_end);
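/*
 * Illustrative pairing (sketch only): a personality brackets the writes
 * it submits with these two calls, roughly
 *
 *	md_write_start(mddev, bio);	(may block for a superblock update)
 *	... submit the write to the member devices ...
 *	md_write_end(mddev);		(from the write-completion path)
 *
 * so writes_pending tracks in-flight writes and the safemode timer only
 * fires once the array has gone quiet.
 */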
7796 
7797 /* md_allow_write(mddev)
7798  * Calling this ensures that the array is marked 'active' so that writes
7799  * may proceed without blocking.  It is important to call this before
7800  * attempting a GFP_KERNEL allocation while holding the mddev lock.
7801  * Must be called with mddev_lock held.
7802  *
7803  * In the ->external case MD_SB_CHANGE_PENDING cannot be cleared until mddev->lock
7804  * is dropped, so return -EAGAIN after notifying userspace.
7805  */
7806 int md_allow_write(struct mddev *mddev)
7807 {
7808 	if (!mddev->pers)
7809 		return 0;
7810 	if (mddev->ro)
7811 		return 0;
7812 	if (!mddev->pers->sync_request)
7813 		return 0;
7814 
7815 	spin_lock(&mddev->lock);
7816 	if (mddev->in_sync) {
7817 		mddev->in_sync = 0;
7818 		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
7819 		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
7820 		if (mddev->safemode_delay &&
7821 		    mddev->safemode == 0)
7822 			mddev->safemode = 1;
7823 		spin_unlock(&mddev->lock);
7824 		md_update_sb(mddev, 0);
7825 		sysfs_notify_dirent_safe(mddev->sysfs_state);
7826 	} else
7827 		spin_unlock(&mddev->lock);
7828 
7829 	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
7830 		return -EAGAIN;
7831 	else
7832 		return 0;
7833 }
7834 EXPORT_SYMBOL_GPL(md_allow_write);
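/*
 * Illustrative usage (sketch only): a caller that needs a GFP_KERNEL
 * allocation while holding the mddev lock might do
 *
 *	err = md_allow_write(mddev);
 *	if (err == -EAGAIN)
 *		err = 0;	(metadata update pending, writes still allowed)
 *	buf = kmalloc(size, GFP_KERNEL);
 *
 * where buf and size are hypothetical; the point is that the array is
 * marked 'active' first, so writeback triggered by the allocation cannot
 * block on a superblock update that itself needs the mddev lock.
 */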
7835 
7836 #define SYNC_MARKS	10
7837 #define	SYNC_MARK_STEP	(3*HZ)
7838 #define UPDATE_FREQUENCY (5*60*HZ)
7839 void md_do_sync(struct md_thread *thread)
7840 {
7841 	struct mddev *mddev = thread->mddev;
7842 	struct mddev *mddev2;
7843 	unsigned int currspeed = 0,
7844 		 window;
7845 	sector_t max_sectors,j, io_sectors, recovery_done;
7846 	unsigned long mark[SYNC_MARKS];
7847 	unsigned long update_time;
7848 	sector_t mark_cnt[SYNC_MARKS];
7849 	int last_mark,m;
7850 	struct list_head *tmp;
7851 	sector_t last_check;
7852 	int skipped = 0;
7853 	struct md_rdev *rdev;
7854 	char *desc, *action = NULL;
7855 	struct blk_plug plug;
7856 	int ret;
7857 
7858 	/* just in case the thread restarts... */
7859 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7860 		return;
7861 	if (mddev->ro) {/* never try to sync a read-only array */
7862 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7863 		return;
7864 	}
7865 
7866 	if (mddev_is_clustered(mddev)) {
7867 		ret = md_cluster_ops->resync_start(mddev);
7868 		if (ret)
7869 			goto skip;
7870 
7871 		set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
7872 		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7873 			test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
7874 			test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
7875 		     && ((unsigned long long)mddev->curr_resync_completed
7876 			 < (unsigned long long)mddev->resync_max_sectors))
7877 			goto skip;
7878 	}
7879 
7880 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7881 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7882 			desc = "data-check";
7883 			action = "check";
7884 		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7885 			desc = "requested-resync";
7886 			action = "repair";
7887 		} else
7888 			desc = "resync";
7889 	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7890 		desc = "reshape";
7891 	else
7892 		desc = "recovery";
7893 
7894 	mddev->last_sync_action = action ?: desc;
7895 
7896 	/* we overload curr_resync somewhat here.
7897 	 * 0 == not engaged in resync at all
7898 	 * 2 == checking that there is no conflict with another sync
7899 	 * 1 == like 2, but have yielded to allow conflicting resync to
7900 	 *		commence
7901 	 * other == active in resync - this many blocks
7902 	 *
7903 	 * Before starting a resync we must have set curr_resync to
7904 	 * 2, and then checked that every "conflicting" array has curr_resync
7905 	 * less than ours.  When we find one that is the same or higher
7906 	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
7907 	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
7908 	 * This will mean we have to start checking from the beginning again.
7909 	 *
7910 	 */
7911 
7912 	do {
7913 		int mddev2_minor = -1;
7914 		mddev->curr_resync = 2;
7915 
7916 	try_again:
7917 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7918 			goto skip;
7919 		for_each_mddev(mddev2, tmp) {
7920 			if (mddev2 == mddev)
7921 				continue;
7922 			if (!mddev->parallel_resync
7923 			&&  mddev2->curr_resync
7924 			&&  match_mddev_units(mddev, mddev2)) {
7925 				DEFINE_WAIT(wq);
7926 				if (mddev < mddev2 && mddev->curr_resync == 2) {
7927 					/* arbitrarily yield */
7928 					mddev->curr_resync = 1;
7929 					wake_up(&resync_wait);
7930 				}
7931 				if (mddev > mddev2 && mddev->curr_resync == 1)
7932 					/* no need to wait here, we can wait the next
7933 					 * time 'round when curr_resync == 2
7934 					 */
7935 					continue;
7936 				/* We need to wait 'interruptible' so as not to
7937 				 * contribute to the load average, and not to
7938 				 * be caught by 'softlockup'
7939 				 */
7940 				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7941 				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7942 				    mddev2->curr_resync >= mddev->curr_resync) {
7943 					if (mddev2_minor != mddev2->md_minor) {
7944 						mddev2_minor = mddev2->md_minor;
7945 						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
7946 							desc, mdname(mddev),
7947 							mdname(mddev2));
7948 					}
7949 					mddev_put(mddev2);
7950 					if (signal_pending(current))
7951 						flush_signals(current);
7952 					schedule();
7953 					finish_wait(&resync_wait, &wq);
7954 					goto try_again;
7955 				}
7956 				finish_wait(&resync_wait, &wq);
7957 			}
7958 		}
7959 	} while (mddev->curr_resync < 2);
7960 
7961 	j = 0;
7962 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7963 		/* resync follows the size requested by the personality,
7964 		 * which defaults to physical size, but can be virtual size
7965 		 */
7966 		max_sectors = mddev->resync_max_sectors;
7967 		atomic64_set(&mddev->resync_mismatches, 0);
7968 		/* we don't use the checkpoint if there's a bitmap */
7969 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7970 			j = mddev->resync_min;
7971 		else if (!mddev->bitmap)
7972 			j = mddev->recovery_cp;
7973 
7974 	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7975 		max_sectors = mddev->resync_max_sectors;
7976 	else {
7977 		/* recovery follows the physical size of devices */
7978 		max_sectors = mddev->dev_sectors;
7979 		j = MaxSector;
7980 		rcu_read_lock();
7981 		rdev_for_each_rcu(rdev, mddev)
7982 			if (rdev->raid_disk >= 0 &&
7983 			    !test_bit(Journal, &rdev->flags) &&
7984 			    !test_bit(Faulty, &rdev->flags) &&
7985 			    !test_bit(In_sync, &rdev->flags) &&
7986 			    rdev->recovery_offset < j)
7987 				j = rdev->recovery_offset;
7988 		rcu_read_unlock();
7989 
7990 		/* If there is a bitmap, we need to make sure all
7991 		 * writes that started before we added a spare
7992 		 * complete before we start doing a recovery.
7993 		 * Otherwise the write might complete and (via
7994 		 * bitmap_endwrite) set a bit in the bitmap after the
7995 		 * recovery has checked that bit and skipped that
7996 		 * region.
7997 		 */
7998 		if (mddev->bitmap) {
7999 			mddev->pers->quiesce(mddev, 1);
8000 			mddev->pers->quiesce(mddev, 0);
8001 		}
8002 	}
8003 
8004 	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8005 	pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
8006 	pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8007 		 speed_max(mddev), desc);
8008 
8009 	is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8010 
8011 	io_sectors = 0;
8012 	for (m = 0; m < SYNC_MARKS; m++) {
8013 		mark[m] = jiffies;
8014 		mark_cnt[m] = io_sectors;
8015 	}
8016 	last_mark = 0;
8017 	mddev->resync_mark = mark[last_mark];
8018 	mddev->resync_mark_cnt = mark_cnt[last_mark];
8019 
8020 	/*
8021 	 * Tune reconstruction:
8022 	 */
8023 	window = 32*(PAGE_SIZE/512);
8024 	pr_debug("md: using %dk window, over a total of %lluk.\n",
8025 		 window/2, (unsigned long long)max_sectors/2);
8026 
8027 	atomic_set(&mddev->recovery_active, 0);
8028 	last_check = 0;
8029 
8030 	if (j>2) {
8031 		pr_debug("md: resuming %s of %s from checkpoint.\n",
8032 			 desc, mdname(mddev));
8033 		mddev->curr_resync = j;
8034 	} else
8035 		mddev->curr_resync = 3; /* no longer delayed */
8036 	mddev->curr_resync_completed = j;
8037 	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8038 	md_new_event(mddev);
8039 	update_time = jiffies;
8040 
8041 	blk_start_plug(&plug);
8042 	while (j < max_sectors) {
8043 		sector_t sectors;
8044 
8045 		skipped = 0;
8046 
8047 		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8048 		    ((mddev->curr_resync > mddev->curr_resync_completed &&
8049 		      (mddev->curr_resync - mddev->curr_resync_completed)
8050 		      > (max_sectors >> 4)) ||
8051 		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8052 		     (j - mddev->curr_resync_completed)*2
8053 		     >= mddev->resync_max - mddev->curr_resync_completed ||
8054 		     mddev->curr_resync_completed > mddev->resync_max
8055 			    )) {
8056 			/* time to update curr_resync_completed */
8057 			wait_event(mddev->recovery_wait,
8058 				   atomic_read(&mddev->recovery_active) == 0);
8059 			mddev->curr_resync_completed = j;
8060 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8061 			    j > mddev->recovery_cp)
8062 				mddev->recovery_cp = j;
8063 			update_time = jiffies;
8064 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8065 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8066 		}
8067 
8068 		while (j >= mddev->resync_max &&
8069 		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8070 			/* As this condition is controlled by user-space,
8071 			 * we can block indefinitely, so use '_interruptible'
8072 			 * to avoid triggering warnings.
8073 			 */
8074 			flush_signals(current); /* just in case */
8075 			wait_event_interruptible(mddev->recovery_wait,
8076 						 mddev->resync_max > j
8077 						 || test_bit(MD_RECOVERY_INTR,
8078 							     &mddev->recovery));
8079 		}
8080 
8081 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8082 			break;
8083 
8084 		sectors = mddev->pers->sync_request(mddev, j, &skipped);
8085 		if (sectors == 0) {
8086 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8087 			break;
8088 		}
8089 
8090 		if (!skipped) { /* actual IO requested */
8091 			io_sectors += sectors;
8092 			atomic_add(sectors, &mddev->recovery_active);
8093 		}
8094 
8095 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8096 			break;
8097 
8098 		j += sectors;
8099 		if (j > max_sectors)
8100 			/* when skipping, extra large numbers can be returned. */
8101 			j = max_sectors;
8102 		if (j > 2)
8103 			mddev->curr_resync = j;
8104 		mddev->curr_mark_cnt = io_sectors;
8105 		if (last_check == 0)
8106 			/* this is the earliest that rebuild will be
8107 			 * visible in /proc/mdstat
8108 			 */
8109 			md_new_event(mddev);
8110 
8111 		if (last_check + window > io_sectors || j == max_sectors)
8112 			continue;
8113 
8114 		last_check = io_sectors;
8115 	repeat:
8116 		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8117 			/* step marks */
8118 			int next = (last_mark+1) % SYNC_MARKS;
8119 
8120 			mddev->resync_mark = mark[next];
8121 			mddev->resync_mark_cnt = mark_cnt[next];
8122 			mark[next] = jiffies;
8123 			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8124 			last_mark = next;
8125 		}
8126 
8127 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8128 			break;
8129 
8130 		/*
8131 		 * this loop exits only if we are slower than
8132 		 * the 'hard' speed limit, or the system was IO-idle for
8133 		 * a jiffy.
8134 		 * the system might be non-idle CPU-wise, but we only care
8135 		 * about not overloading the IO subsystem. (things like an
8136 		 * e2fsck being done on the RAID array should execute fast)
8137 		 */
8138 		cond_resched();
8139 
8140 		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8141 		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8142 			/((jiffies-mddev->resync_mark)/HZ +1) +1;
8143 
8144 		if (currspeed > speed_min(mddev)) {
8145 			if (currspeed > speed_max(mddev)) {
8146 				msleep(500);
8147 				goto repeat;
8148 			}
8149 			if (!is_mddev_idle(mddev, 0)) {
8150 				/*
8151 				 * Give other IO more of a chance.
8152 				 * The faster the devices, the less we wait.
8153 				 */
8154 				wait_event(mddev->recovery_wait,
8155 					   !atomic_read(&mddev->recovery_active));
8156 			}
8157 		}
8158 	}
8159 	pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8160 		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8161 		? "interrupted" : "done");
8162 	/*
8163 	 * this also signals 'finished resyncing' to md_stop
8164 	 */
8165 	blk_finish_plug(&plug);
8166 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8167 
8168 	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8169 	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8170 	    mddev->curr_resync > 3) {
8171 		mddev->curr_resync_completed = mddev->curr_resync;
8172 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8173 	}
8174 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
8175 
8176 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8177 	    mddev->curr_resync > 3) {
8178 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8179 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8180 				if (mddev->curr_resync >= mddev->recovery_cp) {
8181 					pr_debug("md: checkpointing %s of %s.\n",
8182 						 desc, mdname(mddev));
8183 					if (test_bit(MD_RECOVERY_ERROR,
8184 						&mddev->recovery))
8185 						mddev->recovery_cp =
8186 							mddev->curr_resync_completed;
8187 					else
8188 						mddev->recovery_cp =
8189 							mddev->curr_resync;
8190 				}
8191 			} else
8192 				mddev->recovery_cp = MaxSector;
8193 		} else {
8194 			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8195 				mddev->curr_resync = MaxSector;
8196 			rcu_read_lock();
8197 			rdev_for_each_rcu(rdev, mddev)
8198 				if (rdev->raid_disk >= 0 &&
8199 				    mddev->delta_disks >= 0 &&
8200 				    !test_bit(Journal, &rdev->flags) &&
8201 				    !test_bit(Faulty, &rdev->flags) &&
8202 				    !test_bit(In_sync, &rdev->flags) &&
8203 				    rdev->recovery_offset < mddev->curr_resync)
8204 					rdev->recovery_offset = mddev->curr_resync;
8205 			rcu_read_unlock();
8206 		}
8207 	}
8208  skip:
8209 	/* set CHANGE_PENDING here since another update may be needed,
8210 	 * so other nodes are informed. It should be harmless for normal
8211 	 * raid */
8212 	set_mask_bits(&mddev->sb_flags, 0,
8213 		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
8214 
8215 	spin_lock(&mddev->lock);
8216 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8217 		/* We completed so min/max setting can be forgotten if used. */
8218 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8219 			mddev->resync_min = 0;
8220 		mddev->resync_max = MaxSector;
8221 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8222 		mddev->resync_min = mddev->curr_resync_completed;
8223 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8224 	mddev->curr_resync = 0;
8225 	spin_unlock(&mddev->lock);
8226 
8227 	wake_up(&resync_wait);
8228 	md_wakeup_thread(mddev->thread);
8229 	return;
8230 }
8231 EXPORT_SYMBOL_GPL(md_do_sync);
8232 
8233 static int remove_and_add_spares(struct mddev *mddev,
8234 				 struct md_rdev *this)
8235 {
8236 	struct md_rdev *rdev;
8237 	int spares = 0;
8238 	int removed = 0;
8239 	bool remove_some = false;
8240 
8241 	rdev_for_each(rdev, mddev) {
8242 		if ((this == NULL || rdev == this) &&
8243 		    rdev->raid_disk >= 0 &&
8244 		    !test_bit(Blocked, &rdev->flags) &&
8245 		    test_bit(Faulty, &rdev->flags) &&
8246 		    atomic_read(&rdev->nr_pending)==0) {
8247 			/* Faulty non-Blocked devices with nr_pending == 0
8248 			 * never get nr_pending incremented,
8249 			 * never get Faulty cleared, and never get Blocked set.
8250 			 * So we can synchronize_rcu now rather than once per device
8251 			 */
8252 			remove_some = true;
8253 			set_bit(RemoveSynchronized, &rdev->flags);
8254 		}
8255 	}
8256 
8257 	if (remove_some)
8258 		synchronize_rcu();
8259 	rdev_for_each(rdev, mddev) {
8260 		if ((this == NULL || rdev == this) &&
8261 		    rdev->raid_disk >= 0 &&
8262 		    !test_bit(Blocked, &rdev->flags) &&
8263 		    ((test_bit(RemoveSynchronized, &rdev->flags) ||
8264 		     (!test_bit(In_sync, &rdev->flags) &&
8265 		      !test_bit(Journal, &rdev->flags))) &&
8266 		    atomic_read(&rdev->nr_pending)==0)) {
8267 			if (mddev->pers->hot_remove_disk(
8268 				    mddev, rdev) == 0) {
8269 				sysfs_unlink_rdev(mddev, rdev);
8270 				rdev->raid_disk = -1;
8271 				removed++;
8272 			}
8273 		}
8274 		if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
8275 			clear_bit(RemoveSynchronized, &rdev->flags);
8276 	}
8277 
8278 	if (removed && mddev->kobj.sd)
8279 		sysfs_notify(&mddev->kobj, NULL, "degraded");
8280 
8281 	if (this && removed)
8282 		goto no_add;
8283 
8284 	rdev_for_each(rdev, mddev) {
8285 		if (this && this != rdev)
8286 			continue;
8287 		if (test_bit(Candidate, &rdev->flags))
8288 			continue;
8289 		if (rdev->raid_disk >= 0 &&
8290 		    !test_bit(In_sync, &rdev->flags) &&
8291 		    !test_bit(Journal, &rdev->flags) &&
8292 		    !test_bit(Faulty, &rdev->flags))
8293 			spares++;
8294 		if (rdev->raid_disk >= 0)
8295 			continue;
8296 		if (test_bit(Faulty, &rdev->flags))
8297 			continue;
8298 		if (!test_bit(Journal, &rdev->flags)) {
8299 			if (mddev->ro &&
8300 			    ! (rdev->saved_raid_disk >= 0 &&
8301 			       !test_bit(Bitmap_sync, &rdev->flags)))
8302 				continue;
8303 
8304 			rdev->recovery_offset = 0;
8305 		}
8306 		if (mddev->pers->
8307 		    hot_add_disk(mddev, rdev) == 0) {
8308 			if (sysfs_link_rdev(mddev, rdev))
8309 				/* failure here is OK */;
8310 			if (!test_bit(Journal, &rdev->flags))
8311 				spares++;
8312 			md_new_event(mddev);
8313 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8314 		}
8315 	}
8316 no_add:
8317 	if (removed)
8318 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8319 	return spares;
8320 }
8321 
8322 static void md_start_sync(struct work_struct *ws)
8323 {
8324 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
8325 
8326 	mddev->sync_thread = md_register_thread(md_do_sync,
8327 						mddev,
8328 						"resync");
8329 	if (!mddev->sync_thread) {
8330 		pr_warn("%s: could not start resync thread...\n",
8331 			mdname(mddev));
8332 		/* leave the spares where they are, it shouldn't hurt */
8333 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8334 		clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8335 		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8336 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8337 		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8338 		wake_up(&resync_wait);
8339 		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8340 				       &mddev->recovery))
8341 			if (mddev->sysfs_action)
8342 				sysfs_notify_dirent_safe(mddev->sysfs_action);
8343 	} else
8344 		md_wakeup_thread(mddev->sync_thread);
8345 	sysfs_notify_dirent_safe(mddev->sysfs_action);
8346 	md_new_event(mddev);
8347 }
8348 
8349 /*
8350  * This routine is regularly called by all per-raid-array threads to
8351  * deal with generic issues like resync and super-block update.
8352  * Raid personalities that don't have a thread (linear/raid0) do not
8353  * need this as they never do any recovery or update the superblock.
8354  *
8355  * It does not do any resync itself, but rather "forks" off other threads
8356  * to do that as needed.
8357  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8358  * "->recovery" and create a thread at ->sync_thread.
8359  * When the thread finishes it sets MD_RECOVERY_DONE
8360  * and wakes up this thread, which will reap the sync thread and finish up.
8361  * This thread also removes any faulty devices (with nr_pending == 0).
8362  *
8363  * The overall approach is:
8364  *  1/ if the superblock needs updating, update it.
8365  *  2/ If a recovery thread is running, don't do anything else.
8366  *  3/ If recovery has finished, clean up, possibly marking spares active.
8367  *  4/ If there are any faulty devices, remove them.
8368  *  5/ If array is degraded, try to add spare devices
8369  *  6/ If array has spares or is not in-sync, start a resync thread.
8370  */
8371 void md_check_recovery(struct mddev *mddev)
8372 {
8373 	if (mddev->suspended)
8374 		return;
8375 
8376 	if (mddev->bitmap)
8377 		bitmap_daemon_work(mddev);
8378 
8379 	if (signal_pending(current)) {
8380 		if (mddev->pers->sync_request && !mddev->external) {
8381 			pr_debug("md: %s in immediate safe mode\n",
8382 				 mdname(mddev));
8383 			mddev->safemode = 2;
8384 		}
8385 		flush_signals(current);
8386 	}
8387 
8388 	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8389 		return;
8390 	if ( ! (
8391 		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
8392 		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8393 		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8394 		test_bit(MD_RELOAD_SB, &mddev->flags) ||
8395 		(mddev->external == 0 && mddev->safemode == 1) ||
8396 		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
8397 		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
8398 		))
8399 		return;
8400 
8401 	if (mddev_trylock(mddev)) {
8402 		int spares = 0;
8403 
8404 		if (mddev->ro) {
8405 			struct md_rdev *rdev;
8406 			if (!mddev->external && mddev->in_sync)
8407 				/* 'Blocked' flag not needed as failed devices
8408 				 * will be recorded if array switched to read/write.
8409 				 * Leaving it set will prevent the device
8410 				 * from being removed.
8411 				 */
8412 				rdev_for_each(rdev, mddev)
8413 					clear_bit(Blocked, &rdev->flags);
8414 			/* On a read-only array we can:
8415 			 * - remove failed devices
8416 			 * - add already-in_sync devices if the array itself
8417 			 *   is in-sync.
8418 			 * As we only add devices that are already in-sync,
8419 			 * we can activate the spares immediately.
8420 			 */
8421 			remove_and_add_spares(mddev, NULL);
8422 			/* There is no thread, but we need to call
8423 			 * ->spare_active and clear saved_raid_disk
8424 			 */
8425 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8426 			md_reap_sync_thread(mddev);
8427 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8428 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8429 			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8430 			goto unlock;
8431 		}
8432 
8433 		if (mddev_is_clustered(mddev)) {
8434 			struct md_rdev *rdev;
8435 			/* kick the device if another node issued a
8436 			 * remove disk.
8437 			 */
8438 			rdev_for_each(rdev, mddev) {
8439 				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
8440 						rdev->raid_disk < 0)
8441 					md_kick_rdev_from_array(rdev);
8442 			}
8443 
8444 			if (test_and_clear_bit(MD_RELOAD_SB, &mddev->flags))
8445 				md_reload_sb(mddev, mddev->good_device_nr);
8446 		}
8447 
8448 		if (!mddev->external) {
8449 			int did_change = 0;
8450 			spin_lock(&mddev->lock);
8451 			if (mddev->safemode &&
8452 			    !atomic_read(&mddev->writes_pending) &&
8453 			    !mddev->in_sync &&
8454 			    mddev->recovery_cp == MaxSector) {
8455 				mddev->in_sync = 1;
8456 				did_change = 1;
8457 				set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8458 			}
8459 			if (mddev->safemode == 1)
8460 				mddev->safemode = 0;
8461 			spin_unlock(&mddev->lock);
8462 			if (did_change)
8463 				sysfs_notify_dirent_safe(mddev->sysfs_state);
8464 		}
8465 
8466 		if (mddev->sb_flags)
8467 			md_update_sb(mddev, 0);
8468 
8469 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
8470 		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
8471 			/* resync/recovery still happening */
8472 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8473 			goto unlock;
8474 		}
8475 		if (mddev->sync_thread) {
8476 			md_reap_sync_thread(mddev);
8477 			goto unlock;
8478 		}
8479 		/* Set RUNNING before clearing NEEDED to avoid
8480 		 * any transients in the value of "sync_action".
8481 		 */
8482 		mddev->curr_resync_completed = 0;
8483 		spin_lock(&mddev->lock);
8484 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8485 		spin_unlock(&mddev->lock);
8486 		/* Clear some bits that don't mean anything, but
8487 		 * might be left set
8488 		 */
8489 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
8490 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8491 
8492 		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8493 		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
8494 			goto not_running;
8495 		/* no recovery is running.
8496 		 * remove any failed drives, then
8497 		 * add spares if possible.
8498 		 * Spares are also removed and re-added, to allow
8499 		 * the personality to fail the re-add.
8500 		 */
8501 
8502 		if (mddev->reshape_position != MaxSector) {
8503 			if (mddev->pers->check_reshape == NULL ||
8504 			    mddev->pers->check_reshape(mddev) != 0)
8505 				/* Cannot proceed */
8506 				goto not_running;
8507 			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8508 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8509 		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
8510 			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8511 			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8512 			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8513 			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8514 		} else if (mddev->recovery_cp < MaxSector) {
8515 			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8516 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8517 		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
8518 			/* nothing to be done ... */
8519 			goto not_running;
8520 
8521 		if (mddev->pers->sync_request) {
8522 			if (spares) {
8523 				/* We are adding a device or devices to an array
8524 				 * which has the bitmap stored on all devices.
8525 				 * So make sure all bitmap pages get written
8526 				 */
8527 				bitmap_write_all(mddev->bitmap);
8528 			}
8529 			INIT_WORK(&mddev->del_work, md_start_sync);
8530 			queue_work(md_misc_wq, &mddev->del_work);
8531 			goto unlock;
8532 		}
8533 	not_running:
8534 		if (!mddev->sync_thread) {
8535 			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8536 			wake_up(&resync_wait);
8537 			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8538 					       &mddev->recovery))
8539 				if (mddev->sysfs_action)
8540 					sysfs_notify_dirent_safe(mddev->sysfs_action);
8541 		}
8542 	unlock:
8543 		wake_up(&mddev->sb_wait);
8544 		mddev_unlock(mddev);
8545 	}
8546 }
8547 EXPORT_SYMBOL(md_check_recovery);
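/*
 * Illustrative call site (sketch only): a personality's daemon thread,
 * itself registered via md_register_thread(), normally ends each pass
 * with something like
 *
 *	blk_finish_plug(&plug);
 *	md_check_recovery(mddev);
 *
 * so superblock updates and resync/recovery threads are driven from the
 * per-array thread rather than from the IO path.
 */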
8548 
8549 void md_reap_sync_thread(struct mddev *mddev)
8550 {
8551 	struct md_rdev *rdev;
8552 
8553 	/* resync has finished, collect result */
8554 	md_unregister_thread(&mddev->sync_thread);
8555 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8556 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8557 		/* success...*/
8558 		/* activate any spares */
8559 		if (mddev->pers->spare_active(mddev)) {
8560 			sysfs_notify(&mddev->kobj, NULL,
8561 				     "degraded");
8562 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8563 		}
8564 	}
8565 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8566 	    mddev->pers->finish_reshape)
8567 		mddev->pers->finish_reshape(mddev);
8568 
8569 	/* If array is no-longer degraded, then any saved_raid_disk
8570 	 * information must be scrapped.
8571 	 */
8572 	if (!mddev->degraded)
8573 		rdev_for_each(rdev, mddev)
8574 			rdev->saved_raid_disk = -1;
8575 
8576 	md_update_sb(mddev, 1);
8577 	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
8578 	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
8579 	 * clustered raid */
8580 	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
8581 		md_cluster_ops->resync_finish(mddev);
8582 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8583 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8584 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8585 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8586 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8587 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8588 	wake_up(&resync_wait);
8589 	/* flag recovery needed just to double check */
8590 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8591 	sysfs_notify_dirent_safe(mddev->sysfs_action);
8592 	md_new_event(mddev);
8593 	if (mddev->event_work.func)
8594 		queue_work(md_misc_wq, &mddev->event_work);
8595 }
8596 EXPORT_SYMBOL(md_reap_sync_thread);
8597 
8598 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8599 {
8600 	sysfs_notify_dirent_safe(rdev->sysfs_state);
8601 	wait_event_timeout(rdev->blocked_wait,
8602 			   !test_bit(Blocked, &rdev->flags) &&
8603 			   !test_bit(BlockedBadBlocks, &rdev->flags),
8604 			   msecs_to_jiffies(5000));
8605 	rdev_dec_pending(rdev, mddev);
8606 }
8607 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8608 
8609 void md_finish_reshape(struct mddev *mddev)
8610 {
8611 	/* called by the personality module when reshape completes. */
8612 	struct md_rdev *rdev;
8613 
8614 	rdev_for_each(rdev, mddev) {
8615 		if (rdev->data_offset > rdev->new_data_offset)
8616 			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8617 		else
8618 			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8619 		rdev->data_offset = rdev->new_data_offset;
8620 	}
8621 }
8622 EXPORT_SYMBOL(md_finish_reshape);
8623 
8624 /* Bad block management */
8625 
8626 /* Returns 1 on success, 0 on failure */
8627 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8628 		       int is_new)
8629 {
8630 	struct mddev *mddev = rdev->mddev;
8631 	int rv;
8632 	if (is_new)
8633 		s += rdev->new_data_offset;
8634 	else
8635 		s += rdev->data_offset;
8636 	rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
8637 	if (rv == 0) {
8638 		/* Make sure they get written out promptly */
8639 		if (test_bit(ExternalBbl, &rdev->flags))
8640 			sysfs_notify(&rdev->kobj, NULL,
8641 				     "unacknowledged_bad_blocks");
8642 		sysfs_notify_dirent_safe(rdev->sysfs_state);
8643 		set_mask_bits(&mddev->sb_flags, 0,
8644 			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
8645 		md_wakeup_thread(rdev->mddev->thread);
8646 		return 1;
8647 	} else
8648 		return 0;
8649 }
8650 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
8651 
8652 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8653 			 int is_new)
8654 {
8655 	int rv;
8656 	if (is_new)
8657 		s += rdev->new_data_offset;
8658 	else
8659 		s += rdev->data_offset;
8660 	rv = badblocks_clear(&rdev->badblocks, s, sectors);
8661 	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
8662 		sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
8663 	return rv;
8664 }
8665 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
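/*
 * Illustrative usage (sketch only): on a failed write a personality may
 * first try to record a bad block and only fail the whole device when
 * that is impossible, roughly
 *
 *	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
 *		md_error(mddev, rdev);
 *
 * Note the inverted sense: rdev_set_badblocks() returns 1 on success and
 * 0 on failure, so "!" here means "could not record the bad block".
 */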
8666 
8667 static int md_notify_reboot(struct notifier_block *this,
8668 			    unsigned long code, void *x)
8669 {
8670 	struct list_head *tmp;
8671 	struct mddev *mddev;
8672 	int need_delay = 0;
8673 
8674 	for_each_mddev(mddev, tmp) {
8675 		if (mddev_trylock(mddev)) {
8676 			if (mddev->pers)
8677 				__md_stop_writes(mddev);
8678 			if (mddev->persistent)
8679 				mddev->safemode = 2;
8680 			mddev_unlock(mddev);
8681 		}
8682 		need_delay = 1;
8683 	}
8684 	/*
8685 	 * certain more exotic SCSI devices are known to be
8686 	 * volatile wrt overly early system reboots. While the
8687 	 * right place to handle this issue is the individual
8688 	 * driver, we do want to have a safe RAID driver ...
8689 	 */
8690 	if (need_delay)
8691 		mdelay(1000*1);
8692 
8693 	return NOTIFY_DONE;
8694 }
8695 
8696 static struct notifier_block md_notifier = {
8697 	.notifier_call	= md_notify_reboot,
8698 	.next		= NULL,
8699 	.priority	= INT_MAX, /* before any real devices */
8700 };
8701 
8702 static void md_geninit(void)
8703 {
8704 	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8705 
8706 	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8707 }
8708 
8709 static int __init md_init(void)
8710 {
8711 	int ret = -ENOMEM;
8712 
8713 	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8714 	if (!md_wq)
8715 		goto err_wq;
8716 
8717 	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8718 	if (!md_misc_wq)
8719 		goto err_misc_wq;
8720 
8721 	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8722 		goto err_md;
8723 
8724 	if ((ret = register_blkdev(0, "mdp")) < 0)
8725 		goto err_mdp;
8726 	mdp_major = ret;
8727 
8728 	blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
8729 			    md_probe, NULL, NULL);
8730 	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8731 			    md_probe, NULL, NULL);
8732 
8733 	register_reboot_notifier(&md_notifier);
8734 	raid_table_header = register_sysctl_table(raid_root_table);
8735 
8736 	md_geninit();
8737 	return 0;
8738 
8739 err_mdp:
8740 	unregister_blkdev(MD_MAJOR, "md");
8741 err_md:
8742 	destroy_workqueue(md_misc_wq);
8743 err_misc_wq:
8744 	destroy_workqueue(md_wq);
8745 err_wq:
8746 	return ret;
8747 }
8748 
8749 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
8750 {
8751 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8752 	struct md_rdev *rdev2;
8753 	int role, ret;
8754 	char b[BDEVNAME_SIZE];
8755 
8756 	/* Check for change of roles in the active devices */
8757 	rdev_for_each(rdev2, mddev) {
8758 		if (test_bit(Faulty, &rdev2->flags))
8759 			continue;
8760 
8761 		/* Check if the roles changed */
8762 		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
8763 
8764 		if (test_bit(Candidate, &rdev2->flags)) {
8765 			if (role == 0xfffe) {
8766 				pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
8767 				md_kick_rdev_from_array(rdev2);
8768 				continue;
8769 			}
8770 			else
8771 				clear_bit(Candidate, &rdev2->flags);
8772 		}
8773 
8774 		if (role != rdev2->raid_disk) {
8775 			/* got activated */
8776 			if (rdev2->raid_disk == -1 && role != 0xffff) {
8777 				rdev2->saved_raid_disk = role;
8778 				ret = remove_and_add_spares(mddev, rdev2);
8779 				pr_info("Activated spare: %s\n",
8780 					bdevname(rdev2->bdev,b));
8781 				/* wake up mddev->thread here, so the array can
8782 				 * perform resync with the newly activated disk */
8783 				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8784 				md_wakeup_thread(mddev->thread);
8785 
8786 			}
8787 			/* device faulty
8788 			 * We just want to do the minimum to mark the disk
8789 			 * as faulty. The recovery is performed by the
8790 			 * one who initiated the error.
8791 			 */
8792 			if ((role == 0xfffe) || (role == 0xfffd)) {
8793 				md_error(mddev, rdev2);
8794 				clear_bit(Blocked, &rdev2->flags);
8795 			}
8796 		}
8797 	}
8798 
8799 	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
8800 		update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
8801 
8802 	/* Finally set the event to be up to date */
8803 	mddev->events = le64_to_cpu(sb->events);
8804 }
8805 
8806 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
8807 {
8808 	int err;
8809 	struct page *swapout = rdev->sb_page;
8810 	struct mdp_superblock_1 *sb;
8811 
8812 	/* Store the rdev's sb page in the swapout temporary
8813 	 * variable in case we hit an error later
8814 	 */
8815 	rdev->sb_page = NULL;
8816 	err = alloc_disk_sb(rdev);
8817 	if (err == 0) {
8818 		ClearPageUptodate(rdev->sb_page);
8819 		rdev->sb_loaded = 0;
8820 		err = super_types[mddev->major_version].
8821 			load_super(rdev, NULL, mddev->minor_version);
8822 	}
8823 	if (err < 0) {
8824 		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
8825 				__func__, __LINE__, rdev->desc_nr, err);
8826 		if (rdev->sb_page)
8827 			put_page(rdev->sb_page);
8828 		rdev->sb_page = swapout;
8829 		rdev->sb_loaded = 1;
8830 		return err;
8831 	}
8832 
8833 	sb = page_address(rdev->sb_page);
8834 	/* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
8835 	 * is not set
8836 	 */
8837 
8838 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
8839 		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
8840 
8841 	/* The other node finished recovery, call spare_active to set
8842 	 * device In_sync and mddev->degraded
8843 	 */
8844 	if (rdev->recovery_offset == MaxSector &&
8845 	    !test_bit(In_sync, &rdev->flags) &&
8846 	    mddev->pers->spare_active(mddev))
8847 		sysfs_notify(&mddev->kobj, NULL, "degraded");
8848 
8849 	put_page(swapout);
8850 	return 0;
8851 }
8852 
8853 void md_reload_sb(struct mddev *mddev, int nr)
8854 {
8855 	struct md_rdev *rdev;
8856 	int err;
8857 
8858 	/* Find the rdev */
8859 	rdev_for_each_rcu(rdev, mddev) {
8860 		if (rdev->desc_nr == nr)
8861 			break;
8862 	}
8863 
8864 	if (!rdev || rdev->desc_nr != nr) {
8865 		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
8866 		return;
8867 	}
8868 
8869 	err = read_rdev(mddev, rdev);
8870 	if (err < 0)
8871 		return;
8872 
8873 	check_sb_changes(mddev, rdev);
8874 
8875 	/* Read all rdev's to update recovery_offset */
8876 	rdev_for_each_rcu(rdev, mddev)
8877 		read_rdev(mddev, rdev);
8878 }
8879 EXPORT_SYMBOL(md_reload_sb);
8880 
8881 #ifndef MODULE
8882 
8883 /*
8884  * Searches all registered partitions for autorun RAID arrays
8885  * at boot time.
8886  */
8887 
8888 static DEFINE_MUTEX(detected_devices_mutex);
8889 static LIST_HEAD(all_detected_devices);
8890 struct detected_devices_node {
8891 	struct list_head list;
8892 	dev_t dev;
8893 };
8894 
8895 void md_autodetect_dev(dev_t dev)
8896 {
8897 	struct detected_devices_node *node_detected_dev;
8898 
8899 	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
8900 	if (node_detected_dev) {
8901 		node_detected_dev->dev = dev;
8902 		mutex_lock(&detected_devices_mutex);
8903 		list_add_tail(&node_detected_dev->list, &all_detected_devices);
8904 		mutex_unlock(&detected_devices_mutex);
8905 	}
8906 }
8907 
8908 static void autostart_arrays(int part)
8909 {
8910 	struct md_rdev *rdev;
8911 	struct detected_devices_node *node_detected_dev;
8912 	dev_t dev;
8913 	int i_scanned, i_passed;
8914 
8915 	i_scanned = 0;
8916 	i_passed = 0;
8917 
8918 	pr_info("md: Autodetecting RAID arrays.\n");
8919 
8920 	mutex_lock(&detected_devices_mutex);
8921 	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
8922 		i_scanned++;
8923 		node_detected_dev = list_entry(all_detected_devices.next,
8924 					struct detected_devices_node, list);
8925 		list_del(&node_detected_dev->list);
8926 		dev = node_detected_dev->dev;
8927 		kfree(node_detected_dev);
8928 		mutex_unlock(&detected_devices_mutex);
8929 		rdev = md_import_device(dev,0, 90);
8930 		mutex_lock(&detected_devices_mutex);
8931 		if (IS_ERR(rdev))
8932 			continue;
8933 
8934 		if (test_bit(Faulty, &rdev->flags))
8935 			continue;
8936 
8937 		set_bit(AutoDetected, &rdev->flags);
8938 		list_add(&rdev->same_set, &pending_raid_disks);
8939 		i_passed++;
8940 	}
8941 	mutex_unlock(&detected_devices_mutex);
8942 
8943 	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
8944 
8945 	autorun_devices(part);
8946 }
8947 
8948 #endif /* !MODULE */
8949 
8950 static __exit void md_exit(void)
8951 {
8952 	struct mddev *mddev;
8953 	struct list_head *tmp;
8954 	int delay = 1;
8955 
8956 	blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
8957 	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
8958 
8959 	unregister_blkdev(MD_MAJOR,"md");
8960 	unregister_blkdev(mdp_major, "mdp");
8961 	unregister_reboot_notifier(&md_notifier);
8962 	unregister_sysctl_table(raid_table_header);
8963 
8964 	/* We cannot unload the modules while some process is
8965 	 * waiting for us in select() or poll() - wake them up
8966 	 */
8967 	md_unloading = 1;
8968 	while (waitqueue_active(&md_event_waiters)) {
8969 		/* not safe to leave yet */
8970 		wake_up(&md_event_waiters);
8971 		msleep(delay);
8972 		delay += delay;
8973 	}
8974 	remove_proc_entry("mdstat", NULL);
8975 
8976 	for_each_mddev(mddev, tmp) {
8977 		export_array(mddev);
8978 		mddev->hold_active = 0;
8979 	}
8980 	destroy_workqueue(md_misc_wq);
8981 	destroy_workqueue(md_wq);
8982 }
8983 
8984 subsys_initcall(md_init);
8985 module_exit(md_exit)
8986 
8987 static int get_ro(char *buffer, struct kernel_param *kp)
8988 {
8989 	return sprintf(buffer, "%d", start_readonly);
8990 }
8991 static int set_ro(const char *val, struct kernel_param *kp)
8992 {
8993 	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
8994 }
8995 
8996 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
8997 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
8998 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
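/*
 * Illustrative tuning (the sysfs path is an assumption, not taken from
 * this file): with md built as a module these parameters usually appear
 * under /sys/module/md_mod/parameters/, so e.g. writing 1 to start_ro
 * makes newly assembled arrays come up auto-read-only until the first
 * write arrives.
 */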
8999 
9000 MODULE_LICENSE("GPL");
9001 MODULE_DESCRIPTION("MD RAID framework");
9002 MODULE_ALIAS("md");
9003 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
9004