xref: /openbmc/linux/drivers/md/dm-raid.c (revision 93d90ad7)
1 /*
2  * Copyright (C) 2010-2011 Neil Brown
3  * Copyright (C) 2010-2014 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include <linux/slab.h>
9 #include <linux/module.h>
10 
11 #include "md.h"
12 #include "raid1.h"
13 #include "raid5.h"
14 #include "raid10.h"
15 #include "bitmap.h"
16 
17 #include <linux/device-mapper.h>
18 
19 #define DM_MSG_PREFIX "raid"
20 
21 static bool devices_handle_discard_safely = false;
22 
23 /*
24  * The following flags are used by dm-raid.c to set up the array state.
25  * They must be cleared before md_run is called.
26  */
27 #define FirstUse 10             /* rdev flag */
28 
29 struct raid_dev {
30 	/*
31 	 * Two DM devices, one to hold metadata and one to hold the
32 	 * actual data/parity.  The reason for this is to not confuse
33 	 * ti->len and give more flexibility in altering size and
34 	 * characteristics.
35 	 *
36 	 * While it is possible for this device to be associated
37 	 * with a different physical device than the data_dev, it
38 	 * is intended for it to be the same.
39 	 *    |--------- Physical Device ---------|
40 	 *    |- meta_dev -|------ data_dev ------|
41 	 */
42 	struct dm_dev *meta_dev;
43 	struct dm_dev *data_dev;
44 	struct md_rdev rdev;
45 };
46 
47 /*
48  * Flags for rs->print_flags field.
49  */
50 #define DMPF_SYNC              0x1
51 #define DMPF_NOSYNC            0x2
52 #define DMPF_REBUILD           0x4
53 #define DMPF_DAEMON_SLEEP      0x8
54 #define DMPF_MIN_RECOVERY_RATE 0x10
55 #define DMPF_MAX_RECOVERY_RATE 0x20
56 #define DMPF_MAX_WRITE_BEHIND  0x40
57 #define DMPF_STRIPE_CACHE      0x80
58 #define DMPF_REGION_SIZE       0x100
59 #define DMPF_RAID10_COPIES     0x200
60 #define DMPF_RAID10_FORMAT     0x400
61 
62 struct raid_set {
63 	struct dm_target *ti;
64 
65 	uint32_t bitmap_loaded;
66 	uint32_t print_flags;
67 
68 	struct mddev md;
69 	struct raid_type *raid_type;
70 	struct dm_target_callbacks callbacks;
71 
72 	struct raid_dev dev[0];
73 };
74 
75 /* Supported raid types and properties. */
76 static struct raid_type {
77 	const char *name;		/* RAID algorithm. */
78 	const char *descr;		/* Descriptor text for logging. */
79 	const unsigned parity_devs;	/* # of parity devices. */
80 	const unsigned minimal_devs;	/* minimal # of devices in set. */
81 	const unsigned level;		/* RAID level. */
82 	const unsigned algorithm;	/* RAID algorithm. */
83 } raid_types[] = {
84 	{"raid1",    "RAID1 (mirroring)",               0, 2, 1, 0 /* NONE */},
85 	{"raid10",   "RAID10 (striped mirrors)",        0, 2, 10, UINT_MAX /* Varies */},
86 	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
87 	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
88 	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
89 	{"raid5_ls", "RAID5 (left symmetric)",		1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
90 	{"raid5_rs", "RAID5 (right symmetric)",		1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
91 	{"raid6_zr", "RAID6 (zero restart)",		2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
92 	{"raid6_nr", "RAID6 (N restart)",		2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
93 	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
94 };
95 
96 static char *raid10_md_layout_to_format(int layout)
97 {
98 	/*
99 	 * Bit 16 and 17 stand for "offset" and "use_far_sets"
100 	 * Refer to MD's raid10.c for details
101 	 */
102 	if ((layout & 0x10000) && (layout & 0x20000))
103 		return "offset";
104 
105 	if ((layout & 0xFF) > 1)
106 		return "near";
107 
108 	return "far";
109 }
110 
111 static unsigned raid10_md_layout_to_copies(int layout)
112 {
113 	if ((layout & 0xFF) > 1)
114 		return layout & 0xFF;
115 	return (layout >> 8) & 0xFF;
116 }
117 
118 static int raid10_format_to_md_layout(char *format, unsigned copies)
119 {
120 	unsigned n = 1, f = 1;
121 
122 	if (!strcmp("near", format))
123 		n = copies;
124 	else
125 		f = copies;
126 
127 	if (!strcmp("offset", format))
128 		return 0x30000 | (f << 8) | n;
129 
130 	if (!strcmp("far", format))
131 		return 0x20000 | (f << 8) | n;
132 
133 	return (f << 8) | n;
134 }
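/*
 * Worked examples of the layout encoding above (illustrative values,
 * derived from the two helpers rather than from any md documentation):
 *
 *   raid10_format_to_md_layout("near",   2) == 0x00102  (n = 2, f = 1)
 *   raid10_format_to_md_layout("far",    2) == 0x20201  (f = 2, n = 1)
 *   raid10_format_to_md_layout("offset", 2) == 0x30201  (f = 2, n = 1)
 *
 * Decoding runs the other way: raid10_md_layout_to_copies(0x20201)
 * returns 2 (the far count in bits 8-15), and
 * raid10_md_layout_to_format(0x30201) returns "offset" because both
 * bit 16 ("offset") and bit 17 ("use_far_sets") are set.
 */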
135 
136 static struct raid_type *get_raid_type(char *name)
137 {
138 	int i;
139 
140 	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
141 		if (!strcmp(raid_types[i].name, name))
142 			return &raid_types[i];
143 
144 	return NULL;
145 }
146 
147 static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
148 {
149 	unsigned i;
150 	struct raid_set *rs;
151 
152 	if (raid_devs <= raid_type->parity_devs) {
153 		ti->error = "Insufficient number of devices";
154 		return ERR_PTR(-EINVAL);
155 	}
156 
157 	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
158 	if (!rs) {
159 		ti->error = "Cannot allocate raid context";
160 		return ERR_PTR(-ENOMEM);
161 	}
162 
163 	mddev_init(&rs->md);
164 
165 	rs->ti = ti;
166 	rs->raid_type = raid_type;
167 	rs->md.raid_disks = raid_devs;
168 	rs->md.level = raid_type->level;
169 	rs->md.new_level = rs->md.level;
170 	rs->md.layout = raid_type->algorithm;
171 	rs->md.new_layout = rs->md.layout;
172 	rs->md.delta_disks = 0;
173 	rs->md.recovery_cp = 0;
174 
175 	for (i = 0; i < raid_devs; i++)
176 		md_rdev_init(&rs->dev[i].rdev);
177 
178 	/*
179 	 * Remaining items to be initialized by further RAID params:
180 	 *  rs->md.persistent
181 	 *  rs->md.external
182 	 *  rs->md.chunk_sectors
183 	 *  rs->md.new_chunk_sectors
184 	 *  rs->md.dev_sectors
185 	 */
186 
187 	return rs;
188 }
189 
190 static void context_free(struct raid_set *rs)
191 {
192 	int i;
193 
194 	for (i = 0; i < rs->md.raid_disks; i++) {
195 		if (rs->dev[i].meta_dev)
196 			dm_put_device(rs->ti, rs->dev[i].meta_dev);
197 		md_rdev_clear(&rs->dev[i].rdev);
198 		if (rs->dev[i].data_dev)
199 			dm_put_device(rs->ti, rs->dev[i].data_dev);
200 	}
201 
202 	kfree(rs);
203 }
204 
205 /*
206  * For every device we have two words
207  *  <meta_dev>: meta device name or '-' if missing
208  *  <data_dev>: data device name or '-' if missing
209  *
210  * The following are permitted:
211  *    - -
212  *    - <data_dev>
213  *    <meta_dev> <data_dev>
214  *
215  * The following is not allowed:
216  *    <meta_dev> -
217  *
218  * This code parses those words.  If there is a failure,
219  * the caller must use context_free to unwind the operations.
220  */
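/*
 * Illustrative device-pair words (hypothetical device numbers):
 *
 *     - -            no devices supplied for this slot
 *     - 8:17         data on 8:17, no metadata device
 *     254:0 254:1    metadata on 254:0, data on 254:1
 */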
221 static int dev_parms(struct raid_set *rs, char **argv)
222 {
223 	int i;
224 	int rebuild = 0;
225 	int metadata_available = 0;
226 	int ret = 0;
227 
228 	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
229 		rs->dev[i].rdev.raid_disk = i;
230 
231 		rs->dev[i].meta_dev = NULL;
232 		rs->dev[i].data_dev = NULL;
233 
234 		/*
235 		 * There are no offsets, since there is a separate device
236 		 * for data and metadata.
237 		 */
238 		rs->dev[i].rdev.data_offset = 0;
239 		rs->dev[i].rdev.mddev = &rs->md;
240 
241 		if (strcmp(argv[0], "-")) {
242 			ret = dm_get_device(rs->ti, argv[0],
243 					    dm_table_get_mode(rs->ti->table),
244 					    &rs->dev[i].meta_dev);
245 			rs->ti->error = "RAID metadata device lookup failure";
246 			if (ret)
247 				return ret;
248 
249 			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
250 			if (!rs->dev[i].rdev.sb_page)
251 				return -ENOMEM;
252 		}
253 
254 		if (!strcmp(argv[1], "-")) {
255 			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
256 			    (!rs->dev[i].rdev.recovery_offset)) {
257 				rs->ti->error = "Drive designated for rebuild not specified";
258 				return -EINVAL;
259 			}
260 
261 			rs->ti->error = "No data device supplied with metadata device";
262 			if (rs->dev[i].meta_dev)
263 				return -EINVAL;
264 
265 			continue;
266 		}
267 
268 		ret = dm_get_device(rs->ti, argv[1],
269 				    dm_table_get_mode(rs->ti->table),
270 				    &rs->dev[i].data_dev);
271 		if (ret) {
272 			rs->ti->error = "RAID device lookup failure";
273 			return ret;
274 		}
275 
276 		if (rs->dev[i].meta_dev) {
277 			metadata_available = 1;
278 			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
279 		}
280 		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
281 		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
282 		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
283 			rebuild++;
284 	}
285 
286 	if (metadata_available) {
287 		rs->md.external = 0;
288 		rs->md.persistent = 1;
289 		rs->md.major_version = 2;
290 	} else if (rebuild && !rs->md.recovery_cp) {
291 		/*
292 		 * Without metadata, we will not be able to tell if the array
293 		 * is in-sync or not - we must assume it is not.  Therefore,
294 		 * it is impossible to rebuild a drive.
295 		 *
296 		 * Even if there is metadata, the on-disk information may
297 		 * indicate that the array is not in-sync and it will then
298 		 * fail at that time.
299 		 *
300 		 * User could specify 'nosync' option if desperate.
301 		 */
302 		DMERR("Unable to rebuild drive while array is not in-sync");
303 		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
304 		return -EINVAL;
305 	}
306 
307 	return 0;
308 }
309 
310 /*
311  * validate_region_size
312  * @rs
313  * @region_size:  region size in sectors.  If 0, pick a size (4MiB default).
314  *
315  * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
316  * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
317  *
318  * Returns: 0 on success, -EINVAL on failure.
319  */
320 static int validate_region_size(struct raid_set *rs, unsigned long region_size)
321 {
322 	unsigned long min_region_size = rs->ti->len / (1 << 21);
323 
324 	if (!region_size) {
325 		/*
326 		 * Choose a reasonable default.  All figures in sectors.
327 		 */
328 		if (min_region_size > (1 << 13)) {
329 			/* If not a power of 2, round up to the next power of 2 */
330 			/* (roundup_pow_of_two() is a no-op for powers of 2) */
331 			region_size = roundup_pow_of_two(min_region_size);
332 			DMINFO("Choosing default region size of %lu sectors",
333 			       region_size);
334 		} else {
335 			DMINFO("Choosing default region size of 4MiB");
336 			region_size = 1 << 13; /* sectors */
337 		}
338 	} else {
339 		/*
340 		 * Validate user-supplied value.
341 		 */
342 		if (region_size > rs->ti->len) {
343 			rs->ti->error = "Supplied region size is too large";
344 			return -EINVAL;
345 		}
346 
347 		if (region_size < min_region_size) {
348 			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
349 			      region_size, min_region_size);
350 			rs->ti->error = "Supplied region size is too small";
351 			return -EINVAL;
352 		}
353 
354 		if (!is_power_of_2(region_size)) {
355 			rs->ti->error = "Region size is not a power of 2";
356 			return -EINVAL;
357 		}
358 
359 		if (region_size < rs->md.chunk_sectors) {
360 			rs->ti->error = "Region size is smaller than the chunk size";
361 			return -EINVAL;
362 		}
363 	}
364 
365 	/*
366 	 * Convert sectors to bytes.
367 	 */
368 	rs->md.bitmap_info.chunksize = (region_size << 9);
369 
370 	return 0;
371 }
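/*
 * Worked example (illustrative figures): for a 2 TiB target,
 * ti->len = 2^32 sectors, so min_region_size = 2^32 / 2^21 = 2048
 * sectors.  That is below the 2^13 threshold, so the 4MiB default
 * (8192 sectors) is chosen and bitmap_info.chunksize becomes
 * 8192 << 9 = 4194304 bytes.
 */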
372 
373 /*
374  * validate_raid_redundancy
375  * @rs
376  *
377  * Determine if there are enough devices in the array that haven't
378  * failed (or are being rebuilt) to form a usable array.
379  *
380  * Returns: 0 on success, -EINVAL on failure.
381  */
382 static int validate_raid_redundancy(struct raid_set *rs)
383 {
384 	unsigned i, rebuild_cnt = 0;
385 	unsigned rebuilds_per_group = 0, copies, d;
386 	unsigned group_size, last_group_start;
387 
388 	for (i = 0; i < rs->md.raid_disks; i++)
389 		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
390 		    !rs->dev[i].rdev.sb_page)
391 			rebuild_cnt++;
392 
393 	switch (rs->raid_type->level) {
394 	case 1:
395 		if (rebuild_cnt >= rs->md.raid_disks)
396 			goto too_many;
397 		break;
398 	case 4:
399 	case 5:
400 	case 6:
401 		if (rebuild_cnt > rs->raid_type->parity_devs)
402 			goto too_many;
403 		break;
404 	case 10:
405 		copies = raid10_md_layout_to_copies(rs->md.layout);
406 		if (rebuild_cnt < copies)
407 			break;
408 
409 		/*
410 		 * It is possible to have a higher rebuild count for RAID10,
411 		 * as long as the failed devices occur in different mirror
412 		 * groups (i.e. different stripes).
413 		 *
414 		 * When checking "near" format, make sure no adjacent devices
415 		 * have failed beyond what can be handled.  In addition to the
416 		 * simple case where the number of devices is a multiple of the
417 		 * number of copies, we must also handle cases where the number
418 		 * of devices is not a multiple of the number of copies.
419 		 * E.g.    dev1 dev2 dev3 dev4 dev5
420 		 *          A    A    B    B    C
421 		 *          C    D    D    E    E
422 		 */
423 		if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
424 			for (i = 0; i < rs->md.raid_disks * copies; i++) {
425 				if (!(i % copies))
426 					rebuilds_per_group = 0;
427 				d = i % rs->md.raid_disks;
428 				if ((!rs->dev[d].rdev.sb_page ||
429 				     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
430 				    (++rebuilds_per_group >= copies))
431 					goto too_many;
432 			}
433 			break;
434 		}
435 
436 		/*
437 		 * When checking "far" and "offset" formats, we need to ensure
438 		 * that the device that holds its copy is not also dead or
439 		 * being rebuilt.  (Note that "far" and "offset" formats only
440 		 * support two copies right now.  These formats also only ever
441 		 * use the 'use_far_sets' variant.)
442 		 *
443 		 * This check is somewhat complicated by the need to account
444 		 * for arrays that are not a multiple of (far) copies.  This
445 		 * results in the need to treat the last (potentially larger)
446 		 * set differently.
447 		 */
448 		group_size = (rs->md.raid_disks / copies);
449 		last_group_start = (rs->md.raid_disks / group_size) - 1;
450 		last_group_start *= group_size;
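		/*
		 * Numeric sketch (illustrative): with 5 devices and 2 copies,
		 * group_size = 5 / 2 = 2 and last_group_start =
		 * (5 / 2 - 1) * 2 = 2, so the final, larger set spans
		 * devices 2..4 and is handled by the single loop below.
		 */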
451 		for (i = 0; i < rs->md.raid_disks; i++) {
452 			if (!(i % copies) && !(i > last_group_start))
453 				rebuilds_per_group = 0;
454 			if ((!rs->dev[i].rdev.sb_page ||
455 			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
456 			    (++rebuilds_per_group >= copies))
457 				goto too_many;
458 		}
459 		break;
460 	default:
461 		if (rebuild_cnt)
462 			return -EINVAL;
463 	}
464 
465 	return 0;
466 
467 too_many:
468 	return -EINVAL;
469 }
470 
471 /*
472  * Possible arguments are...
473  *	<chunk_size> [optional_args]
474  *
475  * Argument definitions
476  *    <chunk_size>			The number of sectors per disk that
477  *                                      will form the "stripe"
478  *    [[no]sync]			Force or prevent recovery of the
479  *                                      entire array
480  *    [devices_handle_discard_safely]	Allow discards on RAID4/5/6; useful if RAID
481  *					member device(s) properly support TRIM/UNMAP
482  *    [rebuild <idx>]			Rebuild the drive indicated by the index
483  *    [daemon_sleep <ms>]		Time between bitmap daemon work to
484  *                                      clear bits
485  *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
486  *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
487  *    [write_mostly <idx>]		Indicate a write mostly drive via index
488  *    [max_write_behind <sectors>]	See '-write-behind=' (man mdadm)
489  *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
490  *    [region_size <sectors>]           Defines granularity of bitmap
491  *
492  * RAID10-only options:
493  *    [raid10_copies <# copies>]        Number of copies.  (Default: 2)
494  *    [raid10_format <near|far|offset>] Layout algorithm.  (Default: near)
495  */
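/*
 * Example (illustrative): the argument vector "64 region_size 8192
 * nosync" carries four parameter words: the mandatory 64-sector chunk
 * size, the region_size key/value pair, and the bare 'nosync' flag.
 */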
496 static int parse_raid_params(struct raid_set *rs, char **argv,
497 			     unsigned num_raid_params)
498 {
499 	char *raid10_format = "near";
500 	unsigned raid10_copies = 2;
501 	unsigned i;
502 	unsigned long value, region_size = 0;
503 	sector_t sectors_per_dev = rs->ti->len;
504 	sector_t max_io_len;
505 	char *key;
506 
507 	/*
508 	 * First, parse the in-order required arguments
509 	 * "chunk_size" is the only argument of this type.
510 	 */
511 	if (kstrtoul(argv[0], 10, &value) < 0) {
512 		rs->ti->error = "Bad chunk size";
513 		return -EINVAL;
514 	} else if (rs->raid_type->level == 1) {
515 		if (value)
516 			DMERR("Ignoring chunk size parameter for RAID 1");
517 		value = 0;
518 	} else if (!is_power_of_2(value)) {
519 		rs->ti->error = "Chunk size must be a power of 2";
520 		return -EINVAL;
521 	} else if (value < 8) {
522 		rs->ti->error = "Chunk size value is too small";
523 		return -EINVAL;
524 	}
525 
526 	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
527 	argv++;
528 	num_raid_params--;
529 
530 	/*
531 	 * We set each individual device as In_sync with a completed
532 	 * 'recovery_offset'.  If there has been a device failure or
533 	 * replacement then one of the following cases applies:
534 	 *
535 	 *   1) User specifies 'rebuild'.
536 	 *      - Device is reset when param is read.
537 	 *   2) A new device is supplied.
538 	 *      - No matching superblock found, resets device.
539 	 *   3) Device failure was transient and returns on reload.
540 	 *      - Failure noticed, resets device for bitmap replay.
541 	 *   4) Device hadn't completed recovery after previous failure.
542 	 *      - Superblock is read and overrides recovery_offset.
543 	 *
544 	 * What is found in the superblocks of the devices is always
545 	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
546 	 */
547 	for (i = 0; i < rs->md.raid_disks; i++) {
548 		set_bit(In_sync, &rs->dev[i].rdev.flags);
549 		rs->dev[i].rdev.recovery_offset = MaxSector;
550 	}
551 
552 	/*
553 	 * Second, parse the unordered optional arguments
554 	 */
555 	for (i = 0; i < num_raid_params; i++) {
556 		if (!strcasecmp(argv[i], "nosync")) {
557 			rs->md.recovery_cp = MaxSector;
558 			rs->print_flags |= DMPF_NOSYNC;
559 			continue;
560 		}
561 		if (!strcasecmp(argv[i], "sync")) {
562 			rs->md.recovery_cp = 0;
563 			rs->print_flags |= DMPF_SYNC;
564 			continue;
565 		}
566 
567 		/* The rest of the optional arguments come in key/value pairs */
568 		if ((i + 1) >= num_raid_params) {
569 			rs->ti->error = "Wrong number of raid parameters given";
570 			return -EINVAL;
571 		}
572 
573 		key = argv[i++];
574 
575 		/* Parameters that take a string value are checked here. */
576 		if (!strcasecmp(key, "raid10_format")) {
577 			if (rs->raid_type->level != 10) {
578 				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
579 				return -EINVAL;
580 			}
581 			if (strcmp("near", argv[i]) &&
582 			    strcmp("far", argv[i]) &&
583 			    strcmp("offset", argv[i])) {
584 				rs->ti->error = "Invalid 'raid10_format' value given";
585 				return -EINVAL;
586 			}
587 			raid10_format = argv[i];
588 			rs->print_flags |= DMPF_RAID10_FORMAT;
589 			continue;
590 		}
591 
592 		if (kstrtoul(argv[i], 10, &value) < 0) {
593 			rs->ti->error = "Bad numerical argument given in raid params";
594 			return -EINVAL;
595 		}
596 
597 		/* Parameters that take a numeric value are checked here */
598 		if (!strcasecmp(key, "rebuild")) {
599 			if (value >= rs->md.raid_disks) {
600 				rs->ti->error = "Invalid rebuild index given";
601 				return -EINVAL;
602 			}
603 			clear_bit(In_sync, &rs->dev[value].rdev.flags);
604 			rs->dev[value].rdev.recovery_offset = 0;
605 			rs->print_flags |= DMPF_REBUILD;
606 		} else if (!strcasecmp(key, "write_mostly")) {
607 			if (rs->raid_type->level != 1) {
608 				rs->ti->error = "write_mostly option is only valid for RAID1";
609 				return -EINVAL;
610 			}
611 			if (value >= rs->md.raid_disks) {
612 				rs->ti->error = "Invalid write_mostly drive index given";
613 				return -EINVAL;
614 			}
615 			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
616 		} else if (!strcasecmp(key, "max_write_behind")) {
617 			if (rs->raid_type->level != 1) {
618 				rs->ti->error = "max_write_behind option is only valid for RAID1";
619 				return -EINVAL;
620 			}
621 			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
622 
623 			/*
624 			 * In device-mapper, we specify things in sectors, but
625 			 * MD records this value in kB
626 			 */
627 			value /= 2;
628 			if (value > COUNTER_MAX) {
629 				rs->ti->error = "Max write-behind limit out of range";
630 				return -EINVAL;
631 			}
632 			rs->md.bitmap_info.max_write_behind = value;
633 		} else if (!strcasecmp(key, "daemon_sleep")) {
634 			rs->print_flags |= DMPF_DAEMON_SLEEP;
635 			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
636 				rs->ti->error = "daemon sleep period out of range";
637 				return -EINVAL;
638 			}
639 			rs->md.bitmap_info.daemon_sleep = value;
640 		} else if (!strcasecmp(key, "stripe_cache")) {
641 			rs->print_flags |= DMPF_STRIPE_CACHE;
642 
643 			/*
644 			 * In device-mapper, we specify things in sectors, but
645 			 * MD records this value in kB
646 			 */
647 			value /= 2;
648 
649 			if ((rs->raid_type->level != 5) &&
650 			    (rs->raid_type->level != 6)) {
651 				rs->ti->error = "Inappropriate argument: stripe_cache";
652 				return -EINVAL;
653 			}
654 			if (raid5_set_cache_size(&rs->md, (int)value)) {
655 				rs->ti->error = "Bad stripe_cache size";
656 				return -EINVAL;
657 			}
658 		} else if (!strcasecmp(key, "min_recovery_rate")) {
659 			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
660 			if (value > INT_MAX) {
661 				rs->ti->error = "min_recovery_rate out of range";
662 				return -EINVAL;
663 			}
664 			rs->md.sync_speed_min = (int)value;
665 		} else if (!strcasecmp(key, "max_recovery_rate")) {
666 			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
667 			if (value > INT_MAX) {
668 				rs->ti->error = "max_recovery_rate out of range";
669 				return -EINVAL;
670 			}
671 			rs->md.sync_speed_max = (int)value;
672 		} else if (!strcasecmp(key, "region_size")) {
673 			rs->print_flags |= DMPF_REGION_SIZE;
674 			region_size = value;
675 		} else if (!strcasecmp(key, "raid10_copies") &&
676 			   (rs->raid_type->level == 10)) {
677 			if ((value < 2) || (value > 0xFF)) {
678 				rs->ti->error = "Bad value for 'raid10_copies'";
679 				return -EINVAL;
680 			}
681 			rs->print_flags |= DMPF_RAID10_COPIES;
682 			raid10_copies = value;
683 		} else {
684 			DMERR("Unable to parse RAID parameter: %s", key);
685 			rs->ti->error = "Unable to parse RAID parameters";
686 			return -EINVAL;
687 		}
688 	}
689 
690 	if (validate_region_size(rs, region_size))
691 		return -EINVAL;
692 
693 	if (rs->md.chunk_sectors)
694 		max_io_len = rs->md.chunk_sectors;
695 	else
696 		max_io_len = region_size;
697 
698 	if (dm_set_target_max_io_len(rs->ti, max_io_len))
699 		return -EINVAL;
700 
701 	if (rs->raid_type->level == 10) {
702 		if (raid10_copies > rs->md.raid_disks) {
703 			rs->ti->error = "Not enough devices to satisfy specification";
704 			return -EINVAL;
705 		}
706 
707 		/*
708 		 * If the format is not "near", we only support
709 		 * two copies at the moment.
710 		 */
711 		if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
712 			rs->ti->error = "Too many copies for given RAID10 format.";
713 			return -EINVAL;
714 		}
715 
716 		/* (Len * #mirrors) / #devices */
717 		sectors_per_dev = rs->ti->len * raid10_copies;
718 		sector_div(sectors_per_dev, rs->md.raid_disks);
719 
720 		rs->md.layout = raid10_format_to_md_layout(raid10_format,
721 							   raid10_copies);
722 		rs->md.new_layout = rs->md.layout;
723 	} else if ((rs->raid_type->level > 1) &&
724 		   sector_div(sectors_per_dev,
725 			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
726 		rs->ti->error = "Target length not divisible by number of data devices";
727 		return -EINVAL;
728 	}
729 	rs->md.dev_sectors = sectors_per_dev;
730 
731 	/* Assume there are no metadata devices until the drives are parsed */
732 	rs->md.persistent = 0;
733 	rs->md.external = 1;
734 
735 	return 0;
736 }
737 
738 static void do_table_event(struct work_struct *ws)
739 {
740 	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
741 
742 	dm_table_event(rs->ti->table);
743 }
744 
745 static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
746 {
747 	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
748 
749 	if (rs->raid_type->level == 1)
750 		return md_raid1_congested(&rs->md, bits);
751 
752 	if (rs->raid_type->level == 10)
753 		return md_raid10_congested(&rs->md, bits);
754 
755 	return md_raid5_congested(&rs->md, bits);
756 }
757 
758 /*
759  * This structure is never routinely used by userspace, unlike md superblocks.
760  * Devices with this superblock should only ever be accessed via device-mapper.
761  */
762 #define DM_RAID_MAGIC 0x64526D44
763 struct dm_raid_superblock {
764 	__le32 magic;		/* "DmRd" */
765 	__le32 features;	/* Used to indicate possible future changes */
766 
767 	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
768 	__le32 array_position;	/* The position of this drive in the array */
769 
770 	__le64 events;		/* Incremented by md when superblock updated */
771 	__le64 failed_devices;	/* Bit field of devices to indicate failures */
772 
773 	/*
774 	 * This offset tracks the progress of the repair or replacement of
775 	 * an individual drive.
776 	 */
777 	__le64 disk_recovery_offset;
778 
779 	/*
780 	 * This offset tracks the progress of the initial array
781 	 * synchronisation/parity calculation.
782 	 */
783 	__le64 array_resync_offset;
784 
785 	/*
786 	 * RAID characteristics
787 	 */
788 	__le32 level;
789 	__le32 layout;
790 	__le32 stripe_sectors;
791 
792 	/* Remainder of a logical block is zero-filled when writing (see super_sync()). */
793 } __packed;
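/*
 * On-disk note: all fields are little-endian, so the magic 0x64526D44
 * is stored as the bytes 0x44 0x6D 0x52 0x64, i.e. ASCII "DmRd".  The
 * packed structure is 60 bytes; super_sync() zero-fills the remainder
 * of the logical block.
 */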
794 
795 static int read_disk_sb(struct md_rdev *rdev, int size)
796 {
797 	BUG_ON(!rdev->sb_page);
798 
799 	if (rdev->sb_loaded)
800 		return 0;
801 
802 	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
803 		DMERR("Failed to read superblock of device at position %d",
804 		      rdev->raid_disk);
805 		md_error(rdev->mddev, rdev);
806 		return -EINVAL;
807 	}
808 
809 	rdev->sb_loaded = 1;
810 
811 	return 0;
812 }
813 
814 static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
815 {
816 	int i;
817 	uint64_t failed_devices;
818 	struct dm_raid_superblock *sb;
819 	struct raid_set *rs = container_of(mddev, struct raid_set, md);
820 
821 	sb = page_address(rdev->sb_page);
822 	failed_devices = le64_to_cpu(sb->failed_devices);
823 
824 	for (i = 0; i < mddev->raid_disks; i++)
825 		if (!rs->dev[i].data_dev ||
826 		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
827 			failed_devices |= (1ULL << i);
828 
829 	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
830 
831 	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
832 	sb->features = cpu_to_le32(0);	/* No features yet */
833 
834 	sb->num_devices = cpu_to_le32(mddev->raid_disks);
835 	sb->array_position = cpu_to_le32(rdev->raid_disk);
836 
837 	sb->events = cpu_to_le64(mddev->events);
838 	sb->failed_devices = cpu_to_le64(failed_devices);
839 
840 	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
841 	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
842 
843 	sb->level = cpu_to_le32(mddev->level);
844 	sb->layout = cpu_to_le32(mddev->layout);
845 	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
846 }
847 
848 /*
849  * super_load
850  *
851  * This function creates a superblock if one is not found on the device
852  * and will decide which superblock to use if there's a choice.
853  *
854  * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
855  */
856 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
857 {
858 	int ret;
859 	struct dm_raid_superblock *sb;
860 	struct dm_raid_superblock *refsb;
861 	uint64_t events_sb, events_refsb;
862 
863 	rdev->sb_start = 0;
864 	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
865 	if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
866 		DMERR("superblock size of a logical block is invalid");
867 		return -EINVAL;
868 	}
869 
870 	ret = read_disk_sb(rdev, rdev->sb_size);
871 	if (ret)
872 		return ret;
873 
874 	sb = page_address(rdev->sb_page);
875 
876 	/*
877 	 * Two cases that we want to write new superblocks and rebuild:
878 	 * 1) New device (no matching magic number)
879 	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
880 	 */
881 	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
882 	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
883 		super_sync(rdev->mddev, rdev);
884 
885 		set_bit(FirstUse, &rdev->flags);
886 
887 		/* Force writing of superblocks to disk */
888 		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
889 
890 		/* Any superblock is better than none, choose that if given */
891 		return refdev ? 0 : 1;
892 	}
893 
894 	if (!refdev)
895 		return 1;
896 
897 	events_sb = le64_to_cpu(sb->events);
898 
899 	refsb = page_address(refdev->sb_page);
900 	events_refsb = le64_to_cpu(refsb->events);
901 
902 	return (events_sb > events_refsb) ? 1 : 0;
903 }
904 
905 static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
906 {
907 	int role;
908 	struct raid_set *rs = container_of(mddev, struct raid_set, md);
909 	uint64_t events_sb;
910 	uint64_t failed_devices;
911 	struct dm_raid_superblock *sb;
912 	uint32_t new_devs = 0;
913 	uint32_t rebuilds = 0;
914 	struct md_rdev *r;
915 	struct dm_raid_superblock *sb2;
916 
917 	sb = page_address(rdev->sb_page);
918 	events_sb = le64_to_cpu(sb->events);
919 	failed_devices = le64_to_cpu(sb->failed_devices);
920 
921 	/*
922 	 * Initialise to 1 if this is a new superblock.
923 	 */
924 	mddev->events = events_sb ? : 1;
925 
926 	/*
927 	 * Reshaping is not currently allowed
928 	 */
929 	if (le32_to_cpu(sb->level) != mddev->level) {
930 		DMERR("Reshaping arrays not yet supported. (RAID level change)");
931 		return -EINVAL;
932 	}
933 	if (le32_to_cpu(sb->layout) != mddev->layout) {
934 		DMERR("Reshaping arrays not yet supported. (RAID layout change)");
935 		DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
936 		DMERR("  Old layout: %s w/ %d copies",
937 		      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
938 		      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
939 		DMERR("  New layout: %s w/ %d copies",
940 		      raid10_md_layout_to_format(mddev->layout),
941 		      raid10_md_layout_to_copies(mddev->layout));
942 		return -EINVAL;
943 	}
944 	if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
945 		DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
946 		return -EINVAL;
947 	}
948 
949 	/* We can only change the number of devices in RAID1 right now */
950 	if ((rs->raid_type->level != 1) &&
951 	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
952 		DMERR("Reshaping arrays not yet supported. (device count change)");
953 		return -EINVAL;
954 	}
955 
956 	if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
957 		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
958 
959 	/*
960 	 * During load, we set FirstUse if a new superblock was written.
961 	 * There are two reasons we might not have a superblock:
962 	 * 1) The array is brand new - in which case, all of the
963 	 *    devices must have their In_sync bit set.  Also,
964 	 *    recovery_cp must be 0, unless forced.
965 	 * 2) This is a new device being added to an old array
966 	 *    and the new device needs to be rebuilt - in which
967 	 *    case the In_sync bit will /not/ be set and
968 	 *    recovery_cp must be MaxSector.
969 	 */
970 	rdev_for_each(r, mddev) {
971 		if (!test_bit(In_sync, &r->flags)) {
972 			DMINFO("Device %d specified for rebuild: "
973 			       "Clearing superblock", r->raid_disk);
974 			rebuilds++;
975 		} else if (test_bit(FirstUse, &r->flags))
976 			new_devs++;
977 	}
978 
979 	if (!rebuilds) {
980 		if (new_devs == mddev->raid_disks) {
981 			DMINFO("Superblocks created for new array");
982 			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
983 		} else if (new_devs) {
984 			DMERR("New device injected "
985 			      "into existing array without 'rebuild' "
986 			      "parameter specified");
987 			return -EINVAL;
988 		}
989 	} else if (new_devs) {
990 		DMERR("'rebuild' devices cannot be "
991 		      "injected into an array with other first-time devices");
992 		return -EINVAL;
993 	} else if (mddev->recovery_cp != MaxSector) {
994 		DMERR("'rebuild' specified while array is not in-sync");
995 		return -EINVAL;
996 	}
997 
998 	/*
999 	 * Now we set the Faulty bit for those devices that are
1000 	 * recorded in the superblock as failed.
1001 	 */
1002 	rdev_for_each(r, mddev) {
1003 		if (!r->sb_page)
1004 			continue;
1005 		sb2 = page_address(r->sb_page);
1006 		sb2->failed_devices = 0;
1007 
1008 		/*
1009 		 * Check for any device re-ordering.
1010 		 */
1011 		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
1012 			role = le32_to_cpu(sb2->array_position);
1013 			if (role != r->raid_disk) {
1014 				if (rs->raid_type->level != 1) {
1015 					rs->ti->error = "Cannot change device "
1016 						"positions in RAID array";
1017 					return -EINVAL;
1018 				}
1019 				DMINFO("RAID1 device #%d now at position #%d",
1020 				       role, r->raid_disk);
1021 			}
1022 
1023 			/*
1024 			 * Partial recovery is performed on
1025 			 * returning failed devices.
1026 			 */
1027 			if (failed_devices & (1ULL << role))
1028 				set_bit(Faulty, &r->flags);
1029 		}
1030 	}
1031 
1032 	return 0;
1033 }
1034 
1035 static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
1036 {
1037 	struct dm_raid_superblock *sb = page_address(rdev->sb_page);
1038 
1039 	/*
1040 	 * If mddev->events is not set, we know we have not yet initialized
1041 	 * the array.
1042 	 */
1043 	if (!mddev->events && super_init_validation(mddev, rdev))
1044 		return -EINVAL;
1045 
1046 	mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
1047 	rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
1048 	if (!test_bit(FirstUse, &rdev->flags)) {
1049 		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
1050 		if (rdev->recovery_offset != MaxSector)
1051 			clear_bit(In_sync, &rdev->flags);
1052 	}
1053 
1054 	/*
1055 	 * If a device comes back, set it as not In_sync and no longer faulty.
1056 	 */
1057 	if (test_bit(Faulty, &rdev->flags)) {
1058 		clear_bit(Faulty, &rdev->flags);
1059 		clear_bit(In_sync, &rdev->flags);
1060 		rdev->saved_raid_disk = rdev->raid_disk;
1061 		rdev->recovery_offset = 0;
1062 	}
1063 
1064 	clear_bit(FirstUse, &rdev->flags);
1065 
1066 	return 0;
1067 }
1068 
1069 /*
1070  * Analyse superblocks and select the freshest.
1071  */
1072 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
1073 {
1074 	int ret;
1075 	struct raid_dev *dev;
1076 	struct md_rdev *rdev, *tmp, *freshest;
1077 	struct mddev *mddev = &rs->md;
1078 
1079 	freshest = NULL;
1080 	rdev_for_each_safe(rdev, tmp, mddev) {
1081 		/*
1082 		 * Skipping super_load due to DMPF_SYNC will cause
1083 		 * the array to undergo initialization again as
1084 		 * though it were new.  This is the intended effect
1085 		 * of the "sync" directive.
1086 		 *
1087 		 * When reshaping capability is added, we must ensure
1088 		 * that the "sync" directive is disallowed during the
1089 		 * reshape.
1090 		 */
1091 		if (rs->print_flags & DMPF_SYNC)
1092 			continue;
1093 
1094 		if (!rdev->meta_bdev)
1095 			continue;
1096 
1097 		ret = super_load(rdev, freshest);
1098 
1099 		switch (ret) {
1100 		case 1:
1101 			freshest = rdev;
1102 			break;
1103 		case 0:
1104 			break;
1105 		default:
1106 			dev = container_of(rdev, struct raid_dev, rdev);
1107 			if (dev->meta_dev)
1108 				dm_put_device(ti, dev->meta_dev);
1109 
1110 			dev->meta_dev = NULL;
1111 			rdev->meta_bdev = NULL;
1112 
1113 			if (rdev->sb_page)
1114 				put_page(rdev->sb_page);
1115 
1116 			rdev->sb_page = NULL;
1117 
1118 			rdev->sb_loaded = 0;
1119 
1120 			/*
1121 			 * We might be able to salvage the data device
1122 			 * even though the meta device has failed.  For
1123 			 * now, we behave as though '- -' had been
1124 			 * set for this device in the table.
1125 			 */
1126 			if (dev->data_dev)
1127 				dm_put_device(ti, dev->data_dev);
1128 
1129 			dev->data_dev = NULL;
1130 			rdev->bdev = NULL;
1131 
1132 			list_del(&rdev->same_set);
1133 		}
1134 	}
1135 
1136 	if (!freshest)
1137 		return 0;
1138 
1139 	if (validate_raid_redundancy(rs)) {
1140 		rs->ti->error = "Insufficient redundancy to activate array";
1141 		return -EINVAL;
1142 	}
1143 
1144 	/*
1145 	 * Validation of the freshest device provides the source of
1146 	 * validation for the remaining devices.
1147 	 */
1148 	ti->error = "Unable to assemble array: Invalid superblocks";
1149 	if (super_validate(mddev, freshest))
1150 		return -EINVAL;
1151 
1152 	rdev_for_each(rdev, mddev)
1153 		if ((rdev != freshest) && super_validate(mddev, rdev))
1154 			return -EINVAL;
1155 
1156 	return 0;
1157 }
1158 
1159 /*
1160  * Enable/disable discard support on RAID set depending on
1161  * RAID level and discard properties of underlying RAID members.
1162  */
1163 static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
1164 {
1165 	int i;
1166 	bool raid456;
1167 
1168 	/* Assume discards not supported until after checks below. */
1169 	ti->discards_supported = false;
1170 
1171 	/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
1172 	raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
1173 
1174 	for (i = 0; i < rs->md.raid_disks; i++) {
1175 		struct request_queue *q;
1176 
1177 		if (!rs->dev[i].rdev.bdev)
1178 			continue;
1179 
1180 		q = bdev_get_queue(rs->dev[i].rdev.bdev);
1181 		if (!q || !blk_queue_discard(q))
1182 			return;
1183 
1184 		if (raid456) {
1185 			if (!q->limits.discard_zeroes_data)
1186 				return;
1187 			if (!devices_handle_discard_safely) {
1188 				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
1189 				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
1190 				return;
1191 			}
1192 		}
1193 	}
1194 
1195 	/* All RAID members properly support discards */
1196 	ti->discards_supported = true;
1197 
1198 	/*
1199 	 * RAID1 and RAID10 personalities require bio splitting,
1200 	 * RAID0/4/5/6 don't and process large discard bios properly.
1201 	 */
1202 	ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
1203 	ti->num_discard_bios = 1;
1204 }
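/*
 * Usage sketch (illustrative): the raid456 discard override referred
 * to above is the module parameter declared at the bottom of this
 * file, e.g.
 *
 *     modprobe dm-raid devices_handle_discard_safely=Y
 *
 * or, with the module already loaded:
 *
 *     echo Y > /sys/module/dm_raid/parameters/devices_handle_discard_safely
 */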
1205 
1206 /*
1207  * Construct a RAID1/10/4/5/6 mapping:
1208  * Args:
1209  *	<raid_type> <#raid_params> <raid_params>		\
1210  *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
1211  *
1212  * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
1213  * details on possible <raid_params>.
1214  */
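/*
 * Example table line (hypothetical devices and sizes):
 *
 *     0 976562176 raid raid5_ls 3 64 region_size 8192 \
 *         3 - 8:17 - 8:33 - 8:49
 *
 * i.e. a 3-device raid5_ls set with 64-sector chunks, an 8192-sector
 * region size, and no metadata devices.
 */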
1215 static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
1216 {
1217 	int ret;
1218 	struct raid_type *rt;
1219 	unsigned long num_raid_params, num_raid_devs;
1220 	struct raid_set *rs = NULL;
1221 
1222 	/* Must have at least <raid_type> <#raid_params> */
1223 	if (argc < 2) {
1224 		ti->error = "Too few arguments";
1225 		return -EINVAL;
1226 	}
1227 
1228 	/* raid type */
1229 	rt = get_raid_type(argv[0]);
1230 	if (!rt) {
1231 		ti->error = "Unrecognised raid_type";
1232 		return -EINVAL;
1233 	}
1234 	argc--;
1235 	argv++;
1236 
1237 	/* number of RAID parameters */
1238 	if (kstrtoul(argv[0], 10, &num_raid_params) < 0) {
1239 		ti->error = "Cannot understand number of RAID parameters";
1240 		return -EINVAL;
1241 	}
1242 	argc--;
1243 	argv++;
1244 
1245 	/* Skip over RAID params for now and find out # of devices */
1246 	if (num_raid_params + 1 > argc) {
1247 		ti->error = "Arguments do not agree with counts given";
1248 		return -EINVAL;
1249 	}
1250 
1251 	if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
1252 	    (num_raid_devs >= INT_MAX)) {
1253 		ti->error = "Cannot understand number of raid devices";
1254 		return -EINVAL;
1255 	}
1256 
1257 	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
1258 	if (IS_ERR(rs))
1259 		return PTR_ERR(rs);
1260 
1261 	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
1262 	if (ret)
1263 		goto bad;
1264 
1265 	ret = -EINVAL;
1266 
1267 	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
1268 	argv += num_raid_params + 1;
1269 
1270 	if (argc != (num_raid_devs * 2)) {
1271 		ti->error = "Number of supplied RAID devices does not match the count given";
1272 		goto bad;
1273 	}
1274 
1275 	ret = dev_parms(rs, argv);
1276 	if (ret)
1277 		goto bad;
1278 
1279 	rs->md.sync_super = super_sync;
1280 	ret = analyse_superblocks(ti, rs);
1281 	if (ret)
1282 		goto bad;
1283 
1284 	INIT_WORK(&rs->md.event_work, do_table_event);
1285 	ti->private = rs;
1286 	ti->num_flush_bios = 1;
1287 
1288 	/*
1289 	 * Disable/enable discard support on RAID set.
1290 	 */
1291 	configure_discard_support(ti, rs);
1292 
1293 	mutex_lock(&rs->md.reconfig_mutex);
1294 	ret = md_run(&rs->md);
1295 	rs->md.in_sync = 0; /* Assume already marked dirty */
1296 	mutex_unlock(&rs->md.reconfig_mutex);
1297 
1298 	if (ret) {
1299 		ti->error = "Failed to run raid array";
1300 		goto bad;
1301 	}
1302 
1303 	if (ti->len != rs->md.array_sectors) {
1304 		ti->error = "Array size does not match requested target length";
1305 		ret = -EINVAL;
1306 		goto size_mismatch;
1307 	}
1308 	rs->callbacks.congested_fn = raid_is_congested;
1309 	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
1310 
1311 	mddev_suspend(&rs->md);
1312 	return 0;
1313 
1314 size_mismatch:
1315 	md_stop(&rs->md);
1316 bad:
1317 	context_free(rs);
1318 
1319 	return ret;
1320 }
1321 
1322 static void raid_dtr(struct dm_target *ti)
1323 {
1324 	struct raid_set *rs = ti->private;
1325 
1326 	list_del_init(&rs->callbacks.list);
1327 	md_stop(&rs->md);
1328 	context_free(rs);
1329 }
1330 
1331 static int raid_map(struct dm_target *ti, struct bio *bio)
1332 {
1333 	struct raid_set *rs = ti->private;
1334 	struct mddev *mddev = &rs->md;
1335 
1336 	mddev->pers->make_request(mddev, bio);
1337 
1338 	return DM_MAPIO_SUBMITTED;
1339 }
1340 
1341 static const char *decipher_sync_action(struct mddev *mddev)
1342 {
1343 	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
1344 		return "frozen";
1345 
1346 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1347 	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
1348 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1349 			return "reshape";
1350 
1351 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1352 			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1353 				return "resync";
1354 			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1355 				return "check";
1356 			return "repair";
1357 		}
1358 
1359 		if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
1360 			return "recover";
1361 	}
1362 
1363 	return "idle";
1364 }
1365 
1366 static void raid_status(struct dm_target *ti, status_type_t type,
1367 			unsigned status_flags, char *result, unsigned maxlen)
1368 {
1369 	struct raid_set *rs = ti->private;
1370 	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
1371 	unsigned sz = 0;
1372 	int i, array_in_sync = 0;
1373 	sector_t sync;
1374 
1375 	switch (type) {
1376 	case STATUSTYPE_INFO:
1377 		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
1378 
1379 		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
1380 			sync = rs->md.curr_resync_completed;
1381 		else
1382 			sync = rs->md.recovery_cp;
1383 
1384 		if (sync >= rs->md.resync_max_sectors) {
1385 			/*
1386 			 * Sync complete.
1387 			 */
1388 			array_in_sync = 1;
1389 			sync = rs->md.resync_max_sectors;
1390 		} else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
1391 			/*
1392 			 * If "check" or "repair" is occurring, the array has
1393 			 * undergone an initial sync and the health characters
1394 			 * should not be 'a' anymore.
1395 			 */
1396 			array_in_sync = 1;
1397 		} else {
1398 			/*
1399 			 * The array may be doing an initial sync, or it may
1400 			 * be rebuilding individual components.  If all the
1401 			 * devices are In_sync, then it is the array that is
1402 			 * being initialized.
1403 			 */
1404 			for (i = 0; i < rs->md.raid_disks; i++)
1405 				if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
1406 					array_in_sync = 1;
1407 		}
1408 
1409 		/*
1410 		 * Status characters:
1411 		 *  'D' = Dead/Failed device
1412 		 *  'a' = Alive but not in-sync
1413 		 *  'A' = Alive and in-sync
1414 		 */
1415 		for (i = 0; i < rs->md.raid_disks; i++) {
1416 			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
1417 				DMEMIT("D");
1418 			else if (!array_in_sync ||
1419 				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
1420 				DMEMIT("a");
1421 			else
1422 				DMEMIT("A");
1423 		}
1424 
1425 		/*
1426 		 * In-sync ratio:
1427 		 *  The in-sync ratio shows the progress of:
1428 		 *   - Initializing the array
1429 		 *   - Rebuilding a subset of devices of the array
1430 		 *  The user can distinguish between the two by referring
1431 		 *  to the status characters.
1432 		 */
1433 		DMEMIT(" %llu/%llu",
1434 		       (unsigned long long) sync,
1435 		       (unsigned long long) rs->md.resync_max_sectors);
1436 
1437 		/*
1438 		 * Sync action:
1439 		 *   See Documentation/device-mapper/dm-raid.txt for
1440 		 *   information on each of these states.
1441 		 */
1442 		DMEMIT(" %s", decipher_sync_action(&rs->md));
1443 
1444 		/*
1445 		 * resync_mismatches/mismatch_cnt
1446 		 *   This field shows the number of discrepancies found when
1447 		 *   performing a "check" of the array.
1448 		 */
1449 		DMEMIT(" %llu",
1450 		       (strcmp(rs->md.last_sync_action, "check")) ? 0 :
1451 		       (unsigned long long)
1452 		       atomic64_read(&rs->md.resync_mismatches));
1453 		break;
1454 	case STATUSTYPE_TABLE:
1455 		/* The string you would use to construct this array */
1456 		for (i = 0; i < rs->md.raid_disks; i++) {
1457 			if ((rs->print_flags & DMPF_REBUILD) &&
1458 			    rs->dev[i].data_dev &&
1459 			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
1460 				raid_param_cnt += 2; /* for rebuilds */
1461 			if (rs->dev[i].data_dev &&
1462 			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
1463 				raid_param_cnt += 2;
1464 		}
1465 
1466 		raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
1467 		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
1468 			raid_param_cnt--;
1469 
1470 		DMEMIT("%s %u %u", rs->raid_type->name,
1471 		       raid_param_cnt, rs->md.chunk_sectors);
1472 
1473 		if ((rs->print_flags & DMPF_SYNC) &&
1474 		    (rs->md.recovery_cp == MaxSector))
1475 			DMEMIT(" sync");
1476 		if (rs->print_flags & DMPF_NOSYNC)
1477 			DMEMIT(" nosync");
1478 
1479 		for (i = 0; i < rs->md.raid_disks; i++)
1480 			if ((rs->print_flags & DMPF_REBUILD) &&
1481 			    rs->dev[i].data_dev &&
1482 			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
1483 				DMEMIT(" rebuild %u", i);
1484 
1485 		if (rs->print_flags & DMPF_DAEMON_SLEEP)
1486 			DMEMIT(" daemon_sleep %lu",
1487 			       rs->md.bitmap_info.daemon_sleep);
1488 
1489 		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
1490 			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
1491 
1492 		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
1493 			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
1494 
1495 		for (i = 0; i < rs->md.raid_disks; i++)
1496 			if (rs->dev[i].data_dev &&
1497 			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
1498 				DMEMIT(" write_mostly %u", i);
1499 
1500 		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
1501 			DMEMIT(" max_write_behind %lu",
1502 			       rs->md.bitmap_info.max_write_behind);
1503 
1504 		if (rs->print_flags & DMPF_STRIPE_CACHE) {
1505 			struct r5conf *conf = rs->md.private;
1506 
1507 			/* convert from kiB to sectors */
1508 			DMEMIT(" stripe_cache %d",
1509 			       conf ? conf->max_nr_stripes * 2 : 0);
1510 		}
1511 
1512 		if (rs->print_flags & DMPF_REGION_SIZE)
1513 			DMEMIT(" region_size %lu",
1514 			       rs->md.bitmap_info.chunksize >> 9);
1515 
1516 		if (rs->print_flags & DMPF_RAID10_COPIES)
1517 			DMEMIT(" raid10_copies %u",
1518 			       raid10_md_layout_to_copies(rs->md.layout));
1519 
1520 		if (rs->print_flags & DMPF_RAID10_FORMAT)
1521 			DMEMIT(" raid10_format %s",
1522 			       raid10_md_layout_to_format(rs->md.layout));
1523 
1524 		DMEMIT(" %d", rs->md.raid_disks);
1525 		for (i = 0; i < rs->md.raid_disks; i++) {
1526 			if (rs->dev[i].meta_dev)
1527 				DMEMIT(" %s", rs->dev[i].meta_dev->name);
1528 			else
1529 				DMEMIT(" -");
1530 
1531 			if (rs->dev[i].data_dev)
1532 				DMEMIT(" %s", rs->dev[i].data_dev->name);
1533 			else
1534 				DMEMIT(" -");
1535 		}
1536 	}
1537 }
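/*
 * Illustrative STATUSTYPE_INFO output for a healthy, fully synced
 * 3-device set (hypothetical sizes):
 *
 *     raid5_ls 3 AAA 976562176/976562176 idle 0
 *
 * i.e. name, device count, per-device health characters, in-sync
 * ratio, sync action and mismatch count, as emitted above.
 */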
1538 
1539 static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
1540 {
1541 	struct raid_set *rs = ti->private;
1542 	struct mddev *mddev = &rs->md;
1543 
1544 	if (!strcasecmp(argv[0], "reshape")) {
1545 		DMERR("Reshape not supported.");
1546 		return -EINVAL;
1547 	}
1548 
1549 	if (!mddev->pers || !mddev->pers->sync_request)
1550 		return -EINVAL;
1551 
1552 	if (!strcasecmp(argv[0], "frozen"))
1553 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1554 	else
1555 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1556 
1557 	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
1558 		if (mddev->sync_thread) {
1559 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1560 			md_reap_sync_thread(mddev);
1561 		}
1562 	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1563 		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
1564 		return -EBUSY;
1565 	else if (!strcasecmp(argv[0], "resync"))
1566 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1567 	else if (!strcasecmp(argv[0], "recover")) {
1568 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
1569 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1570 	} else {
1571 		if (!strcasecmp(argv[0], "check"))
1572 			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
1573 		else if (strcasecmp(argv[0], "repair"))
1574 			return -EINVAL;
1575 		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
1576 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
1577 	}
1578 	if (mddev->ro == 2) {
1579 		/* A write to sync_action is enough to justify
1580 		 * canceling read-auto mode
1581 		 */
1582 		mddev->ro = 0;
1583 		if (!mddev->suspended)
1584 			md_wakeup_thread(mddev->sync_thread);
1585 	}
1586 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1587 	if (!mddev->suspended)
1588 		md_wakeup_thread(mddev->thread);
1589 
1590 	return 0;
1591 }
1592 
1593 static int raid_iterate_devices(struct dm_target *ti,
1594 				iterate_devices_callout_fn fn, void *data)
1595 {
1596 	struct raid_set *rs = ti->private;
1597 	unsigned i;
1598 	int ret = 0;
1599 
1600 	for (i = 0; !ret && i < rs->md.raid_disks; i++)
1601 		if (rs->dev[i].data_dev)
1602 			ret = fn(ti,
1603 				 rs->dev[i].data_dev,
1604 				 0, /* No offset on data devs */
1605 				 rs->md.dev_sectors,
1606 				 data);
1607 
1608 	return ret;
1609 }
1610 
1611 static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
1612 {
1613 	struct raid_set *rs = ti->private;
1614 	unsigned chunk_size = rs->md.chunk_sectors << 9;
1615 	struct r5conf *conf = rs->md.private;
1616 
1617 	blk_limits_io_min(limits, chunk_size);
1618 	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
1619 }
1620 
1621 static void raid_presuspend(struct dm_target *ti)
1622 {
1623 	struct raid_set *rs = ti->private;
1624 
1625 	md_stop_writes(&rs->md);
1626 }
1627 
1628 static void raid_postsuspend(struct dm_target *ti)
1629 {
1630 	struct raid_set *rs = ti->private;
1631 
1632 	mddev_suspend(&rs->md);
1633 }
1634 
1635 static void attempt_restore_of_faulty_devices(struct raid_set *rs)
1636 {
1637 	int i;
1638 	uint64_t failed_devices, cleared_failed_devices = 0;
1639 	unsigned long flags;
1640 	struct dm_raid_superblock *sb;
1641 	struct md_rdev *r;
1642 
1643 	for (i = 0; i < rs->md.raid_disks; i++) {
1644 		r = &rs->dev[i].rdev;
1645 		if (test_bit(Faulty, &r->flags) && r->sb_page &&
1646 		    sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
1647 			DMINFO("Faulty %s device #%d has readable super block."
1648 			       "  Attempting to revive it.",
1649 			       rs->raid_type->name, i);
1650 
1651 			/*
1652 			 * Faulty bit may be set, but sometimes the array can
1653 			 * be suspended before the personalities can respond
1654 			 * by removing the device from the array (i.e. calling
1655 			 * 'hot_remove_disk').  If they haven't yet removed
1656 			 * the failed device, its 'raid_disk' number will be
1657 			 * '>= 0' - meaning we must call this function
1658 			 * ourselves.
1659 			 */
1660 			if ((r->raid_disk >= 0) &&
1661 			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
1662 				/* Failed to revive this device, try next */
1663 				continue;
1664 
1665 			r->raid_disk = i;
1666 			r->saved_raid_disk = i;
1667 			flags = r->flags;
1668 			clear_bit(Faulty, &r->flags);
1669 			clear_bit(WriteErrorSeen, &r->flags);
1670 			clear_bit(In_sync, &r->flags);
1671 			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
1672 				r->raid_disk = -1;
1673 				r->saved_raid_disk = -1;
1674 				r->flags = flags;
1675 			} else {
1676 				r->recovery_offset = 0;
1677 				cleared_failed_devices |= 1ULL << i;
1678 			}
1679 		}
1680 	}
1681 	if (cleared_failed_devices) {
1682 		rdev_for_each(r, &rs->md) {
1683 			sb = page_address(r->sb_page);
1684 			failed_devices = le64_to_cpu(sb->failed_devices);
1685 			failed_devices &= ~cleared_failed_devices;
1686 			sb->failed_devices = cpu_to_le64(failed_devices);
1687 		}
1688 	}
1689 }
1690 
1691 static void raid_resume(struct dm_target *ti)
1692 {
1693 	struct raid_set *rs = ti->private;
1694 
1695 	set_bit(MD_CHANGE_DEVS, &rs->md.flags);
1696 	if (!rs->bitmap_loaded) {
1697 		bitmap_load(&rs->md);
1698 		rs->bitmap_loaded = 1;
1699 	} else {
1700 		/*
1701 		 * A secondary resume while the device is active.
1702 		 * Take this opportunity to check whether any failed
1703 		 * devices are reachable again.
1704 		 */
1705 		attempt_restore_of_faulty_devices(rs);
1706 	}
1707 
1708 	clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
1709 	mddev_resume(&rs->md);
1710 }
1711 
1712 static struct target_type raid_target = {
1713 	.name = "raid",
1714 	.version = {1, 6, 0},
1715 	.module = THIS_MODULE,
1716 	.ctr = raid_ctr,
1717 	.dtr = raid_dtr,
1718 	.map = raid_map,
1719 	.status = raid_status,
1720 	.message = raid_message,
1721 	.iterate_devices = raid_iterate_devices,
1722 	.io_hints = raid_io_hints,
1723 	.presuspend = raid_presuspend,
1724 	.postsuspend = raid_postsuspend,
1725 	.resume = raid_resume,
1726 };
1727 
1728 static int __init dm_raid_init(void)
1729 {
1730 	DMINFO("Loading target version %u.%u.%u",
1731 	       raid_target.version[0],
1732 	       raid_target.version[1],
1733 	       raid_target.version[2]);
1734 	return dm_register_target(&raid_target);
1735 }
1736 
1737 static void __exit dm_raid_exit(void)
1738 {
1739 	dm_unregister_target(&raid_target);
1740 }
1741 
1742 module_init(dm_raid_init);
1743 module_exit(dm_raid_exit);
1744 
1745 module_param(devices_handle_discard_safely, bool, 0644);
1746 MODULE_PARM_DESC(devices_handle_discard_safely,
1747 		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
1748 
1749 MODULE_DESCRIPTION(DM_NAME " raid1/10/4/5/6 target");
1750 MODULE_ALIAS("dm-raid1");
1751 MODULE_ALIAS("dm-raid10");
1752 MODULE_ALIAS("dm-raid4");
1753 MODULE_ALIAS("dm-raid5");
1754 MODULE_ALIAS("dm-raid6");
1755 MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
1756 MODULE_LICENSE("GPL");
1757