// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * Inform the user of the RAID configuration.
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

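/*
 * A RAID0 array of unequal-sized members is laid out as a series of
 * "strip zones": zone 0 stripes across all members up to the size of the
 * smallest one, the next zone stripes across the remainder of whichever
 * members still have space, and so on.  As a worked example (illustrative
 * numbers, not from any real array): two 100-chunk disks plus one
 * 200-chunk disk yield two zones - zone 0 striping 3 x 100 chunks over
 * all three disks, and zone 1 covering the remaining 100 chunks of the
 * big disk alone.  nr_strip_zones counted below is therefore the number
 * of distinct member sizes.
 */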
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned short blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size down to a multiple of chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu) with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	/*
	 * now that we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/*
	 * The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all.
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ... contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/*
 * Find the zone which holds a particular offset and
 * update *sectorp to be an offset in that zone.
 */
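/*
 * A quick walk-through (numbers invented for the example): with two zones
 * ending at sectors 600 and 1000, *sectorp == 700 falls in zone 1, and on
 * return *sectorp becomes 700 - 600 = 100, the offset within that zone.
 */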
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device.  We separate two flows, a
 * power-of-2 flow and a general flow, for the sake of performance.
 */
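/*
 * A worked example with made-up numbers: chunk_sects = 8, zone->nb_dev = 3,
 * sector = *sector_offset = 100 (zone-relative).  Then
 * sect_in_chunk = 100 & 7 = 4 and chunk = 100 / (3 * 8) = 4, so
 * *sector_offset becomes 4 * 8 + 4 = 36 on the chosen member, and the
 * member itself is (100 >> 3) % 3 = 12 % 3 = 0, the zone's first device.
 */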
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);

		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk on the real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * Position the bio over the real device:
	 * real sector = chunk in device + start of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/*
		 * Calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * read ahead at least twice a whole stripe, i.e. the number
		 * of devices multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as
		 * it wants.  We consider this a configuration error: a
		 * larger chunksize should be used in that case.
		 */
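		/*
		 * Worked example (illustrative numbers only): 4 members with
		 * 512KiB chunks and 4KiB pages give
		 * stripe = 4 * 512KiB / 4KiB = 512 pages, so ra_pages is
		 * raised to at least 2 * 512 = 1024 pages (4MiB).
		 */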
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Is the I/O distributed over one or more chunks?
 */
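/*
 * Example (made-up numbers): with chunk_sects = 128, a bio at sector 100
 * spanning 20 sectors sits at offset 100 within its chunk, and
 * 100 + 20 <= 128, so it fits; the same bio starting at sector 120 would
 * cross into the next chunk and need a split.
 */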
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

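	/*
	 * Worked example (invented numbers): chunk = 128 sectors and
	 * nb_dev = 2 give stripe_size = 256.  For zone-relative start = 300:
	 * first_stripe_index = 1, start_disk_index = (300 - 256) / 128 = 0
	 * and start_disk_offset = (300 - 256) % 128 + 1 * 128 = 172, i.e.
	 * the discard begins 172 sectors into disk 0 of the zone.
	 */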
	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		generic_make_request(discard_bio);
	}
	bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return true;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));
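	/*
	 * "sectors" is now the room left in the current chunk.  As an
	 * illustration (made-up numbers): chunk_sects = 128 and
	 * bio_sector = 300 give sectors = 128 - (300 & 127) = 84, so any
	 * bio longer than 84 sectors is split at the chunk boundary below.
	 */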

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
	}

	zone = find_zone(mddev->private, &sector);
	tmp_dev = map_sector(mddev, zone, sector, &sector);
	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	generic_make_request(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - the number of disks must be even
	 *  - all mirrors must already be degraded
	 */
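	/*
	 * Note: the raid10 layout word stores near_copies in its low byte
	 * and far_copies in the next byte, so ((1 << 8) + 2) == 0x102 is
	 * exactly far_copies = 1, near_copies = 2 (a raid10-n2 array).
	 */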
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = -mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must already be faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;
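	/*
	 * E.g. (made-up size): an array of 200000 sectors is not a multiple
	 * of 128 sectors (64K), so the loop above halves chunksect to 64
	 * sectors (32K), which divides it exactly and is kept.
	 */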

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - provided it has the Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid0 can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");