/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

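/*
 * MD feature flags that raid0 cannot honour; a takeover to raid0
 * clears them via mddev_clear_unsupported_flags() below.
 */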
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

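/*
 * The array counts as congested as soon as any member's backing
 * device is congested for the queried operations (@bits).
 */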
static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

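/*
 * Strip zones handle members of unequal size: zone 0 stripes across
 * all devices up to the capacity of the smallest one; each later zone
 * stripes across only the devices that still have room left.
 * A sketch: two 1TiB disks plus one 2TiB disk give two zones - zone 0
 * stripes 3 x 1TiB across all three disks, zone 1 is the remaining
 * 1TiB of the big disk on its own.
 */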
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned short blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	/*
	 * Now that we have the hard sector sizes, we can make sure the
	 * chunk size is a multiple of that sector size.
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remaps the bio to the target device. We separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance.
 */
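/*
 * A sketch of the mapping with assumed numbers: chunk_sects = 128 and
 * a 4-device zone. A zone offset of 1000 sectors sits 1000 & 127 = 104
 * sectors into chunk 1000 >> 7 = 7 of the zone; that chunk lives on
 * device 7 % 4 = 3 as device-chunk 1000 / (4 * 128) = 1, so the offset
 * within that device's slice of the zone is 1 * 128 + 104 = 232.
 */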
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 *  position the bio over the real device
	 *  real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

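/*
 * Array capacity is the sum of each member's size rounded down to a
 * whole number of chunks.
 */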
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

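		/* advertise one chunk as the minimum and a whole
		 * stripe as the optimal IO size */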
		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe. i.e. number of devices
		 * multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants.  We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Does the IO fit entirely within one chunk, or does it spread over
 * two or more?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}

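/*
 * Handle a DISCARD that may span whole stripes: trim the bio at the
 * zone boundary if needed, work out the [dev_start, dev_end) range it
 * covers on each member, and issue one discard per device.
 */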
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkcg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		generic_make_request(discard_bio);
	}
	bio_endio(bio);
}

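/*
 * Map one bio onto a member device: split it at the first chunk
 * boundary it crosses, locate the zone that holds it, then remap it
 * to (member device, device offset).
 */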
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return true;
	}

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

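	/* number of sectors from bi_sector to the end of its chunk; the
	 * bio is split below if it extends past that boundary */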
	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
	}

	zone = find_zone(mddev->private, &sector);
	tmp_dev = map_sector(mddev, zone, sector, &sector);
	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	generic_make_request(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
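	/* 0x102: far_copies 1 in bits 15:8, near_copies 2 in bits 7:0 */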
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;
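	/* e.g. with array_sectors = 1000 this halves 128 -> 64 -> 32 ->
	 * 16 -> 8, since 1000 is a multiple of 8 but of no larger
	 * power of two */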

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - provided it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

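/*
 * raid0 holds no internal state and never retries IO, so there is
 * nothing to quiesce.
 */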
static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");