/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

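/*
 * The array is reported congested if any member device is congested.
 * Zone 0 always contains every member device, so polling the
 * backing_dev_info of each disk listed there covers the whole array.
 */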
static int raid0_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 */
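/*
 * Example of the resulting log for a two-zone array built from two
 * devices of different sizes (illustrative values):
 *
 *   md: RAID0 configuration for md0 - 2 zones
 *   md: zone0=[sda/sdb]
 *         zone-offset=         0KB, device-offset=         0KB, size=   2097152KB
 *   md: zone1=[sdb]
 *         zone-offset=   2097152KB, device-offset=   1048576KB, size=   1048576KB
 */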
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
	       mdname(mddev),
	       conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "md: zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s%s", k?"/":"",
			bdevname(conf->devlist[j*raid_disks
						+ k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "      zone-offset=%10lluKB, "
				"device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "\n");
}

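/*
 * Zone layout in brief: the number of zones equals the number of
 * distinct (chunk-rounded) device sizes.  Zone 0 stripes across all
 * devices up to the capacity of the smallest one; each further zone
 * begins where the previous smallest device ended and stripes across
 * only the devices large enough to extend beyond it.
 */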
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	bool discard_supported = false;

	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		disk_stack_limits(mddev->gendisk, rdev1->bdev,
				  rdev1->data_offset << 9);

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
			conf->has_merge_bvec = 1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;

		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
			discard_supported = true;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}

	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);

	if (!discard_supported)
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = NULL;
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device.  Two flows are kept separate for
 * the sake of performance: a power-of-2 chunk-size flow that uses
 * shifts and masks, and a general flow that needs full divisions.
 */
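/*
 * Worked example (illustrative numbers): with chunk_sects = 8 and a
 * single zone of nb_dev = 2, array sector 21 gives
 * sect_in_chunk = 21 & 7 = 5 and absolute chunk 21 >> 3 = 2, so the
 * target is device 2 % 2 = 0; the chunk index on that device is
 * 21 / (2 << 3) = 1, i.e. device sector 1 * 8 + 5 = 13 before
 * dev_start and data_offset are added by the caller.
 */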
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in the real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + start of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r0conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	sector_t sector_offset = sector;
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct strip_zone *zone;
	struct md_rdev *rdev;
	struct request_queue *subq;

	if (is_power_of_2(chunk_sectors))
		max = (chunk_sectors - ((sector & (chunk_sectors-1))
						+ bio_sectors)) << 9;
	else
		max = (chunk_sectors - (sector_div(sector, chunk_sectors)
						+ bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	if (max < biovec->bv_len)
		/* too small already, no need to check further */
		return max;
	if (!conf->has_merge_bvec)
		return max;

	/* May need to check subordinate device */
	sector = sector_offset;
	zone = find_zone(mddev->private, &sector_offset);
	rdev = map_sector(mddev, zone, sector, &sector_offset);
	subq = bdev_get_queue(rdev->bdev);
	if (subq->merge_bvec_fn) {
		bvm->bi_bdev = rdev->bdev;
		bvm->bi_sector = sector_offset + zone->dev_start +
			rdev->data_offset;
		return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
	} else
		return max;
}

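/*
 * The array size is simply the sum of the member device sizes; each
 * rdev->sectors value was already rounded down to a chunk multiple in
 * create_strip_zones().
 */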
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += rdev->sectors;

	return array_sectors;
}

static int raid0_stop(struct mddev *mddev);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by the chunk size, times two.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
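	/*
	 * Illustrative numbers: 4 disks with 512-sector (256KiB) chunks
	 * and 4KiB pages give stripe = 4 * 256KiB / 4KiB = 256 pages, so
	 * ra_pages is raised to at least 512 pages (2MiB), i.e. two full
	 * stripes.
	 */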
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		raid0_stop(mddev);

	return ret;
}

static int raid0_stop(struct mddev *mddev)
{
	struct r0conf *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

/*
 * Is the I/O distributed over one chunk or more?  Returns nonzero if
 * it fits entirely within a single chunk.
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else {
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}

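/*
 * Main I/O path: flushes are handed to the md core, a bio that
 * straddles a chunk boundary is split and each half fed back through
 * this function, and everything else is remapped onto the member
 * device that holds the target chunk.
 */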
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		raid0_make_request(mddev, &bp->bio1);
		raid0_make_request(mddev, &bp->bio2);
		bio_pair_release(bp);
		return;
	}

	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;

	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
		/* Just ignore it */
		bio_endio(bio, 0);
		return;
	}

	generic_make_request(bio);
	return;

bad_map:
	printk(KERN_ERR "md/raid0:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n",
	       mdname(mddev), chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

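/*
 * Takeover helpers: each of the functions below converts an array in
 * place by verifying that the source level's surviving data layout is
 * already identical to raid0 (e.g. a raid4/5 array whose parity disk
 * is the one that is missing) and then rebuilding the strip zones over
 * the same member devices.
 */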
static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - number of disks must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

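	/*
	 * e.g. (illustrative) an array of 1048572 sectors halves chunksect
	 * from 128 down to 4, the largest power of two dividing that size;
	 * 4 sectors is 2KiB, below a 4KiB PAGE_SIZE, so the check below
	 * would reject the takeover.
	 */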
	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - provided it has the raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid0 can only take over Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

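/* raid0 has no resync and no internal state to flush, so quiesce is a no-op. */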
static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");