xref: /openbmc/linux/drivers/md/raid0.c (revision fea966f7)
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid0.h"

static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev->private;
	mdk_rdev_t **devlist = conf->devlist;
	int i;

	for (i = 0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev->private;
	mdk_rdev_t **devlist = conf->devlist;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * Inform the user of the RAID configuration.
 */
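/*
 * Illustrative output (hypothetical two-member array; device names and
 * sizes are made up, not taken from a real run):
 *
 *   ******* md0 configuration *********
 *   zone0=[sda/sdb/]
 *           zone offset=0kb device offset=0kb size=20480kb
 *   zone1=[sdb/]
 *           zone offset=20480kb device offset=10240kb size=10240kb
 *   **********************************
 */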
static void dump_zones(mddev_t *mddev)
{
	int j, k, h;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev->private;

	printk(KERN_INFO "******* %s configuration *********\n",
		mdname(mddev));
	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s/",
			       bdevname(conf->devlist[j*mddev->raid_disks
						      + k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "        zone offset=%llukb "
				"device offset=%llukb size=%llukb\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "**********************************\n\n");
}

static int create_strip_zones(mddev_t *mddev)
{
	int i, c, j, err;
	sector_t curr_zone_end, sectors;
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return -ENOMEM;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "raid0: looking at %s\n",
			bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;
		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "raid0:   comparing %s(%llu)",
			       bdevname(rdev1->bdev, b),
			       (unsigned long long)rdev1->sectors);
			printk(KERN_CONT " with %s(%llu)\n",
			       bdevname(rdev2->bdev, b),
			       (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "raid0:   END\n");
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "raid0:   EQUAL\n");
				c = 1;
				break;
			}
			printk(KERN_INFO "raid0:   NOT EQUAL\n");
		}
		if (!c) {
			printk(KERN_INFO "raid0:   ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk(KERN_INFO "raid0: %d zones\n",
				conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "raid0: bad disk number %d - "
				"aborting!\n", j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "raid0: multiple devices for %d - "
				"aborting!\n", j);
			goto abort;
		}
		dev[j] = rdev1;

		disk_stack_limits(mddev->gendisk, rdev1->bdev,
				  rdev1->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one-page request is never in violation.
		 */
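		/*
		 * Example (hypothetical 4KiB pages): PAGE_SIZE >> 9 = 8
		 * sectors, so requests are capped at a single 4KiB page.
		 */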

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
			"aborting!\n", cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
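	/*
	 * Worked example (hypothetical, chunk-rounded member sizes of 10,
	 * 20 and 30 sectors): zone0 spans all 3 members, zone_end =
	 * 3 * 10 = 30; zone1 spans 2 members from dev_start 10, adding
	 * (20 - 10) * 2 = 20 (zone_end 50); zone2 spans 1 member from
	 * dev_start 20, adding (30 - 20) * 1 = 10 (zone_end 60), which
	 * accounts for the full 10 + 20 + 30 sectors.
	 */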
	for (i = 1; i < conf->nr_strip_zones; i++) {
		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		printk(KERN_INFO "raid0: zone %d\n", i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->devlist[j];
			printk(KERN_INFO "raid0: checking %s ...",
				bdevname(rdev->bdev, b));
			if (rdev->sectors <= zone->dev_start) {
				printk(KERN_CONT " nope.\n");
				continue;
			}
			printk(KERN_CONT " contained as device %d\n", c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				printk(KERN_INFO "  (%llu) is smallest!\n",
					(unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
			zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)smallest->sectors);
	}
	mddev->queue->unplug_fn = raid0_unplug;
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
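	/*
	 * For example (hypothetical): a member with 4096-byte logical
	 * blocks requires chunk_sectors << 9 to be a multiple of 4096;
	 * with common 512-byte blocks any chunk size passes this check.
	 */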
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "%s chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}

	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);

	printk(KERN_INFO "raid0: done.\n");
	mddev->private = conf;
	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return err;
}

/**
 *	raid0_mergeable_bvec -- tell the bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of the new bio
 *	@biovec: the request that could be merged to it
 *
 *	Return the number of bytes we can accept at this offset.
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if (is_power_of_2(chunk_sectors))
		max = (chunk_sectors - ((sector & (chunk_sectors-1))
						+ bio_sectors)) << 9;
	else
		max = (chunk_sectors - (sector_div(sector, chunk_sectors)
						+ bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
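
/*
 * Worked example (hypothetical values): with chunk_sectors = 8 and a
 * 2-sector bio starting at absolute sector 27, the power-of-2 branch
 * yields max = (8 - ((27 & 7) + 2)) << 9 = 3 << 9 = 1536 bytes, i.e.
 * three more sectors still fit before the chunk boundary.
 */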

static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	mdk_rdev_t *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	list_for_each_entry(rdev, &mddev->disks, same_set)
		array_sectors += rdev->sectors;

	return array_sectors;
}
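
/*
 * E.g. (hypothetical): the 10 + 20 + 30 sector members used in the
 * create_strip_zones example above export a 60-sector array, matching
 * the final zone_end.
 */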

static int raid0_run(mddev_t *mddev)
{
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0: chunk size must be set.\n");
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	ret = create_strip_zones(mddev);
	if (ret < 0)
		return ret;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "raid0: md_size is %llu sectors.\n",
		(unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * read ahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by the chunk size, times two.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
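	/*
	 * Example (hypothetical): 4 members with 64KiB chunks on a
	 * 4KiB-page system give stripe = 4 * 65536 / 4096 = 64 pages,
	 * so ra_pages is raised to at least 128 pages (512KiB).
	 */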
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);
	md_integrity_register(mddev);
	return 0;
}

static int raid0_stop(mddev_t *mddev)
{
	raid0_conf_t *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

/* Find the zone which holds a particular offset.
 * Update *sectorp to be an offset in that zone.
 */
static struct strip_zone *find_zone(struct raid0_private_data *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}
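
/*
 * Example (hypothetical zone_end values 30, 50, 60): an IO at sector 42
 * falls in zone 1 and *sectorp becomes 42 - 30 = 12.
 */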

/*
 * Remaps the bio to the target device. We separate two flows, a
 * power-of-2 flow and a general flow, for the sake of performance.
 */
static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	raid0_conf_t *conf = mddev->private;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in the real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device:
	 * real sector = chunk in device + start of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
			     + sector_div(sector, zone->nb_dev)];
}
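
/*
 * Worked example (hypothetical values): chunk_sects = 8, zone 0 with
 * nb_dev = 2, IO at array sector 27 (zone offset also 27):
 *   sect_in_chunk = 27 & 7 = 3, array chunk = 27 >> 3 = 3,
 *   device-local chunk = 27 / (2 << 3) = 1,
 *   *sector_offset = 1 * 8 + 3 = 11, device index = 3 % 2 = 1,
 * i.e. three sectors into the second chunk on the second member.
 */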

/*
 * Is the IO distributed over one or more chunks?
 */
static inline int is_io_in_chunk_boundary(mddev_t *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else {
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}
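
/*
 * Example (hypothetical): with chunk_sects = 8, a 2-sector bio at
 * sector 5 fits within its chunk (8 >= (5 & 7) + 2), while a 4-sector
 * bio at the same sector crosses the boundary and must be split.
 */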

static int raid0_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one-page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;
	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk(KERN_ERR "raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev->private;

	sector_t zone_size;
	sector_t zone_start = 0;
	h = 0;

	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, "      z%d", j);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->devlist[j*mddev->raid_disks + k]
						->bdev, b));

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");