xref: /openbmc/linux/drivers/md/raid0.c (revision b04b4f78)
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid0.h"

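/*
 * raid0 queues nothing itself: pending requests live on the member
 * devices, so unplugging the array means unplugging every member queue.
 */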
static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i = 0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

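/*
 * Report the array as congested as soon as any member's backing
 * device is congested: a request may be striped across any of them.
 */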
static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

static int create_strip_zones(mddev_t *mddev)
{
	int i, c, j;
	sector_t current_start, curr_zone_start;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * Count the number of 'same size groups': each distinct device
	 * size starts a new group, and each group becomes one strip zone.
	 */
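	/*
	 * E.g. (hypothetical sizes) four members of 100, 200, 200 and 300
	 * sectors have three distinct sizes, so they form three zones.
	 */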
	conf->nr_strip_zones = 0;

	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "raid0:   comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->sectors);
			printk(KERN_INFO " with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "raid0:   END\n");
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "raid0:   EQUAL\n");
				c = 1;
				break;
			}
			printk(KERN_INFO "raid0:   NOT EQUAL\n");
		}
		if (!c) {
			printk(KERN_INFO "raid0:   ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk(KERN_INFO "raid0: %d zones\n",
				conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);

	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;

	/* The first zone must contain all devices, so here we verify that
	 * every raid_disk slot maps to exactly one device while collecting
	 * them all.
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "raid0: bad disk number %d - "
				"aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk(KERN_ERR "raid0: multiple devices for %d - "
				"aborting!\n", j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
			"aborting!\n", cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->sectors = smallest->sectors * cnt;
	zone->zone_start = 0;

	current_start = smallest->sectors;
	curr_zone_start = zone->sectors;

	/* now do the other zones */
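	/*
	 * Each later zone starts where the previous smallest device ends
	 * and stripes only over the devices that extend past that point.
	 * E.g. (hypothetical sizes) members of 100, 200 and 300 sectors
	 * give zone 0 = 3 * 100, zone 1 = 2 * 100 and zone 2 = 1 * 100
	 * sectors, so all 600 sectors are used.
	 */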
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk(KERN_INFO "raid0: zone %d\n", i);
		zone->dev_start = current_start;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk(KERN_INFO "raid0: checking %s ...",
				bdevname(rdev->bdev, b));
			if (rdev->sectors <= current_start) {
				printk(KERN_INFO " nope.\n");
				continue;
			}
			printk(KERN_INFO " contained as device %d\n", c);
			zone->dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				printk(KERN_INFO "  (%llu) is smallest!\n",
					(unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		zone->sectors = (smallest->sectors - current_start) * c;
		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
			zone->nb_dev, (unsigned long long)zone->sectors);

		zone->zone_start = curr_zone_start;
		curr_zone_start += zone->sectors;

		current_start = smallest->sectors;
		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)current_start);
	}

	/* Now find an appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that is big enough.  We never consider the last
	 * strip though, as its size has no bearing on the efficacy of
	 * the hash table.
	 */
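	/*
	 * In effect conf->spacing becomes the size of the smallest
	 * contiguous run of zones (the last zone excluded) that is at
	 * least min_spacing sectors, and min_spacing is chosen so the
	 * resulting table stays within about one page of zone pointers.
	 */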
	conf->spacing = curr_zone_start;
	min_spacing = curr_zone_start;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i = 0; i < conf->nr_strip_zones-1; i++) {
		sector_t s = 0;
		for (j = i; j < conf->nr_strip_zones - 1 &&
				s < min_spacing; j++)
			s += conf->strip_zone[j].sectors;
		if (s >= min_spacing && s < conf->spacing)
			conf->spacing = s;
	}

	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk(KERN_INFO "raid0: done.\n");
	return 0;
 abort:
	return 1;
}

/**
 *	raid0_mergeable_bvec -- tell the bio layer whether two requests can be merged
 *	@q: request queue
 *	@bvm: properties of the new bio
 *	@biovec: the bio_vec that could be merged into it
 *
 *	Return the number of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

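	/*
	 * chunk_sectors is a power of two, so sector & (chunk_sectors - 1)
	 * is the offset within the chunk; max is then the number of bytes
	 * left in the chunk once the pending bio_sectors are accounted for.
	 */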
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	mdk_rdev_t *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	list_for_each_entry(rdev, &mddev->disks, same_set)
		array_sectors += rdev->sectors;

	return array_sectors;
}

static int raid0_run(mddev_t *mddev)
{
	unsigned cur = 0, i = 0, nb_zone;
	s64 sectors;
	raid0_conf_t *conf;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	conf = kmalloc(sizeof(raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones(mddev))
		goto out_free_conf;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
		(unsigned long long)mddev->array_sectors);
	printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
		(unsigned long long)conf->spacing);
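	/*
	 * nb_zone = ceil(array size / spacing), the number of hash slots.
	 * sector_div() needs a 32-bit divisor, so on 64-bit sector_t both
	 * values are shifted down (the shift is remembered in
	 * conf->sector_shift) until the spacing fits in 32 bits.
	 */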
	{
		sector_t s = raid0_size(mddev, 0, 0);
		sector_t space = conf->spacing;
		int round;
		conf->sector_shift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/* shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->sector_shift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}
	printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);

	printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
				nb_zone*sizeof(struct strip_zone*));
	conf->hash_table = kmalloc(sizeof(struct strip_zone *)*nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
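	/*
	 * hash_table[i] points at the zone containing sector i * spacing:
	 * advance 'cur' whenever the accumulated zone sizes fall behind the
	 * next spacing boundary, so a lookup only needs a short forward
	 * scan from the hashed zone.
	 */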
	sectors = conf->strip_zone[cur].sectors;

	conf->hash_table[0] = conf->strip_zone + cur;
	for (i = 1; i < nb_zone; i++) {
		while (sectors <= conf->spacing) {
			cur++;
			sectors += conf->strip_zone[cur].sectors;
		}
		sectors -= conf->spacing;
		conf->hash_table[i] = conf->strip_zone + cur;
	}
	if (conf->sector_shift) {
		conf->spacing >>= conf->sector_shift;
		/* round spacing up so that when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->spacing++;
	}

	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}

static int raid0_stop(mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

static int raid0_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t sector, rsect;
	const int rw = bio_data_dir(bio);
	int cpu;

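	/*
	 * raid0 has no way to order a barrier across its members, so
	 * barrier bios are rejected with -EOPNOTSUPP.
	 */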
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	chunk_sects = mddev->chunk_size >> 9;
	chunksect_bits = ffz(~chunk_sects);	/* log2 of the power-of-two chunk size */
	sector = bio->bi_sector;

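	/*
	 * A chunk lives entirely on one device, so a bio that crosses a
	 * chunk boundary must be split before it can be mapped.
	 */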
	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

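	/*
	 * Worked example (hypothetical array): two devices with 64KiB
	 * chunks (chunk_sects = 128, chunksect_bits = 7) and a request at
	 * sector 300 in zone 0: chunk = (300 >> 7) / 2 = 1, target device
	 * = (300 >> 7) % 2 = 0, sect_in_chunk = 300 & 127 = 44, so
	 * rsect = (1 << 7) + 0 + 44 = 172 on device 0.
	 */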
	{
		sector_t x = sector >> conf->sector_shift;
		sector_div(x, (u32)conf->spacing);
		zone = conf->hash_table[x];
	}

	while (sector >= zone->zone_start + zone->sectors)
		zone++;

	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);

	{
		sector_t x = (sector - zone->zone_start) >> chunksect_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = sector >> chunksect_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk(KERN_ERR "raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
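/* MD_DEBUG is #undef'd just above, so the zone dump below is compiled out. */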
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, "      z%d", j);
		if (conf->hash_table[h] == conf->strip_zone+j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev,b));

		seq_printf(seq, "] zs=%d ds=%d s=%d\n",
				conf->strip_zone[j].zone_start,
				conf->strip_zone[j].dev_start,
				conf->strip_zone[j].sectors);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");