/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/log2.h>

#define DM_MSG_PREFIX "striped"
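
/*
 * Once a stripe device has accumulated this many I/O errors, stop
 * scheduling further table events for it (see stripe_end_io()).
 */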
#define DM_IO_ERROR_THRESHOLD 15

struct stripe {
	struct dm_dev *dev;
	sector_t physical_start;

	atomic_t error_count;
};

struct stripe_c {
	uint32_t stripes;
	int stripes_shift;

	/* The size of this target / num. stripes */
	sector_t stripe_width;

	uint32_t chunk_size;
	int chunk_size_shift;

	/* Needed for handling events */
	struct dm_target *ti;

	/* Work struct used for triggering events */
	struct work_struct trigger_event;

	struct stripe stripe[];
};

/*
 * An event is triggered whenever a drive
 * drops out of a stripe volume.
 */
static void trigger_event(struct work_struct *work)
{
	struct stripe_c *sc = container_of(work, struct stripe_c,
					   trigger_event);
	dm_table_event(sc->ti->table);
}

static inline struct stripe_c *alloc_context(unsigned int stripes)
{
	size_t len;

	if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
			     stripes))
		return NULL;

	len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);

	return kmalloc(len, GFP_KERNEL);
}

/*
 * Parse a single <dev> <sector> pair
 */
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
		      unsigned int stripe, char **argv)
{
	unsigned long long start;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
		return -EINVAL;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &sc->stripe[stripe].dev);
	if (ret)
		return ret;

	sc->stripe[stripe].physical_start = start;

	return 0;
}

/*
 * Construct a striped mapping.
 * <number of stripes> <chunk size> [<dev_path> <offset>]+
 */
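/*
 * For illustration only (device names and sizes are hypothetical): a
 * two-way stripe with 128-sector (64 KiB) chunks could be set up with
 * something like
 *
 *	echo "0 <target_len> striped 2 128 /dev/sdb 0 /dev/sdc 0" | \
 *		dmsetup create my_stripe
 *
 * where <target_len> must divide evenly by the stripe count, and the
 * resulting per-stripe width by the chunk size, or the constructor
 * below rejects the table.
 */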
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct stripe_c *sc;
	sector_t width, tmp_len;
	uint32_t stripes;
	uint32_t chunk_size;
	int r;
	unsigned int i;

	if (argc < 2) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	if (kstrtouint(argv[0], 10, &stripes) || !stripes) {
		ti->error = "Invalid stripe count";
		return -EINVAL;
	}

	if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) {
		ti->error = "Invalid chunk_size";
		return -EINVAL;
	}

	width = ti->len;
	if (sector_div(width, stripes)) {
		ti->error = "Target length not divisible by number of stripes";
		return -EINVAL;
	}

	tmp_len = width;
	if (sector_div(tmp_len, chunk_size)) {
		ti->error = "Stripe width not divisible by chunk size";
		return -EINVAL;
	}

	/*
	 * Do we have enough arguments for that many stripes?
	 */
	if (argc != (2 + 2 * stripes)) {
		ti->error = "Not enough destinations specified";
		return -EINVAL;
	}

	sc = alloc_context(stripes);
	if (!sc) {
		ti->error = "Memory allocation for striped context failed";
		return -ENOMEM;
	}

	INIT_WORK(&sc->trigger_event, trigger_event);

	/* Set pointer to dm target; used in trigger_event */
	sc->ti = ti;
	sc->stripes = stripes;
	sc->stripe_width = width;
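
	/*
	 * Use shift-and-mask fast paths when the stripe count (and,
	 * below, the chunk size) is a power of two; -1 marks the
	 * sector_div() slow path in stripe_map_sector().
	 */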
	if (stripes & (stripes - 1))
		sc->stripes_shift = -1;
	else
		sc->stripes_shift = __ffs(stripes);

	r = dm_set_target_max_io_len(ti, chunk_size);
	if (r) {
		kfree(sc);
		return r;
	}

	ti->num_flush_bios = stripes;
	ti->num_discard_bios = stripes;
	ti->num_write_same_bios = stripes;
	ti->num_write_zeroes_bios = stripes;

	sc->chunk_size = chunk_size;
	if (chunk_size & (chunk_size - 1))
		sc->chunk_size_shift = -1;
	else
		sc->chunk_size_shift = __ffs(chunk_size);

	/*
	 * Get the stripe destinations.
	 */
	for (i = 0; i < stripes; i++) {
		argv += 2;

		r = get_stripe(ti, sc, i, argv);
		if (r < 0) {
			ti->error = "Couldn't parse stripe destination";
			while (i--)
				dm_put_device(ti, sc->stripe[i].dev);
			kfree(sc);
			return r;
		}
		atomic_set(&(sc->stripe[i].error_count), 0);
	}

	ti->private = sc;

	return 0;
}

static void stripe_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct stripe_c *sc = (struct stripe_c *) ti->private;

	for (i = 0; i < sc->stripes; i++)
		dm_put_device(ti, sc->stripe[i].dev);

	flush_work(&sc->trigger_event);
	kfree(sc);
}

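/*
 * Map a target-relative sector to a stripe number and the offset on
 * that stripe's device (before adding physical_start).  Chunks are
 * dealt round-robin across the stripes:
 *
 *	chunk        = sector / chunk_size
 *	chunk_offset = sector % chunk_size
 *	*stripe      = chunk % stripes
 *	*result      = (chunk / stripes) * chunk_size + chunk_offset
 *
 * Worked example (hypothetical values): chunk_size = 128, stripes = 2,
 * sector = 300 gives chunk 2 and offset 44, so stripe 0 at result
 * 1 * 128 + 44 = 172.
 */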
static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
			      uint32_t *stripe, sector_t *result)
{
	sector_t chunk = dm_target_offset(sc->ti, sector);
	sector_t chunk_offset;

	if (sc->chunk_size_shift < 0) {
		chunk_offset = sector_div(chunk, sc->chunk_size);
	} else {
		chunk_offset = chunk & (sc->chunk_size - 1);
		chunk >>= sc->chunk_size_shift;
	}

	if (sc->stripes_shift < 0) {
		*stripe = sector_div(chunk, sc->stripes);
	} else {
		*stripe = chunk & (sc->stripes - 1);
		chunk >>= sc->stripes_shift;
	}

	if (sc->chunk_size_shift < 0)
		chunk *= sc->chunk_size;
	else
		chunk <<= sc->chunk_size_shift;

	*result = chunk + chunk_offset;
}

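/*
 * Map a range boundary onto target_stripe.  If the boundary sector
 * falls on a different stripe, snap it to a chunk boundary on
 * target_stripe: down to the start of the chunk when target_stripe
 * comes after the stripe that was hit, up to the next chunk when it
 * comes before.
 */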
static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
				    uint32_t target_stripe, sector_t *result)
{
	uint32_t stripe;

	stripe_map_sector(sc, sector, &stripe, result);
	if (stripe == target_stripe)
		return;

	/* round down */
	sector = *result;
	if (sc->chunk_size_shift < 0)
		*result -= sector_div(sector, sc->chunk_size);
	else
		*result = sector & ~(sector_t)(sc->chunk_size - 1);

	if (target_stripe < stripe)
		*result += sc->chunk_size;		/* next chunk */
}

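/*
 * Clip a discard/write-same/write-zeroes bio to the part of the range
 * that lives on target_stripe.  One clone per stripe is issued (see
 * num_discard_bios et al. in stripe_ctr()); clones whose sub-range is
 * empty are completed immediately.
 */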
static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
			    uint32_t target_stripe)
{
	sector_t begin, end;

	stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
				target_stripe, &begin);
	stripe_map_range_sector(sc, bio_end_sector(bio),
				target_stripe, &end);
	if (begin < end) {
		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
		bio->bi_iter.bi_sector = begin +
			sc->stripe[target_stripe].physical_start;
		bio->bi_iter.bi_size = to_bytes(end - begin);
		return DM_MAPIO_REMAPPED;
	} else {
		/* The range doesn't map to the target stripe */
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}
}

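/*
 * Flushes are cloned once per stripe and sent to every device; range
 * ops (discard, write same, write zeroes) are clipped per stripe by
 * stripe_map_range().  Ordinary I/O never crosses a chunk boundary
 * because dm_set_target_max_io_len() capped bios at chunk_size, so a
 * single stripe_map_sector() remap suffices.
 */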
static int stripe_map(struct dm_target *ti, struct bio *bio)
{
	struct stripe_c *sc = ti->private;
	uint32_t stripe;
	unsigned target_bio_nr;

	if (bio->bi_opf & REQ_PREFLUSH) {
		target_bio_nr = dm_bio_get_target_bio_nr(bio);
		BUG_ON(target_bio_nr >= sc->stripes);
		bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
		return DM_MAPIO_REMAPPED;
	}
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
	    unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
	    unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
		target_bio_nr = dm_bio_get_target_bio_nr(bio);
		BUG_ON(target_bio_nr >= sc->stripes);
		return stripe_map_range(sc, bio, target_bio_nr);
	}

	stripe_map_sector(sc, bio->bi_iter.bi_sector,
			  &stripe, &bio->bi_iter.bi_sector);

	bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
	bio->bi_bdev = sc->stripe[stripe].dev->bdev;

	return DM_MAPIO_REMAPPED;
}

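/*
 * The DAX callbacks below all follow the same pattern: convert the
 * page offset to a sector, run it through stripe_map_sector(), add the
 * stripe's physical_start, and forward the call to the underlying
 * device's dax_device at the recomputed page offset.
 */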
static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	struct stripe_c *sc = ti->private;
	struct dax_device *dax_dev;
	struct block_device *bdev;
	uint32_t stripe;
	long ret;

	stripe_map_sector(sc, sector, &stripe, &dev_sector);
	dev_sector += sc->stripe[stripe].physical_start;
	dax_dev = sc->stripe[stripe].dev->dax_dev;
	bdev = sc->stripe[stripe].dev->bdev;

	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}

static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	struct stripe_c *sc = ti->private;
	struct dax_device *dax_dev;
	struct block_device *bdev;
	uint32_t stripe;

	stripe_map_sector(sc, sector, &stripe, &dev_sector);
	dev_sector += sc->stripe[stripe].physical_start;
	dax_dev = sc->stripe[stripe].dev->dax_dev;
	bdev = sc->stripe[stripe].dev->bdev;

	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
		size_t size)
{
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
	struct stripe_c *sc = ti->private;
	struct dax_device *dax_dev;
	struct block_device *bdev;
	uint32_t stripe;

	stripe_map_sector(sc, sector, &stripe, &dev_sector);
	dev_sector += sc->stripe[stripe].physical_start;
	dax_dev = sc->stripe[stripe].dev->dax_dev;
	bdev = sc->stripe[stripe].dev->bdev;

	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
		return;
	dax_flush(dax_dev, pgoff, addr, size);
}

/*
 * Stripe status:
 *
 * INFO
 * #stripes [stripe_name <stripe_name>] [group word count]
 * [error count 'A|D' <error count 'A|D'>]
 *
 * TABLE
 * #stripes [stripe chunk size]
 * [stripe_name physical_start <stripe_name physical_start>]
 */

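/*
 * For instance (values are hypothetical), a healthy two-device stripe
 * with 128-sector chunks might report:
 *
 *	INFO:  "2 8:16 8:32 1 AA"
 *	TABLE: "2 128 8:16 0 8:32 0"
 *
 * where 'A' marks a live device and 'D' one that has seen I/O errors.
 */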
static void stripe_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct stripe_c *sc = (struct stripe_c *) ti->private;
	char buffer[sc->stripes + 1];
	unsigned int sz = 0;
	unsigned int i;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", sc->stripes);
		for (i = 0; i < sc->stripes; i++) {
			DMEMIT("%s ", sc->stripe[i].dev->name);
			buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
				'D' : 'A';
		}
		buffer[i] = '\0';
		DMEMIT("1 %s", buffer);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%d %llu", sc->stripes,
			(unsigned long long)sc->chunk_size);
		for (i = 0; i < sc->stripes; i++)
			DMEMIT(" %s %llu", sc->stripe[i].dev->name,
			    (unsigned long long)sc->stripe[i].physical_start);
		break;
	}
}

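/*
 * On error completion, identify the failing device by formatting the
 * bio's bdev as "major:minor" and matching it against each stripe's
 * dm_dev name, then bump that stripe's error count.
 */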
static int stripe_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	unsigned i;
	char major_minor[16];
	struct stripe_c *sc = ti->private;

	if (!*error)
		return DM_ENDIO_DONE; /* I/O complete */

	if (bio->bi_opf & REQ_RAHEAD)
		return DM_ENDIO_DONE;

	if (*error == BLK_STS_NOTSUPP)
		return DM_ENDIO_DONE;

	memset(major_minor, 0, sizeof(major_minor));
	sprintf(major_minor, "%d:%d",
		MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
		MINOR(disk_devt(bio->bi_bdev->bd_disk)));

	/*
	 * Test to see which stripe drive triggered the event
	 * and increment error count for all stripes on that device.
	 * If the error count for a given device reaches the threshold
	 * value we will no longer trigger any further events.
	 */
	for (i = 0; i < sc->stripes; i++) {
		if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
			atomic_inc(&(sc->stripe[i].error_count));
			if (atomic_read(&(sc->stripe[i].error_count)) <
			    DM_IO_ERROR_THRESHOLD)
				schedule_work(&sc->trigger_event);
		}
	}

	return DM_ENDIO_DONE;
}

static int stripe_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct stripe_c *sc = ti->private;
	int ret = 0;
	unsigned i = 0;

	do {
		ret = fn(ti, sc->stripe[i].dev,
			 sc->stripe[i].physical_start,
			 sc->stripe_width, data);
	} while (!ret && ++i < sc->stripes);

	return ret;
}

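/*
 * Advertise the chunk size (in bytes) as the minimum I/O size and one
 * full stripe (chunk_size * stripes) as the optimal I/O size, so upper
 * layers can align and size requests to whole stripes.
 */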
static void stripe_io_hints(struct dm_target *ti,
			    struct queue_limits *limits)
{
	struct stripe_c *sc = ti->private;
	unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * sc->stripes);
}

static struct target_type stripe_target = {
	.name   = "striped",
	.version = {1, 6, 0},
	.features = DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr    = stripe_ctr,
	.dtr    = stripe_dtr,
	.map    = stripe_map,
	.end_io = stripe_end_io,
	.status = stripe_status,
	.iterate_devices = stripe_iterate_devices,
	.io_hints = stripe_io_hints,
	.direct_access = stripe_dax_direct_access,
	.dax_copy_from_iter = stripe_dax_copy_from_iter,
	.dax_flush = stripe_dax_flush,
};

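/*
 * dm-stripe is built into dm-mod; dm core invokes these init/exit
 * hooks directly from its own startup and teardown paths rather than
 * via module_init()/module_exit().
 */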
int __init dm_stripe_init(void)
{
	int r;

	r = dm_register_target(&stripe_target);
	if (r < 0)
		DMWARN("target registration failed");

	return r;
}

void dm_stripe_exit(void)
{
	dm_unregister_target(&stripe_target);
}