// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software (UK) Limited.
 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "flakey"

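/*
 * Probabilities for the random_read_corrupt and random_write_corrupt
 * features are expressed in parts per PROBABILITY_BASE, i.e. parts
 * per billion.
 */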
#define PROBABILITY_BASE	1000000000

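/* True iff every flag requested in corrupt_bio_flags is set on the bio. */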
#define all_corrupt_bio_flags_match(bio, fc)	\
	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)

/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;
	unsigned long start_time;
	sector_t start;
	unsigned int up_interval;
	unsigned int down_interval;
	unsigned long flags;
	unsigned int corrupt_bio_byte;
	unsigned int corrupt_bio_rw;
	unsigned int corrupt_bio_value;
	blk_opf_t corrupt_bio_flags;
	unsigned int random_read_corrupt;
	unsigned int random_write_corrupt;
};

enum feature_flag_bits {
	ERROR_READS,
	DROP_WRITES,
	ERROR_WRITES
};

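/* Per-bio state: whether the bio was submitted during a down interval. */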
struct per_bio_data {
	bool bio_submitted;
};

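/*
 * Parse the optional feature arguments. The accepted syntax is
 * documented in the comment above flakey_ctr() below.
 */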
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r;
	unsigned int argc;
	const char *arg_name;

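	/*
	 * Entries are referenced below by index (_args + n); keep the
	 * order in sync with the dm_read_arg() callers.
	 */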
	static const struct dm_arg _args[] = {
		{0, 11, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
		{0, PROBABILITY_BASE, "Invalid random corrupt argument"},
	};

	/* No feature arguments supplied. */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!arg_name) {
			ti->error = "Insufficient feature arguments";
			return -EINVAL;
		}

		/*
		 * error_reads
		 */
		if (!strcasecmp(arg_name, "error_reads")) {
			if (test_and_set_bit(ERROR_READS, &fc->flags)) {
				ti->error = "Feature error_reads duplicated";
				return -EINVAL;
			}
			continue;
		}

		/*
		 * drop_writes
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes conflicts with feature error_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * error_writes
		 */
		if (!strcasecmp(arg_name, "error_writes")) {
			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes duplicated";
				return -EINVAL;

			} else if (test_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes conflicts with feature drop_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (!argc) {
				ti->error = "Feature corrupt_bio_byte requires parameters";
				return -EINVAL;
			}

			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (arg_name && !strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (arg_name && !strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
				     sizeof(unsigned int));
			r = dm_read_arg(_args + 3, as,
				(__force unsigned int *)&fc->corrupt_bio_flags,
				&ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		if (!strcasecmp(arg_name, "random_read_corrupt")) {
			if (!argc) {
				ti->error = "Feature random_read_corrupt requires a parameter";
				return -EINVAL;
			}
			r = dm_read_arg(_args + 4, as, &fc->random_read_corrupt, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		if (!strcasecmp(arg_name, "random_write_corrupt")) {
			if (!argc) {
				ti->error = "Feature random_write_corrupt requires a parameter";
				return -EINVAL;
			}
			r = dm_read_arg(_args + 4, as, &fc->random_write_corrupt, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;

	} else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	}

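	/* Default when no feature was requested: error both reads and writes. */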
	if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
	    !test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) &&
	    !fc->random_read_corrupt && !fc->random_write_corrupt) {
		set_bit(ERROR_WRITES, &fc->flags);
		set_bit(ERROR_READS, &fc->flags);
	}

	return 0;
}

/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 *   Feature args:
 *     [error_reads]
 *     [drop_writes]
 *     [error_writes]
 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *     [random_read_corrupt <probability>]
 *     [random_write_corrupt <probability>]
 *
 *   Nth_byte starts from 1 for the first byte.
 *   Direction is r for READ or w for WRITE.
 *   bio_flags is ignored if 0.
 *   Probabilities are expressed in parts per PROBABILITY_BASE.
 */
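/*
 * Example table line (illustrative only; the device name and sector
 * counts are arbitrary): a 200 MiB mapping onto /dev/sdb1 that behaves
 * normally for 180 seconds, then errors all reads and writes (the
 * default features) for 20 seconds, repeating:
 *
 *     0 409600 flakey /dev/sdb1 0 180 20
 */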
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	static const struct dm_arg _args[] = {
		{0, UINT_MAX, "Invalid up interval"},
		{0, UINT_MAX, "Invalid down interval"},
	};

	int r;
	struct flakey_c *fc;
	unsigned long long tmpll;
	struct dm_arg_set as;
	const char *devname;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	if (argc < 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	fc->start_time = jiffies;

	devname = dm_shift_arg(&as);

	r = -EINVAL;
	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	fc->start = tmpll;

	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &fc->down_interval, &ti->error);
	if (r)
		goto bad;

	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
		r = -EINVAL;
		goto bad;
	}

	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
		r = -EINVAL;
		goto bad;
	}

	r = parse_features(&as, fc, ti);
	if (r)
		goto bad;

	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return r;
}

static void flakey_dtr(struct dm_target *ti)
{
	struct flakey_c *fc = ti->private;

	dm_put_device(ti, fc->dev);
	kfree(fc);
}

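/* Map a sector relative to the target to a sector on the underlying device. */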
static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *fc = ti->private;

	return fc->start + dm_target_offset(ti, bi_sector);
}

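/* Redirect a bio to the underlying device at the remapped sector. */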
static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;

	bio_set_dev(bio, fc->dev->bdev);
	bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
}

static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
			       unsigned char corrupt_bio_value)
{
	struct bvec_iter iter;
	struct bio_vec bvec;

	/*
	 * Overwrite the Nth byte of the bio's data, on whichever page
	 * it falls.
	 */
	bio_for_each_segment(bvec, bio, iter) {
		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
			unsigned char *segment = bvec_kmap_local(&bvec);

			segment[corrupt_bio_byte] = corrupt_bio_value;
			kunmap_local(segment);
			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
				bio, corrupt_bio_value, corrupt_bio_byte,
				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
				(unsigned long long)bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size);
			break;
		}
		corrupt_bio_byte -= bio_iter_len(bio, iter);
	}
}

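/*
 * Apply the fixed corrupt_bio_byte corruption. Nth_byte is 1-based in
 * the table syntax, 0-based internally.
 */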
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
{
	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;

	if (!bio_has_data(bio))
		return;

	corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value);
}

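/* Corrupt a randomly chosen byte with a randomly chosen value. */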
static void corrupt_bio_random(struct bio *bio)
{
	unsigned int corrupt_byte;
	unsigned char corrupt_value;

	if (!bio_has_data(bio))
		return;

	corrupt_byte = get_random_u32() % bio->bi_iter.bi_size;
	corrupt_value = get_random_u8();

	corrupt_bio_common(bio, corrupt_byte, corrupt_value);
}

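/* Free a clone created by clone_bio(), including its data pages. */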
static void clone_free(struct bio *clone)
{
	struct folio_iter fi;

	if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
		bio_for_each_folio_all(fi, clone)
			folio_put(fi.folio);
	}

	bio_uninit(clone);
	kfree(clone);
}

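/* Completion for a write clone: propagate status and complete the original bio. */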
static void clone_endio(struct bio *clone)
{
	struct bio *bio = clone->bi_private;

	bio->bi_status = clone->bi_status;
	clone_free(clone);
	bio_endio(bio);
}

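/*
 * Clone a write bio, copying its payload into freshly allocated pages,
 * so that corruption is applied to the copy and the submitter's buffer
 * is never modified. Oversized bios are first split with
 * dm_accept_partial_bio().
 */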
static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct bio *bio)
{
	struct bio *clone;
	unsigned size, remaining_size, nr_iovecs, order;
	struct bvec_iter iter = bio->bi_iter;

	if (unlikely(bio->bi_iter.bi_size > UIO_MAXIOV << PAGE_SHIFT))
		dm_accept_partial_bio(bio, UIO_MAXIOV << PAGE_SHIFT >> SECTOR_SHIFT);

	size = bio->bi_iter.bi_size;
	nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	clone = bio_kmalloc(nr_iovecs, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
	if (!clone)
		return NULL;

	bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf);

	clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
	clone->bi_private = bio;
	clone->bi_end_io = clone_endio;

	remaining_size = size;

	order = MAX_ORDER - 1;
	while (remaining_size) {
		struct page *pages;
		unsigned size_to_add, to_copy;
		unsigned char *virt;
		unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);

		order = min(order, remaining_order);

retry_alloc_pages:
		pages = alloc_pages(GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP, order);
		if (unlikely(!pages)) {
			if (order) {
				order--;
				goto retry_alloc_pages;
			}
			clone_free(clone);
			return NULL;
		}
		size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);

		virt = page_to_virt(pages);
		to_copy = size_to_add;
		do {
			struct bio_vec bvec = bvec_iter_bvec(bio->bi_io_vec, iter);
			unsigned this_step = min(bvec.bv_len, to_copy);
			void *map = bvec_kmap_local(&bvec);

			memcpy(virt, map, this_step);
			kunmap_local(map);

			bvec_iter_advance(bio->bi_io_vec, &iter, this_step);
			to_copy -= this_step;
			virt += this_step;
		} while (to_copy);

		__bio_add_page(clone, pages, size_to_add, 0);
		remaining_size -= size_to_add;
	}

	return clone;
}

static int flakey_map(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;
	unsigned int elapsed;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	pb->bio_submitted = false;

	if (op_is_zone_mgmt(bio_op(bio)))
		goto map_bio;

	/* Are we alive? */
	elapsed = (jiffies - fc->start_time) / HZ;
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		bool corrupt_fixed, corrupt_random;

		/*
		 * Flag this bio as submitted while down.
		 */
		pb->bio_submitted = true;

		/*
		 * Error reads immediately if error_reads was requested;
		 * otherwise flakey_end_io() decides whether the read
		 * should be modified.
		 */
		if (bio_data_dir(bio) == READ) {
			if (test_bit(ERROR_READS, &fc->flags))
				return DM_MAPIO_KILL;
			goto map_bio;
		}

		/*
		 * Drop or error writes?
		 */
		if (test_bit(DROP_WRITES, &fc->flags)) {
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		} else if (test_bit(ERROR_WRITES, &fc->flags)) {
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		/*
		 * Corrupt matching writes.
		 */
		corrupt_fixed = false;
		corrupt_random = false;
		if (fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) {
			if (all_corrupt_bio_flags_match(bio, fc))
				corrupt_fixed = true;
		}
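		/* Corrupt this write with probability random_write_corrupt / PROBABILITY_BASE. */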
		if (fc->random_write_corrupt) {
			u64 rnd = get_random_u64();
			u32 rem = do_div(rnd, PROBABILITY_BASE);

			if (rem < fc->random_write_corrupt)
				corrupt_random = true;
		}
		if (corrupt_fixed || corrupt_random) {
			struct bio *clone = clone_bio(ti, fc, bio);

			if (clone) {
				if (corrupt_fixed)
					corrupt_bio_data(clone, fc);
				if (corrupt_random)
					corrupt_bio_random(clone);
				submit_bio(clone);
				return DM_MAPIO_SUBMITTED;
			}
		}
	}

map_bio:
	flakey_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

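/*
 * Decide on completion whether a successful READ that was submitted
 * during a down interval should be corrupted or errored.
 */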
static int flakey_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (op_is_zone_mgmt(bio_op(bio)))
		return DM_ENDIO_DONE;

	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
		if (fc->corrupt_bio_byte) {
			if ((fc->corrupt_bio_rw == READ) &&
			    all_corrupt_bio_flags_match(bio, fc)) {
				/*
				 * Corrupt successful matching READs while in down state.
				 */
				corrupt_bio_data(bio, fc);
			}
		}
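		/* Corrupt this read with probability random_read_corrupt / PROBABILITY_BASE. */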
		if (fc->random_read_corrupt) {
			u64 rnd = get_random_u64();
			u32 rem = do_div(rnd, PROBABILITY_BASE);

			if (rem < fc->random_read_corrupt)
				corrupt_bio_random(bio);
		}
		if (test_bit(ERROR_READS, &fc->flags)) {
			/*
			 * Error the read: error_reads is set either
			 * explicitly or by default when no other feature
			 * was configured.
			 */
			*error = BLK_STS_IOERR;
		}
	}

	return DM_ENDIO_DONE;
}

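/*
 * Report status: nothing for STATUSTYPE_INFO and STATUSTYPE_IMA, the
 * constructor parameters for STATUSTYPE_TABLE so the table can be
 * round-tripped.
 */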
static void flakey_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;
	struct flakey_c *fc = ti->private;
	unsigned int error_reads, drop_writes, error_writes;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u %u", fc->dev->name,
		       (unsigned long long)fc->start, fc->up_interval,
		       fc->down_interval);

		error_reads = test_bit(ERROR_READS, &fc->flags);
		drop_writes = test_bit(DROP_WRITES, &fc->flags);
		error_writes = test_bit(ERROR_WRITES, &fc->flags);
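		/*
		 * Feature arg count: one per boolean flag, five for
		 * corrupt_bio_byte (keyword plus four parameters), two
		 * for each random_*_corrupt (keyword plus probability).
		 */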
		DMEMIT(" %u", error_reads + drop_writes + error_writes +
			(fc->corrupt_bio_byte > 0) * 5 +
			(fc->random_read_corrupt > 0) * 2 +
			(fc->random_write_corrupt > 0) * 2);

		if (error_reads)
			DMEMIT(" error_reads");
		if (drop_writes)
			DMEMIT(" drop_writes");
		else if (error_writes)
			DMEMIT(" error_writes");

		if (fc->corrupt_bio_byte)
			DMEMIT(" corrupt_bio_byte %u %c %u %u",
			       fc->corrupt_bio_byte,
			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
			       fc->corrupt_bio_value, fc->corrupt_bio_flags);

		if (fc->random_read_corrupt > 0)
			DMEMIT(" random_read_corrupt %u", fc->random_read_corrupt);
		if (fc->random_write_corrupt > 0)
			DMEMIT(" random_write_corrupt %u", fc->random_write_corrupt);

		break;

	case STATUSTYPE_IMA:
		result[0] = '\0';
		break;
	}
}

static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct flakey_c *fc = ti->private;

	*bdev = fc->dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (fc->start || ti->len != bdev_nr_sectors((*bdev)))
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int flakey_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct flakey_c *fc = ti->private;

	return dm_report_zones(fc->dev->bdev, fc->start,
			       flakey_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define flakey_report_zones NULL
#endif

static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *fc = ti->private;

	return fn(ti, fc->dev, fc->start, ti->len, data);
}

static struct target_type flakey_target = {
	.name   = "flakey",
	.version = {1, 5, 0},
	.features = DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
	.report_zones = flakey_report_zones,
	.module = THIS_MODULE,
	.ctr    = flakey_ctr,
	.dtr    = flakey_dtr,
	.map    = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.prepare_ioctl = flakey_prepare_ioctl,
	.iterate_devices = flakey_iterate_devices,
};
module_dm(flakey);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");