/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)
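
/*
 * Note: recovery copies from one leg to the remaining nr_mirrors - 1 legs
 * via kcopyd (see recover() below), and kcopyd supports at most
 * DM_KCOPYD_MAX_REGIONS destinations; hence a mirror set may have one leg
 * more than that.
 */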

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(struct timer_list *t)
{
	struct mirror_set *ms = from_timer(ms, t, timer);

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	add_timer(&ms->timer);
}
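
/*
 * Note: delayed_wake() coalesces wakeups.  Bit 0 of timer_pending acts as
 * a debounce: only the first caller arms the HZ/5 (~200ms) timer; later
 * callers return immediately until delayed_wake_fn() clears the bit.
 */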

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}
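
/*
 * Note: kmirrord is only woken on the empty -> non-empty transition of a
 * list; do_mirror() drains the whole list in one pass, so additional
 * wakeups would be redundant.
 */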

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
	struct mirror *m;
	/* if details->bi_disk == NULL, details were not saved */
	struct dm_bio_details details;
	region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}
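
/*
 * Note: because bi_next doubles as storage here, the stashed pointer must
 * be cleared with bio_set_m(bio, NULL) in the dm-io callbacks before the
 * bio is completed or requeued; read_callback() and write_callback() below
 * do exactly that.
 */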

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum dm_raid1_error values (DM_RAID1_*_ERROR)
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to the same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}
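
/*
 * Note: the zero-length regions above (sector = 0, count = 0) combined
 * with REQ_PREFLUSH make dm_io issue an empty flush to every leg in
 * parallel; on return, each set bit in error_bits identifies a leg whose
 * flush failed.
 */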

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* A read error means the default mirror has failed. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}
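
/*
 * Worked example of the write_err bit mapping above: with three legs and
 * leg 1 as the default mirror, the kcopyd destinations are legs 0 and 2,
 * so bit 0 of write_err refers to leg 0 and bit 1 refers to leg 2.
 */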

static void recover(struct mirror_set *ms, struct dm_region *reg)
{
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than region_size.
		 * Region sizes are powers of two, so the mask below
		 * computes ti->len % region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
		       flags, recovery_complete, reg);
}

static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh)))
		recover(ms, reg);

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}
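
/*
 * Worked example of the scan above: with three legs and default_mirror == 1,
 * the probe order is leg 1, then leg 0, then leg 2 (the pointer walks
 * backwards and wraps), returning the first leg with a zero error_count.
 */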

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}

/*
 * Remap a bio to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio_set_dev(bio, m->dev->bdev);
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If the device is suspended, complete the bio with an
		 * error or a requeue status.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio->bi_status = BLK_STS_DM_REQUEUE;
		else
			bio->bi_status = BLK_STS_IOERR;

		bio_endio(bio);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_data_dir(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_io_error(bio);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_READ,
		.bi_op_flags = 0,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}
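
/*
 * Note: both DM_RH_CLEAN and DM_RH_DIRTY count as in-sync for read
 * balancing; only DM_RH_NOSYNC and DM_RH_RECOVERING regions must be
 * served from the default mirror.
 */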

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_io_error(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, write to *all* mirrors (via dm_io)
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/


static void write_callback(unsigned long error, void *context)
{
	unsigned i;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's end_io function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the bio is a discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_op(bio) == REQ_OP_DISCARD) {
		io_req.bi_op = REQ_OP_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back onto the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_io_error(bio);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
	    offset != (sector_t)offset) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
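/*
 * An illustrative table line in the format above (sizes and device names
 * are hypothetical):
 *
 *   dmsetup create mirrored --table \
 *     "0 2097152 mirror core 2 1024 nosync 2 /dev/sda1 0 /dev/sdb1 0"
 *
 * i.e. a core log with a 1024-sector region size, no initial resync, and
 * two legs each starting at offset 0.
 */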
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	timer_setup(&ms->timer, delayed_wake_fn, 0);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors, and the
	 * sync state may therefore be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_disk = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return DM_MAPIO_KILL;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (bio->bi_opf & REQ_RAHEAD)
			return DM_MAPIO_KILL;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return DM_MAPIO_KILL;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_disk) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_disk = NULL;
			bio->bi_status = 0;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_disk = NULL;

	return DM_ENDIO_DONE;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Re-process bios on the hold list.  Since ms->suspend is now
	 * set, none of them can be re-added to the list; hold_bio()
	 * will either complete them with an error or requeue them to
	 * the core.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *    A => Alive - No failures
 *    F => Flush - A flush failure occurred
 *    D => Dead - A write failure occurred leaving mirror out-of-sync
 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
 *    R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

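/*
 * An illustrative "dmsetup status" line built from the INFO case below
 * (device numbers and counts are hypothetical):
 *
 *   0 2097152 mirror 2 253:4 253:5 2048/2048 1 AA 3 disk 253:3 A
 *
 * i.e. two legs, fully in sync (2048 of 2048 regions), both alive ("AA"),
 * followed by the dirty log's own status.
 */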
static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[MAX_NR_MIRRORS + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		      (unsigned long long)log->type->get_sync_count(log),
		      (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;
	}
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");