xref: /openbmc/linux/drivers/md/dm-raid1.c (revision 64c70b1c)
/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01

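/* Suspend waits here until all in-flight recovery completes (see mirror_postsuspend). */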
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of four states: clean, dirty, nosync,
 * recovering.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table, a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table.
 *   It is never held in write mode from interrupt context, so we
 *   only have to disable irqs when taking the write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, together with the 'state', 'list'
 *   and 'delayed_bios' fields of the regions.  This is used from
 *   irq context, so all other uses will have to suspend local
 *   irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;

	struct mirror *default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
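
/*
 * Example: with region_size = 1024 sectors (region_shift = 10), a bio
 * at sector ti->begin + 5000 falls in region 5000 >> 10 = 4, and
 * region 4 starts at sector 4 << 10 = 4096 within the mirror.
 */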

static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;
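	/*
	 * nr_buckets is now a power of two: at least 64, and roughly
	 * one bucket per 128 regions for large mirrors.
	 */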

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

#define RH_HASH_MULT 2654435387U

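/*
 * Multiplicative hashing: RH_HASH_MULT is a large odd constant, and the
 * shift by 12 folds the better-mixed high-order bits of the product
 * into the bucket index before masking.
 */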
242 {
243 	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
244 }
245 
246 static struct region *__rh_lookup(struct region_hash *rh, region_t region)
247 {
248 	struct region *reg;
249 
250 	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
251 		if (reg->key == region)
252 			return reg;
253 
254 	return NULL;
255 }
256 
257 static void __rh_insert(struct region_hash *rh, struct region *reg)
258 {
259 	unsigned int h = rh_hash(rh, reg->key);
260 	list_add(&reg->hash_list, rh->buckets + h);
261 }
262 
263 static struct region *__rh_alloc(struct region_hash *rh, region_t region)
264 {
265 	struct region *reg, *nreg;
266 
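	/*
	 * Called with hash_lock held for read.  Drop it across the
	 * allocation (which may sleep), then retake it in write mode to
	 * insert; the re-lookup below catches another CPU inserting the
	 * same region in the window.
	 */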
	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}

static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry (reg, &clean, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			list_del(&reg->hash_list);
		}
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}
	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);

	list_for_each_entry_safe (reg, next, &clean, list)
		mempool_free(reg, rh->region_pool);
}

static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.  We can now
		 * move the region to the list appropriate for its next
		 * action.  At this point, the region is not yet connected
		 * to any list.
		 *
		 * If the state is RH_NOSYNC, the region is kept off the
		 * clean list.  The hash entry for RH_NOSYNC will remain
		 * in memory until the region is recovered or the map is
		 * reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	list_add(&reg->list, &reg->rh->recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static void rh_flush(struct region_hash *rh)
{
	rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
612  * doesn't get submitted to the lower levels of block layer.
613  */
614 static struct mirror_set *bio_get_ms(struct bio *bio)
615 {
616 	return (struct mirror_set *) bio->bi_next;
617 }
618 
619 static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
620 {
621 	bio->bi_next = (struct bio *) ms;
622 }
623 
624 /*-----------------------------------------------------------------
625  * Recovery.
626  *
627  * When a mirror is first activated we may find that some regions
628  * are in the no-sync state.  We have to recover these by
629  * recopying from the default mirror to all the others.
630  *---------------------------------------------------------------*/
631 static void recovery_complete(int read_err, unsigned int write_err,
632 			      void *context)
633 {
634 	struct region *reg = (struct region *) context;
635 
636 	/* FIXME: better error handling */
637 	rh_recovery_end(reg, !(read_err || write_err));
638 }
639 
640 static int recover(struct mirror_set *ms, struct region *reg)
641 {
642 	int r;
643 	unsigned int i;
644 	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
645 	struct mirror *m;
646 	unsigned long flags = 0;
647 
648 	/* fill in the source */
649 	m = ms->default_mirror;
650 	from.bdev = m->dev->bdev;
651 	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
652 	if (reg->key == (ms->nr_regions - 1)) {
653 		/*
654 		 * The final region may be smaller than
655 		 * region_size.
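		 * (e.g. with a 1024-sector region_size and ti->len = 2500,
		 * from.count = 2500 & 1023 = 452 sectors).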
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 1))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm-io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
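	/*
	 * Note: 'uptodate' is computed above but not yet acted upon;
	 * the bio is completed without error either way.
	 */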
	bio_endio(bio, bio->bi_size, 0);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS + 1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

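	/*
	 * With .notify.fn set, dm_io() is asynchronous: it returns once
	 * the io is submitted, and write_callback() runs when all legs
	 * complete.
	 */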
	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	rh_flush(&ms->rh);

	/*
	 * Dispatch io.
	 */
	while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	memset(ms, 0, len);
	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}

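/*
 * A valid region size is a multiple of the page size (in sectors), a
 * power of two, and no larger than the target.
 */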
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
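 *
 * An illustrative table line (device paths are examples): a two-leg
 * mirror over a 1GiB (2097152-sector) target with a core log and
 * 1024-sector regions:
 *
 *   0 2097152 mirror core 1 1024 2 /dev/sda 0 /dev/sdb 0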
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		free_context(ms, ti, m);
		return -ENOMEM;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);

	r = parse_features(ms, argc, argv, &args_used);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	argv += args_used;
	argc -= args_used;

	if (argc) {
		ti->error = "Too many mirror arguments";
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return -EINVAL;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	wake(ms);
	return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
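	/*
	 * Only wake the daemon when the list goes non-empty; do_mirror
	 * drains the whole list in one pass.
	 */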
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu",
			(unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
			(unsigned long long)ms->nr_regions);

		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
				(unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 3},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("%s: Failed to register mirror target",
		      mirror_target.name);
		dm_dirty_log_exit();
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("%s: unregister failed %d", mirror_target.name, r);

	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");