/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>

#include "dm-snap.h"
#include "dm-bio-list.h"
#include "kcopyd.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Each snapshot reserves this many pages for io
 */
#define SNAPSHOT_PAGES 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Slab caches for completed and pending exceptions, plus a mempool
 * so pending exceptions can still be allocated on the I/O path.
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

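/*
 * Hash on the low byte of the dev_t.  Minor numbers tend to be
 * handed out more or less sequentially, so this should spread
 * origins fairly evenly over the ORIGIN_HASH_SIZE buckets.
 */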
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o;
	struct block_device *bdev = snap->origin->bdev;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (!o) {
		/* New origin */
		o = kmalloc(sizeof(*o), GFP_KERNEL);
		if (!o) {
			up_write(&_origins_lock);
			return -ENOMEM;
		}

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.  Table sizes must be
 * powers of 2, since lookups mask the chunk with (size - 1).
 */
static int init_exception_table(struct exception_table *et, uint32_t size)
{
	unsigned int i;

	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return chunk & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (e->old_chunk == chunk)
			return e;

	return NULL;
}

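/*
 * Exceptions are allocated on the I/O path: GFP_NOIO stops the
 * allocator recursing back into the block layer, and if that fails
 * we fall back on the atomic emergency reserves rather than give up
 * straight away.
 */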
static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	mempool_free(pe, pending_pool);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;
	e->new_chunk = new;
	insert_exception(&s->complete, e);
	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}
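
/*
 * With 16-byte list heads (the usual 64-bit layout) the 2MB budget
 * above works out to a cap of 131072 hash buckets.
 */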

/*
 * Rounds a number down to a power of 2.
 */
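/*
 * n & (n - 1) clears the lowest set bit, so looping until only one
 * bit remains leaves the highest set bit: for example
 * 100 -> 96 -> 64, i.e. round_down(100) == 64.
 */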
static uint32_t round_down(uint32_t n)
{
	while (n & (n - 1))
		n &= (n - 1);
	return n;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	/* Round it down to a power of 2 */
	hash_size = round_down(hash_size);
	if (init_exception_table(&s->complete, hash_size))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}
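
/*
 * For example, round_up(5, 4) == 8 and round_up(8, 4) == 8: adding
 * size - 1 and masking off the low bits rounds up without a divide.
 */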

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * The chunk size is expressed in 512-byte sectors and must be
	 * a multiple of the page size (PAGE_SIZE >> 9 sectors).
	 * Silently round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	s->last_percent = 0;
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->table = ti->table;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad6;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad6;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad6:
	kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	__free_exceptions(s);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count))
		origin_bios = bio_list_get(&primary_pe->origin_bios);

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	/*
	 * Free the primary pe if nothing references it.
	 */
	if (primary_pe && !atomic_read(&primary_pe->ref_count))
		free_pending_exception(primary_pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_exception(&s->complete, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
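	/*
	 * The origin need not end on a chunk boundary, so the final
	 * chunk may be partial: never copy past the end of the device.
	 */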
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	kcopyd_copy(s->kcopyd_client,
		    &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception();
	down_write(&s->lock);

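	/*
	 * The lock was dropped above, so the snapshot may have been
	 * invalidated, or someone else may have raced in and created
	 * a pending exception for this chunk: recheck both cases.
	 */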
	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->snap = s;
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}

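/*
 * Redirect a bio into the COW device: keep its offset within the
 * chunk but rebase it onto the start of the remapped (new) chunk.
 */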
static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
		(bio->bi_sector & s->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

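	/* Recheck under the lock: the snapshot may have been
	 * invalidated since the unlocked test above. */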
	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else
		/*
		 * FIXME: this read path scares me because we
		 * always use the origin when we have a pending
		 * exception.  However I can't think of a
		 * situation where this is wrong - ejt.
		 */
		bio->bi_bdev = s->origin->bdev;

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					(unsigned long long)numerator,
					(unsigned long long)denominator);
			} else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

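/*
 * min() that treats zero as "unset", used below to pick the smallest
 * chunk size among all the snapshots hanging off an origin.
 */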
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min((l), (r))))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	pending_pool = mempool_create_slab_pool(128, pending_cache);
	if (!pending_pool) {
		DMERR("Couldn't create pending pool.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad6;
	}

	return 0;

      bad6:
	mempool_destroy(pending_pool);
      bad5:
	kmem_cache_destroy(pending_cache);
      bad4:
	kmem_cache_destroy(exception_cache);
      bad3:
	exit_origin_hash();
      bad2:
	dm_unregister_target(&origin_target);
      bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	mempool_destroy(pending_pool);
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");