/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include "dm-bufio.h"

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

#define DM_PREFETCH_CHUNKS		12

/*-----------------------------------------------------------------
 * Persistent snapshots: by "persistent" we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk-sized blocks.
 *
 * No backward or forward compatibility is implemented;
 * snapshots with a disk version different from the kernel's
 * will not be usable.  It is expected that "lvcreate" will
 * blank out the start of a fresh COW device before calling
 * the snapshot constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
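
/*
 * To make that layout concrete, suppose (purely for illustration)
 * that four exceptions fit in a metadata area.  The COW device would
 * then look like this:
 *
 *	chunk 0:	header
 *	chunk 1:	metadata area 0
 *	chunks 2-5:	exception data chunks
 *	chunk 6:	metadata area 1
 *	chunks 7-10:	exception data chunks
 *	...
 *
 * i.e. successive metadata areas sit exceptions_per_area + 1 chunks
 * apart, which is what area_location() below computes.
 */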

/*
 * Magic for persistent snapshots: "SnAp" - feeble, isn't it?
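 *
 * The define below is just that string read back as a little-endian
 * 32-bit integer: the bytes 'S' 'n' 'A' 'p' are 0x53 0x6e 0x41 0x70,
 * which a little-endian load turns into 0x70416e53.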
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;
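
/*
 * For illustration (values assumed, not mandated): a freshly created
 * snapshot using the default 32-sector (16KiB) chunk size begins its
 * header chunk with the bytes
 *
 *	53 6e 41 70  01 00 00 00  01 00 00 00  20 00 00 00
 *	magic        valid = 1    version = 1  chunk_size = 32
 *
 * and write_header() zeroes the remainder of the chunk.
 */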

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'area' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
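	 *
	 * To illustrate such a hole (figures assumed): if chunks 10 and 12
	 * have been committed but the machine crashes before chunk 11 is,
	 * re-reading the store leaves chunk 11 permanently unused while
	 * next_free resumes at 13.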
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;
	vfree(ps->zero_area);
	ps->zero_area = NULL;
	vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk-aligned and chunk-sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
		    int op_flags, int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_op = op,
		.bi_op_flags = op_flags,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);
	destroy_work_on_stack(&req.work);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
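 *
 * For example, with the default 16KiB chunk size each area holds
 * 16384 / sizeof(struct disk_exception) = 1024 exceptions, so the
 * stride between metadata chunks is 1025: area 0 lives at chunk 1,
 * area 1 at chunk 1026, area 2 at chunk 2051, and so on.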
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

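/*
 * ps->next_free tracks data chunks, so if it has landed on a chunk that
 * area_location() would hand out for metadata, step over it.  Continuing
 * the example above (stride 1025), metadata lives at chunks 1, 1026,
 * 2051, ..., so a next_free of 1026 would be bumped to 1027.
 */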
static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;

	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, int op, int op_flags)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, op, op_flags, 0);
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area),
			REQ_OP_WRITE, 0, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
					    uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, ps_area, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is set to indicate whether the area was completely
 * filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, ps_area, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, i.e. the header chunk, we know
		 * that we've hit the end of the exceptions.
		 * Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;
	struct dm_bufio_client *client;
	chunk_t prefetch_area = 0;

	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
					ps->store->chunk_size << SECTOR_SHIFT,
					1, 0, NULL, NULL);

	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * Set up one current buffer plus the desired number of
	 * readahead buffers.
	 */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		struct dm_buffer *bp;
		void *area;
		chunk_t chunk;

		if (unlikely(prefetch_area < ps->current_area))
			prefetch_area = ps->current_area;

		if (DM_PREFETCH_CHUNKS) do {
			chunk_t pf_chunk = area_location(ps, prefetch_area);
			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
				break;
			dm_bufio_prefetch(client, pf_chunk, 1);
			prefetch_area++;
			if (unlikely(!prefetch_area))
				break;
		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);

		chunk = area_location(ps, ps->current_area);

		area = dm_bufio_read(client, chunk, &bp);
		if (IS_ERR(area)) {
			r = PTR_ERR(area);
			goto ret_destroy_bufio;
		}

		r = insert_exceptions(ps, area, callback, callback_context,
				      &full);

		if (!full)
			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

		dm_bufio_release(bp);

		dm_bufio_forget(client, chunk);

		if (unlikely(r))
			goto ret_destroy_bufio;
	}

	ps->current_area--;

	skip_metadata(ps);

	r = 0;

ret_destroy_bufio:
	dm_bufio_client_destroy(client);

	return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
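	 *
	 * For example (illustrative figures): with a 32-sector chunk size
	 * and current_area == 2 this reports (2 + 1 + 1) * 32 = 128
	 * metadata sectors.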
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * The metadata is valid, but the snapshot itself has been
	 * invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free chunk, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e, int valid,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	if (!valid)
		ps->valid = 0;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area, there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
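	 *
	 * The write carries REQ_PREFLUSH and REQ_FUA: the preflush makes
	 * sure the data chunks already copied by kcopyd are on stable
	 * storage before the metadata that references them, and FUA makes
	 * the metadata write itself durable before we run the callbacks.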
	 */
	if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When the current area is empty, move back to the preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, REQ_OP_READ, 0);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->area,
			       ps->current_committed - 1 - nr_consecutive, &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has started merging, we set it to the value it
	 * would have held had all the exceptions been committed in order
	 * of allocation.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
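	 *
	 * For example (illustrative figures): with a stride of 1025 chunks,
	 * current_area == 1 and current_committed == 10 after the merge,
	 * next_free becomes area_location(1) + 10 + 1 = 1026 + 11 = 1037.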
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store, char *options)
{
	struct pstore *ps;
	int r;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		DMERR("couldn't start header metadata update thread");
		r = -ENOMEM;
		goto err_workqueue;
	}

	if (options) {
		char overflow = toupper(options[0]);

		if (overflow == 'O')
			store->userspace_supports_overflow = true;
		else {
			DMERR("Unsupported persistent store option: %s", options);
			r = -EINVAL;
			goto err_options;
		}
	}

	store->context = ps;

	return 0;

err_options:
	destroy_workqueue(ps->metadata_wq);
err_workqueue:
	kfree(ps);

	return r;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
		       (unsigned long long)store->chunk_size);
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

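/*
 * The same store is also registered under its historic single-letter
 * name "P", which is the form snapshot table lines typically use to
 * select a persistent exception store.
 */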
static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}