xref: /openbmc/linux/drivers/md/dm-cache-target.c (revision 6396bb221514d2876fd6dc0aa2a1f240d99b37bb)
1 /*
2  * Copyright (C) 2012 Red Hat. All rights reserved.
3  *
4  * This file is released under the GPL.
5  */
6 
7 #include "dm.h"
8 #include "dm-bio-prison-v2.h"
9 #include "dm-bio-record.h"
10 #include "dm-cache-metadata.h"
11 
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/jiffies.h>
15 #include <linux/init.h>
16 #include <linux/mempool.h>
17 #include <linux/module.h>
18 #include <linux/rwsem.h>
19 #include <linux/slab.h>
20 #include <linux/vmalloc.h>
21 
22 #define DM_MSG_PREFIX "cache"
23 
24 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
25 	"A percentage of time allocated for copying to and/or from cache");
26 
27 /*----------------------------------------------------------------*/
28 
29 /*
30  * Glossary:
31  *
32  * oblock: index of an origin block
33  * cblock: index of a cache block
34  * promotion: movement of a block from origin to cache
35  * demotion: movement of a block from cache to origin
36  * migration: movement of a block between the origin and cache device,
37  *	      either direction
38  */
39 
40 /*----------------------------------------------------------------*/
41 
42 struct io_tracker {
43 	spinlock_t lock;
44 
45 	/*
46 	 * Sectors of in-flight IO.
47 	 */
48 	sector_t in_flight;
49 
50 	/*
51 	 * The time, in jiffies, when this device became idle (if it is
52 	 * indeed idle).
53 	 */
54 	unsigned long idle_time;
55 	unsigned long last_update_time;
56 };
57 
58 static void iot_init(struct io_tracker *iot)
59 {
60 	spin_lock_init(&iot->lock);
61 	iot->in_flight = 0ul;
62 	iot->idle_time = 0ul;
63 	iot->last_update_time = jiffies;
64 }
65 
66 static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
67 {
68 	if (iot->in_flight)
69 		return false;
70 
71 	return time_after(jiffies, iot->idle_time + jifs);
72 }
73 
74 static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
75 {
76 	bool r;
77 	unsigned long flags;
78 
79 	spin_lock_irqsave(&iot->lock, flags);
80 	r = __iot_idle_for(iot, jifs);
81 	spin_unlock_irqrestore(&iot->lock, flags);
82 
83 	return r;
84 }
85 
86 static void iot_io_begin(struct io_tracker *iot, sector_t len)
87 {
88 	unsigned long flags;
89 
90 	spin_lock_irqsave(&iot->lock, flags);
91 	iot->in_flight += len;
92 	spin_unlock_irqrestore(&iot->lock, flags);
93 }
94 
95 static void __iot_io_end(struct io_tracker *iot, sector_t len)
96 {
97 	if (!len)
98 		return;
99 
100 	iot->in_flight -= len;
101 	if (!iot->in_flight)
102 		iot->idle_time = jiffies;
103 }
104 
105 static void iot_io_end(struct io_tracker *iot, sector_t len)
106 {
107 	unsigned long flags;
108 
109 	spin_lock_irqsave(&iot->lock, flags);
110 	__iot_io_end(iot, len);
111 	spin_unlock_irqrestore(&iot->lock, flags);
112 }
113 
114 /*----------------------------------------------------------------*/
115 
116 /*
117  * Represents a chunk of future work.  'input' allows continuations to pass
118  * values between themselves, typically error values.
119  */
120 struct continuation {
121 	struct work_struct ws;
122 	blk_status_t input;
123 };
124 
125 static inline void init_continuation(struct continuation *k,
126 				     void (*fn)(struct work_struct *))
127 {
128 	INIT_WORK(&k->ws, fn);
129 	k->input = 0;
130 }
131 
132 static inline void queue_continuation(struct workqueue_struct *wq,
133 				      struct continuation *k)
134 {
135 	queue_work(wq, &k->ws);
136 }
137 
138 /*----------------------------------------------------------------*/
139 
140 /*
141  * The batcher collects together pieces of work that need a particular
142  * operation to occur before they can proceed (typically a commit).
143  */
144 struct batcher {
145 	/*
146 	 * The operation that everyone is waiting for.
147 	 */
148 	blk_status_t (*commit_op)(void *context);
149 	void *commit_context;
150 
151 	/*
152 	 * This is how bios should be issued once the commit op is complete
153 	 * (accounted_request).
154 	 */
155 	void (*issue_op)(struct bio *bio, void *context);
156 	void *issue_context;
157 
158 	/*
159 	 * Queued work gets put on here after commit.
160 	 */
161 	struct workqueue_struct *wq;
162 
163 	spinlock_t lock;
164 	struct list_head work_items;
165 	struct bio_list bios;
166 	struct work_struct commit_work;
167 
168 	bool commit_scheduled;
169 };
170 
171 static void __commit(struct work_struct *_ws)
172 {
173 	struct batcher *b = container_of(_ws, struct batcher, commit_work);
174 	blk_status_t r;
175 	unsigned long flags;
176 	struct list_head work_items;
177 	struct work_struct *ws, *tmp;
178 	struct continuation *k;
179 	struct bio *bio;
180 	struct bio_list bios;
181 
182 	INIT_LIST_HEAD(&work_items);
183 	bio_list_init(&bios);
184 
185 	/*
186 	 * We have to grab these before the commit_op to avoid a race
187 	 * condition.
188 	 */
189 	spin_lock_irqsave(&b->lock, flags);
190 	list_splice_init(&b->work_items, &work_items);
191 	bio_list_merge(&bios, &b->bios);
192 	bio_list_init(&b->bios);
193 	b->commit_scheduled = false;
194 	spin_unlock_irqrestore(&b->lock, flags);
195 
196 	r = b->commit_op(b->commit_context);
197 
198 	list_for_each_entry_safe(ws, tmp, &work_items, entry) {
199 		k = container_of(ws, struct continuation, ws);
200 		k->input = r;
201 		INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */
202 		queue_work(b->wq, ws);
203 	}
204 
205 	while ((bio = bio_list_pop(&bios))) {
206 		if (r) {
207 			bio->bi_status = r;
208 			bio_endio(bio);
209 		} else
210 			b->issue_op(bio, b->issue_context);
211 	}
212 }
213 
214 static void batcher_init(struct batcher *b,
215 			 blk_status_t (*commit_op)(void *),
216 			 void *commit_context,
217 			 void (*issue_op)(struct bio *bio, void *),
218 			 void *issue_context,
219 			 struct workqueue_struct *wq)
220 {
221 	b->commit_op = commit_op;
222 	b->commit_context = commit_context;
223 	b->issue_op = issue_op;
224 	b->issue_context = issue_context;
225 	b->wq = wq;
226 
227 	spin_lock_init(&b->lock);
228 	INIT_LIST_HEAD(&b->work_items);
229 	bio_list_init(&b->bios);
230 	INIT_WORK(&b->commit_work, __commit);
231 	b->commit_scheduled = false;
232 }
233 
234 static void async_commit(struct batcher *b)
235 {
236 	queue_work(b->wq, &b->commit_work);
237 }
238 
239 static void continue_after_commit(struct batcher *b, struct continuation *k)
240 {
241 	unsigned long flags;
242 	bool commit_scheduled;
243 
244 	spin_lock_irqsave(&b->lock, flags);
245 	commit_scheduled = b->commit_scheduled;
246 	list_add_tail(&k->ws.entry, &b->work_items);
247 	spin_unlock_irqrestore(&b->lock, flags);
248 
249 	if (commit_scheduled)
250 		async_commit(b);
251 }
252 
253 /*
254  * Bios are errored if commit failed.
255  */
256 static void issue_after_commit(struct batcher *b, struct bio *bio)
257 {
258 	unsigned long flags;
259 	bool commit_scheduled;
260 
261 	spin_lock_irqsave(&b->lock, flags);
262 	commit_scheduled = b->commit_scheduled;
263 	bio_list_add(&b->bios, bio);
264 	spin_unlock_irqrestore(&b->lock, flags);
265 
266 	if (commit_scheduled)
267 		async_commit(b);
268 }
269 
270 /*
271  * Call this if some urgent work is waiting for the commit to complete.
272  */
273 static void schedule_commit(struct batcher *b)
274 {
275 	bool immediate;
276 	unsigned long flags;
277 
278 	spin_lock_irqsave(&b->lock, flags);
279 	immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
280 	b->commit_scheduled = true;
281 	spin_unlock_irqrestore(&b->lock, flags);
282 
283 	if (immediate)
284 		async_commit(b);
285 }
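/*
 * A sketch of how the cache target below wires this up for its "committer"
 * (commit_op() and issue_op() appear later in this file); roughly:
 *
 *	batcher_init(&cache->committer, commit_op, cache,
 *		     issue_op, cache, cache->wq);
 *
 * Bios and continuations that must wait for a metadata commit are then
 * parked with issue_after_commit() / continue_after_commit(), and
 * schedule_commit() forces the commit that releases them.
 */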
286 
287 /*
288  * There are a couple of places where we let a bio run, but want to do some
289  * work before calling its endio function.  We do this by temporarily
290  * changing the endio fn.
291  */
292 struct dm_hook_info {
293 	bio_end_io_t *bi_end_io;
294 };
295 
296 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
297 			bio_end_io_t *bi_end_io, void *bi_private)
298 {
299 	h->bi_end_io = bio->bi_end_io;
300 
301 	bio->bi_end_io = bi_end_io;
302 	bio->bi_private = bi_private;
303 }
304 
305 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
306 {
307 	bio->bi_end_io = h->bi_end_io;
308 }
309 
310 /*----------------------------------------------------------------*/
311 
312 #define MIGRATION_POOL_SIZE 128
313 #define COMMIT_PERIOD HZ
314 #define MIGRATION_COUNT_WINDOW 10
315 
316 /*
317  * The block size of the device holding cache data must be
318  * between 32KB and 1GB.
319  */
320 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
321 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
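/* With 512-byte sectors these limits work out to 64 and 2097152 sectors. */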
322 
323 enum cache_metadata_mode {
324 	CM_WRITE,		/* metadata may be changed */
325 	CM_READ_ONLY,		/* metadata may not be changed */
326 	CM_FAIL
327 };
328 
329 enum cache_io_mode {
330 	/*
331 	 * Data is written to cached blocks only.  These blocks are marked
332 	 * dirty.  If you lose the cache device you will lose data.
333 	 * Potential performance increase for both reads and writes.
334 	 */
335 	CM_IO_WRITEBACK,
336 
337 	/*
338 	 * Data is written to both cache and origin.  Blocks are never
339 	 * dirty.  Potential performance benefit for reads only.
340 	 */
341 	CM_IO_WRITETHROUGH,
342 
343 	/*
344 	 * A degraded mode useful for various cache coherency situations
345 	 * (eg, rolling back snapshots).  Reads and writes always go to the
346 	 * origin.  If a write goes to a cached oblock, then the cache
347 	 * block is invalidated.
348 	 */
349 	CM_IO_PASSTHROUGH
350 };
351 
352 struct cache_features {
353 	enum cache_metadata_mode mode;
354 	enum cache_io_mode io_mode;
355 	unsigned metadata_version;
356 };
357 
358 struct cache_stats {
359 	atomic_t read_hit;
360 	atomic_t read_miss;
361 	atomic_t write_hit;
362 	atomic_t write_miss;
363 	atomic_t demotion;
364 	atomic_t promotion;
365 	atomic_t writeback;
366 	atomic_t copies_avoided;
367 	atomic_t cache_cell_clash;
368 	atomic_t commit_count;
369 	atomic_t discard_count;
370 };
371 
372 struct cache {
373 	struct dm_target *ti;
374 	struct dm_target_callbacks callbacks;
375 
376 	struct dm_cache_metadata *cmd;
377 
378 	/*
379 	 * Metadata is written to this device.
380 	 */
381 	struct dm_dev *metadata_dev;
382 
383 	/*
384 	 * The slower of the two data devices.  Typically a spindle.
385 	 */
386 	struct dm_dev *origin_dev;
387 
388 	/*
389 	 * The faster of the two data devices.  Typically an SSD.
390 	 */
391 	struct dm_dev *cache_dev;
392 
393 	/*
394 	 * Size of the origin device in _complete_ blocks and native sectors.
395 	 */
396 	dm_oblock_t origin_blocks;
397 	sector_t origin_sectors;
398 
399 	/*
400 	 * Size of the cache device in blocks.
401 	 */
402 	dm_cblock_t cache_size;
403 
404 	/*
405 	 * Fields for converting from sectors to blocks.
406 	 */
407 	sector_t sectors_per_block;
408 	int sectors_per_block_shift;
409 
410 	spinlock_t lock;
411 	struct bio_list deferred_bios;
412 	sector_t migration_threshold;
413 	wait_queue_head_t migration_wait;
414 	atomic_t nr_allocated_migrations;
415 
416 	/*
417 	 * The number of in flight migrations that are performing
418 	 * The number of in-flight migrations that are performing
419 	 * background IO, e.g. promotion or writeback.
420 	atomic_t nr_io_migrations;
421 
422 	struct rw_semaphore quiesce_lock;
423 
424 	/*
425 	 * cache_size entries, dirty if set
426 	 */
427 	atomic_t nr_dirty;
428 	unsigned long *dirty_bitset;
429 
430 	/*
431 	 * origin_blocks entries, discarded if set.
432 	 */
433 	dm_dblock_t discard_nr_blocks;
434 	unsigned long *discard_bitset;
435 	uint32_t discard_block_size; /* a power of 2 times sectors per block */
436 
437 	/*
438 	 * Rather than reconstructing the table line for the status we just
439 	 * save it and regurgitate.
440 	 */
441 	unsigned nr_ctr_args;
442 	const char **ctr_args;
443 
444 	struct dm_kcopyd_client *copier;
445 	struct workqueue_struct *wq;
446 	struct work_struct deferred_bio_worker;
447 	struct work_struct migration_worker;
448 	struct delayed_work waker;
449 	struct dm_bio_prison_v2 *prison;
450 	struct bio_set bs;
451 
452 	mempool_t migration_pool;
453 
454 	struct dm_cache_policy *policy;
455 	unsigned policy_nr_args;
456 
457 	bool need_tick_bio:1;
458 	bool sized:1;
459 	bool invalidate:1;
460 	bool commit_requested:1;
461 	bool loaded_mappings:1;
462 	bool loaded_discards:1;
463 
464 	/*
465 	 * Cache features such as write-through.
466 	 */
467 	struct cache_features features;
468 
469 	struct cache_stats stats;
470 
471 	/*
472 	 * Invalidation fields.
473 	 */
474 	spinlock_t invalidation_lock;
475 	struct list_head invalidation_requests;
476 
477 	struct io_tracker tracker;
478 
479 	struct work_struct commit_ws;
480 	struct batcher committer;
481 
482 	struct rw_semaphore background_work_lock;
483 };
484 
485 struct per_bio_data {
486 	bool tick:1;
487 	unsigned req_nr:2;
488 	struct dm_bio_prison_cell_v2 *cell;
489 	struct dm_hook_info hook_info;
490 	sector_t len;
491 };
492 
493 struct dm_cache_migration {
494 	struct continuation k;
495 	struct cache *cache;
496 
497 	struct policy_work *op;
498 	struct bio *overwrite_bio;
499 	struct dm_bio_prison_cell_v2 *cell;
500 
501 	dm_cblock_t invalidate_cblock;
502 	dm_oblock_t invalidate_oblock;
503 };
504 
505 /*----------------------------------------------------------------*/
506 
507 static bool writethrough_mode(struct cache *cache)
508 {
509 	return cache->features.io_mode == CM_IO_WRITETHROUGH;
510 }
511 
512 static bool writeback_mode(struct cache *cache)
513 {
514 	return cache->features.io_mode == CM_IO_WRITEBACK;
515 }
516 
517 static inline bool passthrough_mode(struct cache *cache)
518 {
519 	return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
520 }
521 
522 /*----------------------------------------------------------------*/
523 
524 static void wake_deferred_bio_worker(struct cache *cache)
525 {
526 	queue_work(cache->wq, &cache->deferred_bio_worker);
527 }
528 
529 static void wake_migration_worker(struct cache *cache)
530 {
531 	if (passthrough_mode(cache))
532 		return;
533 
534 	queue_work(cache->wq, &cache->migration_worker);
535 }
536 
537 /*----------------------------------------------------------------*/
538 
539 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
540 {
541 	return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
542 }
543 
544 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
545 {
546 	dm_bio_prison_free_cell_v2(cache->prison, cell);
547 }
548 
549 static struct dm_cache_migration *alloc_migration(struct cache *cache)
550 {
551 	struct dm_cache_migration *mg;
552 
553 	mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
554 	if (!mg)
555 		return NULL;
556 
557 	memset(mg, 0, sizeof(*mg));
558 
559 	mg->cache = cache;
560 	atomic_inc(&cache->nr_allocated_migrations);
561 
562 	return mg;
563 }
564 
565 static void free_migration(struct dm_cache_migration *mg)
566 {
567 	struct cache *cache = mg->cache;
568 
569 	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
570 		wake_up(&cache->migration_wait);
571 
572 	mempool_free(mg, &cache->migration_pool);
573 }
574 
575 /*----------------------------------------------------------------*/
576 
577 static inline dm_oblock_t oblock_succ(dm_oblock_t b)
578 {
579 	return to_oblock(from_oblock(b) + 1ull);
580 }
581 
582 static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key)
583 {
584 	key->virtual = 0;
585 	key->dev = 0;
586 	key->block_begin = from_oblock(begin);
587 	key->block_end = from_oblock(end);
588 }
589 
590 /*
591  * We have two lock levels: level 0 prevents WRITEs, and level 1 prevents
592  * *both* READs and WRITEs.
593  */
594 #define WRITE_LOCK_LEVEL 0
595 #define READ_WRITE_LOCK_LEVEL 1
596 
597 static unsigned lock_level(struct bio *bio)
598 {
599 	return bio_data_dir(bio) == WRITE ?
600 		WRITE_LOCK_LEVEL :
601 		READ_WRITE_LOCK_LEVEL;
602 }
603 
604 /*----------------------------------------------------------------
605  * Per bio data
606  *--------------------------------------------------------------*/
607 
608 static struct per_bio_data *get_per_bio_data(struct bio *bio)
609 {
610 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
611 	BUG_ON(!pb);
612 	return pb;
613 }
614 
615 static struct per_bio_data *init_per_bio_data(struct bio *bio)
616 {
617 	struct per_bio_data *pb = get_per_bio_data(bio);
618 
619 	pb->tick = false;
620 	pb->req_nr = dm_bio_get_target_bio_nr(bio);
621 	pb->cell = NULL;
622 	pb->len = 0;
623 
624 	return pb;
625 }
626 
627 /*----------------------------------------------------------------*/
628 
629 static void defer_bio(struct cache *cache, struct bio *bio)
630 {
631 	unsigned long flags;
632 
633 	spin_lock_irqsave(&cache->lock, flags);
634 	bio_list_add(&cache->deferred_bios, bio);
635 	spin_unlock_irqrestore(&cache->lock, flags);
636 
637 	wake_deferred_bio_worker(cache);
638 }
639 
640 static void defer_bios(struct cache *cache, struct bio_list *bios)
641 {
642 	unsigned long flags;
643 
644 	spin_lock_irqsave(&cache->lock, flags);
645 	bio_list_merge(&cache->deferred_bios, bios);
646 	bio_list_init(bios);
647 	spin_unlock_irqrestore(&cache->lock, flags);
648 
649 	wake_deferred_bio_worker(cache);
650 }
651 
652 /*----------------------------------------------------------------*/
653 
654 static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
655 {
656 	bool r;
657 	struct per_bio_data *pb;
658 	struct dm_cell_key_v2 key;
659 	dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
660 	struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
661 
662 	cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
663 	if (!cell_prealloc) {
664 		defer_bio(cache, bio);
665 		return false;
666 	}
667 
668 	build_key(oblock, end, &key);
669 	r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
670 	if (!r) {
671 		/*
672 		 * Failed to get the lock.
673 		 */
674 		free_prison_cell(cache, cell_prealloc);
675 		return r;
676 	}
677 
678 	if (cell != cell_prealloc)
679 		free_prison_cell(cache, cell_prealloc);
680 
681 	pb = get_per_bio_data(bio);
682 	pb->cell = cell;
683 
684 	return r;
685 }
686 
687 /*----------------------------------------------------------------*/
688 
689 static bool is_dirty(struct cache *cache, dm_cblock_t b)
690 {
691 	return test_bit(from_cblock(b), cache->dirty_bitset);
692 }
693 
694 static void set_dirty(struct cache *cache, dm_cblock_t cblock)
695 {
696 	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
697 		atomic_inc(&cache->nr_dirty);
698 		policy_set_dirty(cache->policy, cblock);
699 	}
700 }
701 
702 /*
703  * These two are called after migrations to force the policy
704  * and dirty bitset to be in sync.
705  */
706 static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
707 {
708 	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset))
709 		atomic_inc(&cache->nr_dirty);
710 	policy_set_dirty(cache->policy, cblock);
711 }
712 
713 static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
714 {
715 	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
716 		if (atomic_dec_return(&cache->nr_dirty) == 0)
717 			dm_table_event(cache->ti->table);
718 	}
719 
720 	policy_clear_dirty(cache->policy, cblock);
721 }
722 
723 /*----------------------------------------------------------------*/
724 
725 static bool block_size_is_power_of_two(struct cache *cache)
726 {
727 	return cache->sectors_per_block_shift >= 0;
728 }
729 
730 /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
731 #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
732 __always_inline
733 #endif
734 static dm_block_t block_div(dm_block_t b, uint32_t n)
735 {
736 	do_div(b, n);
737 
738 	return b;
739 }
740 
741 static dm_block_t oblocks_per_dblock(struct cache *cache)
742 {
743 	dm_block_t oblocks = cache->discard_block_size;
744 
745 	if (block_size_is_power_of_two(cache))
746 		oblocks >>= cache->sectors_per_block_shift;
747 	else
748 		oblocks = block_div(oblocks, cache->sectors_per_block);
749 
750 	return oblocks;
751 }
752 
753 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
754 {
755 	return to_dblock(block_div(from_oblock(oblock),
756 				   oblocks_per_dblock(cache)));
757 }
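/*
 * Worked example with illustrative numbers: for 64KiB cache blocks
 * (sectors_per_block = 128) and discard_block_size = 2048 sectors, each
 * discard block covers oblocks_per_dblock() = 16 origin blocks, so
 * oblock_to_dblock() maps oblock 100 to dblock 6 (100 / 16 rounded down).
 */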
758 
759 static void set_discard(struct cache *cache, dm_dblock_t b)
760 {
761 	unsigned long flags;
762 
763 	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
764 	atomic_inc(&cache->stats.discard_count);
765 
766 	spin_lock_irqsave(&cache->lock, flags);
767 	set_bit(from_dblock(b), cache->discard_bitset);
768 	spin_unlock_irqrestore(&cache->lock, flags);
769 }
770 
771 static void clear_discard(struct cache *cache, dm_dblock_t b)
772 {
773 	unsigned long flags;
774 
775 	spin_lock_irqsave(&cache->lock, flags);
776 	clear_bit(from_dblock(b), cache->discard_bitset);
777 	spin_unlock_irqrestore(&cache->lock, flags);
778 }
779 
780 static bool is_discarded(struct cache *cache, dm_dblock_t b)
781 {
782 	int r;
783 	unsigned long flags;
784 
785 	spin_lock_irqsave(&cache->lock, flags);
786 	r = test_bit(from_dblock(b), cache->discard_bitset);
787 	spin_unlock_irqrestore(&cache->lock, flags);
788 
789 	return r;
790 }
791 
792 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
793 {
794 	int r;
795 	unsigned long flags;
796 
797 	spin_lock_irqsave(&cache->lock, flags);
798 	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
799 		     cache->discard_bitset);
800 	spin_unlock_irqrestore(&cache->lock, flags);
801 
802 	return r;
803 }
804 
805 /*----------------------------------------------------------------
806  * Remapping
807  *--------------------------------------------------------------*/
808 static void remap_to_origin(struct cache *cache, struct bio *bio)
809 {
810 	bio_set_dev(bio, cache->origin_dev->bdev);
811 }
812 
813 static void remap_to_cache(struct cache *cache, struct bio *bio,
814 			   dm_cblock_t cblock)
815 {
816 	sector_t bi_sector = bio->bi_iter.bi_sector;
817 	sector_t block = from_cblock(cblock);
818 
819 	bio_set_dev(bio, cache->cache_dev->bdev);
820 	if (!block_size_is_power_of_two(cache))
821 		bio->bi_iter.bi_sector =
822 			(block * cache->sectors_per_block) +
823 			sector_div(bi_sector, cache->sectors_per_block);
824 	else
825 		bio->bi_iter.bi_sector =
826 			(block << cache->sectors_per_block_shift) |
827 			(bi_sector & (cache->sectors_per_block - 1));
828 }
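/*
 * Worked example with illustrative numbers: for sectors_per_block = 128
 * (sectors_per_block_shift = 7), a bio at origin sector 1000 remapped to
 * cblock 3 keeps its offset within the block (1000 & 127 = 104) and lands
 * on cache sector (3 << 7) | 104 = 488.  The non-power-of-two branch
 * computes the same result with a multiply and sector_div().
 */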
829 
830 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
831 {
832 	unsigned long flags;
833 	struct per_bio_data *pb;
834 
835 	spin_lock_irqsave(&cache->lock, flags);
836 	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
837 	    bio_op(bio) != REQ_OP_DISCARD) {
838 		pb = get_per_bio_data(bio);
839 		pb->tick = true;
840 		cache->need_tick_bio = false;
841 	}
842 	spin_unlock_irqrestore(&cache->lock, flags);
843 }
844 
845 static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
846 					    dm_oblock_t oblock, bool bio_has_pbd)
847 {
848 	if (bio_has_pbd)
849 		check_if_tick_bio_needed(cache, bio);
850 	remap_to_origin(cache, bio);
851 	if (bio_data_dir(bio) == WRITE)
852 		clear_discard(cache, oblock_to_dblock(cache, oblock));
853 }
854 
855 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
856 					  dm_oblock_t oblock)
857 {
858 	// FIXME: check_if_tick_bio_needed() is called way too much through this interface
859 	__remap_to_origin_clear_discard(cache, bio, oblock, true);
860 }
861 
862 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
863 				 dm_oblock_t oblock, dm_cblock_t cblock)
864 {
865 	check_if_tick_bio_needed(cache, bio);
866 	remap_to_cache(cache, bio, cblock);
867 	if (bio_data_dir(bio) == WRITE) {
868 		set_dirty(cache, cblock);
869 		clear_discard(cache, oblock_to_dblock(cache, oblock));
870 	}
871 }
872 
873 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
874 {
875 	sector_t block_nr = bio->bi_iter.bi_sector;
876 
877 	if (!block_size_is_power_of_two(cache))
878 		(void) sector_div(block_nr, cache->sectors_per_block);
879 	else
880 		block_nr >>= cache->sectors_per_block_shift;
881 
882 	return to_oblock(block_nr);
883 }
884 
885 static bool accountable_bio(struct cache *cache, struct bio *bio)
886 {
887 	return bio_op(bio) != REQ_OP_DISCARD;
888 }
889 
890 static void accounted_begin(struct cache *cache, struct bio *bio)
891 {
892 	struct per_bio_data *pb;
893 
894 	if (accountable_bio(cache, bio)) {
895 		pb = get_per_bio_data(bio);
896 		pb->len = bio_sectors(bio);
897 		iot_io_begin(&cache->tracker, pb->len);
898 	}
899 }
900 
901 static void accounted_complete(struct cache *cache, struct bio *bio)
902 {
903 	struct per_bio_data *pb = get_per_bio_data(bio);
904 
905 	iot_io_end(&cache->tracker, pb->len);
906 }
907 
908 static void accounted_request(struct cache *cache, struct bio *bio)
909 {
910 	accounted_begin(cache, bio);
911 	generic_make_request(bio);
912 }
913 
914 static void issue_op(struct bio *bio, void *context)
915 {
916 	struct cache *cache = context;
917 	accounted_request(cache, bio);
918 }
919 
920 /*
921  * When running in writethrough mode we need to send writes to clean blocks
922  * to both the cache and origin devices.  Clone the bio and send them in parallel.
923  */
924 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
925 				      dm_oblock_t oblock, dm_cblock_t cblock)
926 {
927 	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
928 
929 	BUG_ON(!origin_bio);
930 
931 	bio_chain(origin_bio, bio);
932 	/*
933 	 * Passing false to __remap_to_origin_clear_discard() skips
934 	 * all code that might use per_bio_data (since the clone doesn't have it).
935 	 */
936 	__remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
937 	submit_bio(origin_bio);
938 
939 	remap_to_cache(cache, bio, cblock);
940 }
941 
942 /*----------------------------------------------------------------
943  * Failure modes
944  *--------------------------------------------------------------*/
945 static enum cache_metadata_mode get_cache_mode(struct cache *cache)
946 {
947 	return cache->features.mode;
948 }
949 
950 static const char *cache_device_name(struct cache *cache)
951 {
952 	return dm_device_name(dm_table_get_md(cache->ti->table));
953 }
954 
955 static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
956 {
957 	const char *descs[] = {
958 		"write",
959 		"read-only",
960 		"fail"
961 	};
962 
963 	dm_table_event(cache->ti->table);
964 	DMINFO("%s: switching cache to %s mode",
965 	       cache_device_name(cache), descs[(int)mode]);
966 }
967 
968 static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
969 {
970 	bool needs_check;
971 	enum cache_metadata_mode old_mode = get_cache_mode(cache);
972 
973 	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
974 		DMERR("%s: unable to read needs_check flag, setting failure mode.",
975 		      cache_device_name(cache));
976 		new_mode = CM_FAIL;
977 	}
978 
979 	if (new_mode == CM_WRITE && needs_check) {
980 		DMERR("%s: unable to switch cache to write mode until repaired.",
981 		      cache_device_name(cache));
982 		if (old_mode != new_mode)
983 			new_mode = old_mode;
984 		else
985 			new_mode = CM_READ_ONLY;
986 	}
987 
988 	/* Never move out of fail mode */
989 	if (old_mode == CM_FAIL)
990 		new_mode = CM_FAIL;
991 
992 	switch (new_mode) {
993 	case CM_FAIL:
994 	case CM_READ_ONLY:
995 		dm_cache_metadata_set_read_only(cache->cmd);
996 		break;
997 
998 	case CM_WRITE:
999 		dm_cache_metadata_set_read_write(cache->cmd);
1000 		break;
1001 	}
1002 
1003 	cache->features.mode = new_mode;
1004 
1005 	if (new_mode != old_mode)
1006 		notify_mode_switch(cache, new_mode);
1007 }
1008 
1009 static void abort_transaction(struct cache *cache)
1010 {
1011 	const char *dev_name = cache_device_name(cache);
1012 
1013 	if (get_cache_mode(cache) >= CM_READ_ONLY)
1014 		return;
1015 
1016 	if (dm_cache_metadata_set_needs_check(cache->cmd)) {
1017 		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
1018 		set_cache_mode(cache, CM_FAIL);
1019 	}
1020 
1021 	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
1022 	if (dm_cache_metadata_abort(cache->cmd)) {
1023 		DMERR("%s: failed to abort metadata transaction", dev_name);
1024 		set_cache_mode(cache, CM_FAIL);
1025 	}
1026 }
1027 
1028 static void metadata_operation_failed(struct cache *cache, const char *op, int r)
1029 {
1030 	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1031 		    cache_device_name(cache), op, r);
1032 	abort_transaction(cache);
1033 	set_cache_mode(cache, CM_READ_ONLY);
1034 }
1035 
1036 /*----------------------------------------------------------------*/
1037 
1038 static void load_stats(struct cache *cache)
1039 {
1040 	struct dm_cache_statistics stats;
1041 
1042 	dm_cache_metadata_get_stats(cache->cmd, &stats);
1043 	atomic_set(&cache->stats.read_hit, stats.read_hits);
1044 	atomic_set(&cache->stats.read_miss, stats.read_misses);
1045 	atomic_set(&cache->stats.write_hit, stats.write_hits);
1046 	atomic_set(&cache->stats.write_miss, stats.write_misses);
1047 }
1048 
1049 static void save_stats(struct cache *cache)
1050 {
1051 	struct dm_cache_statistics stats;
1052 
1053 	if (get_cache_mode(cache) >= CM_READ_ONLY)
1054 		return;
1055 
1056 	stats.read_hits = atomic_read(&cache->stats.read_hit);
1057 	stats.read_misses = atomic_read(&cache->stats.read_miss);
1058 	stats.write_hits = atomic_read(&cache->stats.write_hit);
1059 	stats.write_misses = atomic_read(&cache->stats.write_miss);
1060 
1061 	dm_cache_metadata_set_stats(cache->cmd, &stats);
1062 }
1063 
1064 static void update_stats(struct cache_stats *stats, enum policy_operation op)
1065 {
1066 	switch (op) {
1067 	case POLICY_PROMOTE:
1068 		atomic_inc(&stats->promotion);
1069 		break;
1070 
1071 	case POLICY_DEMOTE:
1072 		atomic_inc(&stats->demotion);
1073 		break;
1074 
1075 	case POLICY_WRITEBACK:
1076 		atomic_inc(&stats->writeback);
1077 		break;
1078 	}
1079 }
1080 
1081 /*----------------------------------------------------------------
1082  * Migration processing
1083  *
1084  * Migration covers moving data from the origin device to the cache, or
1085  * vice versa.
1086  *--------------------------------------------------------------*/
1087 
1088 static void inc_io_migrations(struct cache *cache)
1089 {
1090 	atomic_inc(&cache->nr_io_migrations);
1091 }
1092 
1093 static void dec_io_migrations(struct cache *cache)
1094 {
1095 	atomic_dec(&cache->nr_io_migrations);
1096 }
1097 
1098 static bool discard_or_flush(struct bio *bio)
1099 {
1100 	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
1101 }
1102 
1103 static void calc_discard_block_range(struct cache *cache, struct bio *bio,
1104 				     dm_dblock_t *b, dm_dblock_t *e)
1105 {
1106 	sector_t sb = bio->bi_iter.bi_sector;
1107 	sector_t se = bio_end_sector(bio);
1108 
1109 	*b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
1110 
1111 	if (se - sb < cache->discard_block_size)
1112 		*e = *b;
1113 	else
1114 		*e = to_dblock(block_div(se, cache->discard_block_size));
1115 }
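/*
 * Worked example with illustrative numbers: for discard_block_size = 1024
 * sectors, a discard of sectors [3000, 10000) is rounded inwards to the
 * fully covered discard blocks, giving *b = 3 and *e = 9; the caller then
 * marks blocks 3..8 as discarded.
 */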
1116 
1117 /*----------------------------------------------------------------*/
1118 
1119 static void prevent_background_work(struct cache *cache)
1120 {
1121 	lockdep_off();
1122 	down_write(&cache->background_work_lock);
1123 	lockdep_on();
1124 }
1125 
1126 static void allow_background_work(struct cache *cache)
1127 {
1128 	lockdep_off();
1129 	up_write(&cache->background_work_lock);
1130 	lockdep_on();
1131 }
1132 
1133 static bool background_work_begin(struct cache *cache)
1134 {
1135 	bool r;
1136 
1137 	lockdep_off();
1138 	r = down_read_trylock(&cache->background_work_lock);
1139 	lockdep_on();
1140 
1141 	return r;
1142 }
1143 
1144 static void background_work_end(struct cache *cache)
1145 {
1146 	lockdep_off();
1147 	up_read(&cache->background_work_lock);
1148 	lockdep_on();
1149 }
1150 
1151 /*----------------------------------------------------------------*/
1152 
1153 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1154 {
1155 	return (bio_data_dir(bio) == WRITE) &&
1156 		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1157 }
1158 
1159 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
1160 {
1161 	return writeback_mode(cache) &&
1162 		(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
1163 }
1164 
1165 static void quiesce(struct dm_cache_migration *mg,
1166 		    void (*continuation)(struct work_struct *))
1167 {
1168 	init_continuation(&mg->k, continuation);
1169 	dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
1170 }
1171 
1172 static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
1173 {
1174 	struct continuation *k = container_of(ws, struct continuation, ws);
1175 	return container_of(k, struct dm_cache_migration, k);
1176 }
1177 
1178 static void copy_complete(int read_err, unsigned long write_err, void *context)
1179 {
1180 	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
1181 
1182 	if (read_err || write_err)
1183 		mg->k.input = BLK_STS_IOERR;
1184 
1185 	queue_continuation(mg->cache->wq, &mg->k);
1186 }
1187 
1188 static int copy(struct dm_cache_migration *mg, bool promote)
1189 {
1190 	int r;
1191 	struct dm_io_region o_region, c_region;
1192 	struct cache *cache = mg->cache;
1193 
1194 	o_region.bdev = cache->origin_dev->bdev;
1195 	o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
1196 	o_region.count = cache->sectors_per_block;
1197 
1198 	c_region.bdev = cache->cache_dev->bdev;
1199 	c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
1200 	c_region.count = cache->sectors_per_block;
1201 
1202 	if (promote)
1203 		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
1204 	else
1205 		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
1206 
1207 	return r;
1208 }
1209 
1210 static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
1211 {
1212 	struct per_bio_data *pb = get_per_bio_data(bio);
1213 
1214 	if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
1215 		free_prison_cell(cache, pb->cell);
1216 	pb->cell = NULL;
1217 }
1218 
1219 static void overwrite_endio(struct bio *bio)
1220 {
1221 	struct dm_cache_migration *mg = bio->bi_private;
1222 	struct cache *cache = mg->cache;
1223 	struct per_bio_data *pb = get_per_bio_data(bio);
1224 
1225 	dm_unhook_bio(&pb->hook_info, bio);
1226 
1227 	if (bio->bi_status)
1228 		mg->k.input = bio->bi_status;
1229 
1230 	queue_continuation(cache->wq, &mg->k);
1231 }
1232 
1233 static void overwrite(struct dm_cache_migration *mg,
1234 		      void (*continuation)(struct work_struct *))
1235 {
1236 	struct bio *bio = mg->overwrite_bio;
1237 	struct per_bio_data *pb = get_per_bio_data(bio);
1238 
1239 	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1240 
1241 	/*
1242 	 * The overwrite bio is part of the copy operation, as such it does
1243 	 * not set/clear discard or dirty flags.
1244 	 */
1245 	if (mg->op->op == POLICY_PROMOTE)
1246 		remap_to_cache(mg->cache, bio, mg->op->cblock);
1247 	else
1248 		remap_to_origin(mg->cache, bio);
1249 
1250 	init_continuation(&mg->k, continuation);
1251 	accounted_request(mg->cache, bio);
1252 }
1253 
1254 /*
1255  * Migration steps:
1256  *
1257  * 1) exclusive lock preventing WRITEs
1258  * 2) quiesce
1259  * 3) copy or issue overwrite bio
1260  * 4) upgrade to exclusive lock preventing READs and WRITEs
1261  * 5) quiesce
1262  * 6) update metadata and commit
1263  * 7) unlock
1264  */
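/*
 * Roughly, the steps above map onto the functions below as follows:
 * mg_start()/mg_lock_writes() (1), quiesce() (2 and 5),
 * mg_copy()/mg_full_copy()/overwrite() (3), mg_upgrade_lock() (4),
 * mg_update_metadata() plus the committer (6), and mg_complete() (7).
 */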
1265 static void mg_complete(struct dm_cache_migration *mg, bool success)
1266 {
1267 	struct bio_list bios;
1268 	struct cache *cache = mg->cache;
1269 	struct policy_work *op = mg->op;
1270 	dm_cblock_t cblock = op->cblock;
1271 
1272 	if (success)
1273 		update_stats(&cache->stats, op->op);
1274 
1275 	switch (op->op) {
1276 	case POLICY_PROMOTE:
1277 		clear_discard(cache, oblock_to_dblock(cache, op->oblock));
1278 		policy_complete_background_work(cache->policy, op, success);
1279 
1280 		if (mg->overwrite_bio) {
1281 			if (success)
1282 				force_set_dirty(cache, cblock);
1283 			else if (mg->k.input)
1284 				mg->overwrite_bio->bi_status = mg->k.input;
1285 			else
1286 				mg->overwrite_bio->bi_status = BLK_STS_IOERR;
1287 			bio_endio(mg->overwrite_bio);
1288 		} else {
1289 			if (success)
1290 				force_clear_dirty(cache, cblock);
1291 			dec_io_migrations(cache);
1292 		}
1293 		break;
1294 
1295 	case POLICY_DEMOTE:
1296 		/*
1297 		 * We clear dirty here to update the nr_dirty counter.
1298 		 */
1299 		if (success)
1300 			force_clear_dirty(cache, cblock);
1301 		policy_complete_background_work(cache->policy, op, success);
1302 		dec_io_migrations(cache);
1303 		break;
1304 
1305 	case POLICY_WRITEBACK:
1306 		if (success)
1307 			force_clear_dirty(cache, cblock);
1308 		policy_complete_background_work(cache->policy, op, success);
1309 		dec_io_migrations(cache);
1310 		break;
1311 	}
1312 
1313 	bio_list_init(&bios);
1314 	if (mg->cell) {
1315 		if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1316 			free_prison_cell(cache, mg->cell);
1317 	}
1318 
1319 	free_migration(mg);
1320 	defer_bios(cache, &bios);
1321 	wake_migration_worker(cache);
1322 
1323 	background_work_end(cache);
1324 }
1325 
1326 static void mg_success(struct work_struct *ws)
1327 {
1328 	struct dm_cache_migration *mg = ws_to_mg(ws);
1329 	mg_complete(mg, mg->k.input == 0);
1330 }
1331 
1332 static void mg_update_metadata(struct work_struct *ws)
1333 {
1334 	int r;
1335 	struct dm_cache_migration *mg = ws_to_mg(ws);
1336 	struct cache *cache = mg->cache;
1337 	struct policy_work *op = mg->op;
1338 
1339 	switch (op->op) {
1340 	case POLICY_PROMOTE:
1341 		r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);
1342 		if (r) {
1343 			DMERR_LIMIT("%s: migration failed; couldn't insert mapping",
1344 				    cache_device_name(cache));
1345 			metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
1346 
1347 			mg_complete(mg, false);
1348 			return;
1349 		}
1350 		mg_complete(mg, true);
1351 		break;
1352 
1353 	case POLICY_DEMOTE:
1354 		r = dm_cache_remove_mapping(cache->cmd, op->cblock);
1355 		if (r) {
1356 			DMERR_LIMIT("%s: migration failed; couldn't update on disk metadata",
1357 				    cache_device_name(cache));
1358 			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1359 
1360 			mg_complete(mg, false);
1361 			return;
1362 		}
1363 
1364 		/*
1365 		 * It would be nice if we only had to commit when a REQ_FLUSH
1366 		 * comes through.  But there's one scenario that we have to
1367 		 * look out for:
1368 		 *
1369 		 * - vblock x in a cache block
1370 		 * - demotion occurs
1371 		 * - cache block gets reallocated and overwritten
1372 		 * - crash
1373 		 *
1374 		 * When we recover, because there was no commit the cache will
1375 		 * rollback to having the data for vblock x in the cache block.
1376 		 * But the cache block has since been overwritten, so it'll end
1377 		 * up pointing to data that was never in 'x' during the history
1378 		 * of the device.
1379 		 *
1380 		 * To avoid this issue we require a commit as part of the
1381 		 * demotion operation.
1382 		 */
1383 		init_continuation(&mg->k, mg_success);
1384 		continue_after_commit(&cache->committer, &mg->k);
1385 		schedule_commit(&cache->committer);
1386 		break;
1387 
1388 	case POLICY_WRITEBACK:
1389 		mg_complete(mg, true);
1390 		break;
1391 	}
1392 }
1393 
1394 static void mg_update_metadata_after_copy(struct work_struct *ws)
1395 {
1396 	struct dm_cache_migration *mg = ws_to_mg(ws);
1397 
1398 	/*
1399 	 * Did the copy succeed?
1400 	 */
1401 	if (mg->k.input)
1402 		mg_complete(mg, false);
1403 	else
1404 		mg_update_metadata(ws);
1405 }
1406 
1407 static void mg_upgrade_lock(struct work_struct *ws)
1408 {
1409 	int r;
1410 	struct dm_cache_migration *mg = ws_to_mg(ws);
1411 
1412 	/*
1413 	 * Did the copy succeed?
1414 	 */
1415 	if (mg->k.input)
1416 		mg_complete(mg, false);
1417 
1418 	else {
1419 		/*
1420 		 * Now we want the lock to prevent both reads and writes.
1421 		 */
1422 		r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
1423 					    READ_WRITE_LOCK_LEVEL);
1424 		if (r < 0)
1425 			mg_complete(mg, false);
1426 
1427 		else if (r)
1428 			quiesce(mg, mg_update_metadata);
1429 
1430 		else
1431 			mg_update_metadata(ws);
1432 	}
1433 }
1434 
1435 static void mg_full_copy(struct work_struct *ws)
1436 {
1437 	struct dm_cache_migration *mg = ws_to_mg(ws);
1438 	struct cache *cache = mg->cache;
1439 	struct policy_work *op = mg->op;
1440 	bool is_policy_promote = (op->op == POLICY_PROMOTE);
1441 
1442 	if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
1443 	    is_discarded_oblock(cache, op->oblock)) {
1444 		mg_upgrade_lock(ws);
1445 		return;
1446 	}
1447 
1448 	init_continuation(&mg->k, mg_upgrade_lock);
1449 
1450 	if (copy(mg, is_policy_promote)) {
1451 		DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
1452 		mg->k.input = BLK_STS_IOERR;
1453 		mg_complete(mg, false);
1454 	}
1455 }
1456 
1457 static void mg_copy(struct work_struct *ws)
1458 {
1459 	struct dm_cache_migration *mg = ws_to_mg(ws);
1460 
1461 	if (mg->overwrite_bio) {
1462 		/*
1463 		 * No exclusive lock was held when we last checked if the bio
1464 		 * was optimisable.  So we have to check again in case things
1465 		 * have changed (eg, the block may no longer be discarded).
1466 		 */
1467 		if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
1468 			/*
1469 			 * Fallback to a real full copy after doing some tidying up.
1470 			 */
1471 			bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
1472 			BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
1473 			mg->overwrite_bio = NULL;
1474 			inc_io_migrations(mg->cache);
1475 			mg_full_copy(ws);
1476 			return;
1477 		}
1478 
1479 		/*
1480 		 * It's safe to do this here, even though it's new data,
1481 		 * because all IO has been locked out of the block.
1482 		 *
1483 		 * mg_lock_writes() already took READ_WRITE_LOCK_LEVEL,
1484 		 * so we do _not_ use mg_upgrade_lock() as the continuation.
1485 		 */
1486 		overwrite(mg, mg_update_metadata_after_copy);
1487 
1488 	} else
1489 		mg_full_copy(ws);
1490 }
1491 
1492 static int mg_lock_writes(struct dm_cache_migration *mg)
1493 {
1494 	int r;
1495 	struct dm_cell_key_v2 key;
1496 	struct cache *cache = mg->cache;
1497 	struct dm_bio_prison_cell_v2 *prealloc;
1498 
1499 	prealloc = alloc_prison_cell(cache);
1500 	if (!prealloc) {
1501 		DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
1502 		mg_complete(mg, false);
1503 		return -ENOMEM;
1504 	}
1505 
1506 	/*
1507 	 * Prevent writes to the block, but allow reads to continue.
1508 	 * Unless we're using an overwrite bio, in which case we lock
1509 	 * everything.
1510 	 */
1511 	build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
1512 	r = dm_cell_lock_v2(cache->prison, &key,
1513 			    mg->overwrite_bio ?  READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
1514 			    prealloc, &mg->cell);
1515 	if (r < 0) {
1516 		free_prison_cell(cache, prealloc);
1517 		mg_complete(mg, false);
1518 		return r;
1519 	}
1520 
1521 	if (mg->cell != prealloc)
1522 		free_prison_cell(cache, prealloc);
1523 
1524 	if (r == 0)
1525 		mg_copy(&mg->k.ws);
1526 	else
1527 		quiesce(mg, mg_copy);
1528 
1529 	return 0;
1530 }
1531 
1532 static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
1533 {
1534 	struct dm_cache_migration *mg;
1535 
1536 	if (!background_work_begin(cache)) {
1537 		policy_complete_background_work(cache->policy, op, false);
1538 		return -EPERM;
1539 	}
1540 
1541 	mg = alloc_migration(cache);
1542 	if (!mg) {
1543 		policy_complete_background_work(cache->policy, op, false);
1544 		background_work_end(cache);
1545 		return -ENOMEM;
1546 	}
1547 
1548 	mg->op = op;
1549 	mg->overwrite_bio = bio;
1550 
1551 	if (!bio)
1552 		inc_io_migrations(cache);
1553 
1554 	return mg_lock_writes(mg);
1555 }
1556 
1557 /*----------------------------------------------------------------
1558  * invalidation processing
1559  *--------------------------------------------------------------*/
1560 
1561 static void invalidate_complete(struct dm_cache_migration *mg, bool success)
1562 {
1563 	struct bio_list bios;
1564 	struct cache *cache = mg->cache;
1565 
1566 	bio_list_init(&bios);
1567 	if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1568 		free_prison_cell(cache, mg->cell);
1569 
1570 	if (!success && mg->overwrite_bio)
1571 		bio_io_error(mg->overwrite_bio);
1572 
1573 	free_migration(mg);
1574 	defer_bios(cache, &bios);
1575 
1576 	background_work_end(cache);
1577 }
1578 
1579 static void invalidate_completed(struct work_struct *ws)
1580 {
1581 	struct dm_cache_migration *mg = ws_to_mg(ws);
1582 	invalidate_complete(mg, !mg->k.input);
1583 }
1584 
1585 static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
1586 {
1587 	int r = policy_invalidate_mapping(cache->policy, cblock);
1588 	if (!r) {
1589 		r = dm_cache_remove_mapping(cache->cmd, cblock);
1590 		if (r) {
1591 			DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
1592 				    cache_device_name(cache));
1593 			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1594 		}
1595 
1596 	} else if (r == -ENODATA) {
1597 		/*
1598 		 * Harmless, already unmapped.
1599 		 */
1600 		r = 0;
1601 
1602 	} else
1603 		DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache));
1604 
1605 	return r;
1606 }
1607 
1608 static void invalidate_remove(struct work_struct *ws)
1609 {
1610 	int r;
1611 	struct dm_cache_migration *mg = ws_to_mg(ws);
1612 	struct cache *cache = mg->cache;
1613 
1614 	r = invalidate_cblock(cache, mg->invalidate_cblock);
1615 	if (r) {
1616 		invalidate_complete(mg, false);
1617 		return;
1618 	}
1619 
1620 	init_continuation(&mg->k, invalidate_completed);
1621 	continue_after_commit(&cache->committer, &mg->k);
1622 	remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
1623 	mg->overwrite_bio = NULL;
1624 	schedule_commit(&cache->committer);
1625 }
1626 
1627 static int invalidate_lock(struct dm_cache_migration *mg)
1628 {
1629 	int r;
1630 	struct dm_cell_key_v2 key;
1631 	struct cache *cache = mg->cache;
1632 	struct dm_bio_prison_cell_v2 *prealloc;
1633 
1634 	prealloc = alloc_prison_cell(cache);
1635 	if (!prealloc) {
1636 		invalidate_complete(mg, false);
1637 		return -ENOMEM;
1638 	}
1639 
1640 	build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
1641 	r = dm_cell_lock_v2(cache->prison, &key,
1642 			    READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
1643 	if (r < 0) {
1644 		free_prison_cell(cache, prealloc);
1645 		invalidate_complete(mg, false);
1646 		return r;
1647 	}
1648 
1649 	if (mg->cell != prealloc)
1650 		free_prison_cell(cache, prealloc);
1651 
1652 	if (r)
1653 		quiesce(mg, invalidate_remove);
1654 
1655 	else {
1656 		/*
1657 		 * We can't call invalidate_remove() directly here because we
1658 		 * might still be in request context.
1659 		 */
1660 		init_continuation(&mg->k, invalidate_remove);
1661 		queue_work(cache->wq, &mg->k.ws);
1662 	}
1663 
1664 	return 0;
1665 }
1666 
1667 static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
1668 			    dm_oblock_t oblock, struct bio *bio)
1669 {
1670 	struct dm_cache_migration *mg;
1671 
1672 	if (!background_work_begin(cache))
1673 		return -EPERM;
1674 
1675 	mg = alloc_migration(cache);
1676 	if (!mg) {
1677 		background_work_end(cache);
1678 		return -ENOMEM;
1679 	}
1680 
1681 	mg->overwrite_bio = bio;
1682 	mg->invalidate_cblock = cblock;
1683 	mg->invalidate_oblock = oblock;
1684 
1685 	return invalidate_lock(mg);
1686 }
1687 
1688 /*----------------------------------------------------------------
1689  * bio processing
1690  *--------------------------------------------------------------*/
1691 
1692 enum busy {
1693 	IDLE,
1694 	BUSY
1695 };
1696 
1697 static enum busy spare_migration_bandwidth(struct cache *cache)
1698 {
1699 	bool idle = iot_idle_for(&cache->tracker, HZ);
1700 	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
1701 		cache->sectors_per_block;
1702 
1703 	if (idle && current_volume <= cache->migration_threshold)
1704 		return IDLE;
1705 	else
1706 		return BUSY;
1707 }
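/*
 * For example, with illustrative numbers: for 64KiB cache blocks
 * (sectors_per_block = 128) and migration_threshold = 2048 sectors, IDLE is
 * only reported while the tracker has seen no IO for HZ jiffies and at most
 * 15 io migrations are already in flight, so the prospective one keeps the
 * total volume within the threshold.
 */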
1708 
1709 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1710 {
1711 	atomic_inc(bio_data_dir(bio) == READ ?
1712 		   &cache->stats.read_hit : &cache->stats.write_hit);
1713 }
1714 
1715 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1716 {
1717 	atomic_inc(bio_data_dir(bio) == READ ?
1718 		   &cache->stats.read_miss : &cache->stats.write_miss);
1719 }
1720 
1721 /*----------------------------------------------------------------*/
1722 
1723 static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
1724 		   bool *commit_needed)
1725 {
1726 	int r, data_dir;
1727 	bool rb, background_queued;
1728 	dm_cblock_t cblock;
1729 
1730 	*commit_needed = false;
1731 
1732 	rb = bio_detain_shared(cache, block, bio);
1733 	if (!rb) {
1734 		/*
1735 		 * An exclusive lock is held for this block, so we have to
1736 		 * wait.  We set the commit_needed flag so the current
1737 		 * transaction will be committed asap, allowing this lock
1738 		 * to be dropped.
1739 		 */
1740 		*commit_needed = true;
1741 		return DM_MAPIO_SUBMITTED;
1742 	}
1743 
1744 	data_dir = bio_data_dir(bio);
1745 
1746 	if (optimisable_bio(cache, bio, block)) {
1747 		struct policy_work *op = NULL;
1748 
1749 		r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op);
1750 		if (unlikely(r && r != -ENOENT)) {
1751 			DMERR_LIMIT("%s: policy_lookup_with_work() failed with r = %d",
1752 				    cache_device_name(cache), r);
1753 			bio_io_error(bio);
1754 			return DM_MAPIO_SUBMITTED;
1755 		}
1756 
1757 		if (r == -ENOENT && op) {
1758 			bio_drop_shared_lock(cache, bio);
1759 			BUG_ON(op->op != POLICY_PROMOTE);
1760 			mg_start(cache, op, bio);
1761 			return DM_MAPIO_SUBMITTED;
1762 		}
1763 	} else {
1764 		r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued);
1765 		if (unlikely(r && r != -ENOENT)) {
1766 			DMERR_LIMIT("%s: policy_lookup() failed with r = %d",
1767 				    cache_device_name(cache), r);
1768 			bio_io_error(bio);
1769 			return DM_MAPIO_SUBMITTED;
1770 		}
1771 
1772 		if (background_queued)
1773 			wake_migration_worker(cache);
1774 	}
1775 
1776 	if (r == -ENOENT) {
1777 		struct per_bio_data *pb = get_per_bio_data(bio);
1778 
1779 		/*
1780 		 * Miss.
1781 		 */
1782 		inc_miss_counter(cache, bio);
1783 		if (pb->req_nr == 0) {
1784 			accounted_begin(cache, bio);
1785 			remap_to_origin_clear_discard(cache, bio, block);
1786 		} else {
1787 			/*
1788 			 * This is a duplicate writethrough io that is no
1789 			 * longer needed because the block has been demoted.
1790 			 */
1791 			bio_endio(bio);
1792 			return DM_MAPIO_SUBMITTED;
1793 		}
1794 	} else {
1795 		/*
1796 		 * Hit.
1797 		 */
1798 		inc_hit_counter(cache, bio);
1799 
1800 		/*
1801 		 * Passthrough always maps to the origin, invalidating any
1802 		 * cache blocks that are written to.
1803 		 */
1804 		if (passthrough_mode(cache)) {
1805 			if (bio_data_dir(bio) == WRITE) {
1806 				bio_drop_shared_lock(cache, bio);
1807 				atomic_inc(&cache->stats.demotion);
1808 				invalidate_start(cache, cblock, block, bio);
1809 			} else
1810 				remap_to_origin_clear_discard(cache, bio, block);
1811 		} else {
1812 			if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
1813 			    !is_dirty(cache, cblock)) {
1814 				remap_to_origin_and_cache(cache, bio, block, cblock);
1815 				accounted_begin(cache, bio);
1816 			} else
1817 				remap_to_cache_dirty(cache, bio, block, cblock);
1818 		}
1819 	}
1820 
1821 	/*
1822 	 * dm core turns FUA requests into a separate payload and FLUSH req.
1823 	 */
1824 	if (bio->bi_opf & REQ_FUA) {
1825 		/*
1826 		 * issue_after_commit will call accounted_begin a second time.  So
1827 		 * we call accounted_complete() to avoid double accounting.
1828 		 */
1829 		accounted_complete(cache, bio);
1830 		issue_after_commit(&cache->committer, bio);
1831 		*commit_needed = true;
1832 		return DM_MAPIO_SUBMITTED;
1833 	}
1834 
1835 	return DM_MAPIO_REMAPPED;
1836 }
1837 
1838 static bool process_bio(struct cache *cache, struct bio *bio)
1839 {
1840 	bool commit_needed;
1841 
1842 	if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
1843 		generic_make_request(bio);
1844 
1845 	return commit_needed;
1846 }
1847 
1848 /*
1849  * A non-zero return indicates read_only or fail_io mode.
1850  */
1851 static int commit(struct cache *cache, bool clean_shutdown)
1852 {
1853 	int r;
1854 
1855 	if (get_cache_mode(cache) >= CM_READ_ONLY)
1856 		return -EINVAL;
1857 
1858 	atomic_inc(&cache->stats.commit_count);
1859 	r = dm_cache_commit(cache->cmd, clean_shutdown);
1860 	if (r)
1861 		metadata_operation_failed(cache, "dm_cache_commit", r);
1862 
1863 	return r;
1864 }
1865 
1866 /*
1867  * Used by the batcher.
1868  */
1869 static blk_status_t commit_op(void *context)
1870 {
1871 	struct cache *cache = context;
1872 
1873 	if (dm_cache_changed_this_transaction(cache->cmd))
1874 		return errno_to_blk_status(commit(cache, false));
1875 
1876 	return 0;
1877 }
1878 
1879 /*----------------------------------------------------------------*/
1880 
1881 static bool process_flush_bio(struct cache *cache, struct bio *bio)
1882 {
1883 	struct per_bio_data *pb = get_per_bio_data(bio);
1884 
1885 	if (!pb->req_nr)
1886 		remap_to_origin(cache, bio);
1887 	else
1888 		remap_to_cache(cache, bio, 0);
1889 
1890 	issue_after_commit(&cache->committer, bio);
1891 	return true;
1892 }
1893 
1894 static bool process_discard_bio(struct cache *cache, struct bio *bio)
1895 {
1896 	dm_dblock_t b, e;
1897 
1898 	// FIXME: do we need to lock the region?  Or can we just assume the
1899 	// user won't be so foolish as to issue discard concurrently with
1900 	// other IO?
1901 	calc_discard_block_range(cache, bio, &b, &e);
1902 	while (b != e) {
1903 		set_discard(cache, b);
1904 		b = to_dblock(from_dblock(b) + 1);
1905 	}
1906 
1907 	bio_endio(bio);
1908 
1909 	return false;
1910 }
1911 
1912 static void process_deferred_bios(struct work_struct *ws)
1913 {
1914 	struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
1915 
1916 	unsigned long flags;
1917 	bool commit_needed = false;
1918 	struct bio_list bios;
1919 	struct bio *bio;
1920 
1921 	bio_list_init(&bios);
1922 
1923 	spin_lock_irqsave(&cache->lock, flags);
1924 	bio_list_merge(&bios, &cache->deferred_bios);
1925 	bio_list_init(&cache->deferred_bios);
1926 	spin_unlock_irqrestore(&cache->lock, flags);
1927 
1928 	while ((bio = bio_list_pop(&bios))) {
1929 		if (bio->bi_opf & REQ_PREFLUSH)
1930 			commit_needed = process_flush_bio(cache, bio) || commit_needed;
1931 
1932 		else if (bio_op(bio) == REQ_OP_DISCARD)
1933 			commit_needed = process_discard_bio(cache, bio) || commit_needed;
1934 
1935 		else
1936 			commit_needed = process_bio(cache, bio) || commit_needed;
1937 	}
1938 
1939 	if (commit_needed)
1940 		schedule_commit(&cache->committer);
1941 }
1942 
1943 /*----------------------------------------------------------------
1944  * Main worker loop
1945  *--------------------------------------------------------------*/
1946 
1947 static void requeue_deferred_bios(struct cache *cache)
1948 {
1949 	struct bio *bio;
1950 	struct bio_list bios;
1951 
1952 	bio_list_init(&bios);
1953 	bio_list_merge(&bios, &cache->deferred_bios);
1954 	bio_list_init(&cache->deferred_bios);
1955 
1956 	while ((bio = bio_list_pop(&bios))) {
1957 		bio->bi_status = BLK_STS_DM_REQUEUE;
1958 		bio_endio(bio);
1959 	}
1960 }
1961 
1962 /*
1963  * We want to commit periodically so that not too much
1964  * unwritten metadata builds up.
1965  */
1966 static void do_waker(struct work_struct *ws)
1967 {
1968 	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1969 
1970 	policy_tick(cache->policy, true);
1971 	wake_migration_worker(cache);
1972 	schedule_commit(&cache->committer);
1973 	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1974 }
1975 
1976 static void check_migrations(struct work_struct *ws)
1977 {
1978 	int r;
1979 	struct policy_work *op;
1980 	struct cache *cache = container_of(ws, struct cache, migration_worker);
1981 	enum busy b;
1982 
1983 	for (;;) {
1984 		b = spare_migration_bandwidth(cache);
1985 
1986 		r = policy_get_background_work(cache->policy, b == IDLE, &op);
1987 		if (r == -ENODATA)
1988 			break;
1989 
1990 		if (r) {
1991 			DMERR_LIMIT("%s: policy_background_work failed",
1992 				    cache_device_name(cache));
1993 			break;
1994 		}
1995 
1996 		r = mg_start(cache, op, NULL);
1997 		if (r)
1998 			break;
1999 	}
2000 }
2001 
2002 /*----------------------------------------------------------------
2003  * Target methods
2004  *--------------------------------------------------------------*/
2005 
2006 /*
2007  * This function gets called on the error paths of the constructor, so we
2008  * have to cope with a partially initialised struct.
2009  */
2010 static void destroy(struct cache *cache)
2011 {
2012 	unsigned i;
2013 
2014 	mempool_exit(&cache->migration_pool);
2015 
2016 	if (cache->prison)
2017 		dm_bio_prison_destroy_v2(cache->prison);
2018 
2019 	if (cache->wq)
2020 		destroy_workqueue(cache->wq);
2021 
2022 	if (cache->dirty_bitset)
2023 		free_bitset(cache->dirty_bitset);
2024 
2025 	if (cache->discard_bitset)
2026 		free_bitset(cache->discard_bitset);
2027 
2028 	if (cache->copier)
2029 		dm_kcopyd_client_destroy(cache->copier);
2030 
2031 	if (cache->cmd)
2032 		dm_cache_metadata_close(cache->cmd);
2033 
2034 	if (cache->metadata_dev)
2035 		dm_put_device(cache->ti, cache->metadata_dev);
2036 
2037 	if (cache->origin_dev)
2038 		dm_put_device(cache->ti, cache->origin_dev);
2039 
2040 	if (cache->cache_dev)
2041 		dm_put_device(cache->ti, cache->cache_dev);
2042 
2043 	if (cache->policy)
2044 		dm_cache_policy_destroy(cache->policy);
2045 
2046 	for (i = 0; i < cache->nr_ctr_args ; i++)
2047 		kfree(cache->ctr_args[i]);
2048 	kfree(cache->ctr_args);
2049 
2050 	bioset_exit(&cache->bs);
2051 
2052 	kfree(cache);
2053 }
2054 
2055 static void cache_dtr(struct dm_target *ti)
2056 {
2057 	struct cache *cache = ti->private;
2058 
2059 	destroy(cache);
2060 }
2061 
2062 static sector_t get_dev_size(struct dm_dev *dev)
2063 {
2064 	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
2065 }
2066 
2067 /*----------------------------------------------------------------*/
2068 
2069 /*
2070  * Construct a cache device mapping.
2071  *
2072  * cache <metadata dev> <cache dev> <origin dev> <block size>
2073  *       <#feature args> [<feature arg>]*
2074  *       <policy> <#policy args> [<policy arg>]*
2075  *
2076  * metadata dev    : fast device holding the persistent metadata
2077  * cache dev	   : fast device holding cached data blocks
2078  * origin dev	   : slow device holding original data blocks
2079  * block size	   : cache unit size in sectors
2080  *
2081  * #feature args   : number of feature arguments passed
2082  * feature args    : writethrough.  (The default is writeback.)
2083  *
2084  * policy	   : the replacement policy to use
2085  * #policy args    : an even number of policy arguments corresponding
2086  *		     to key/value pairs passed to the policy
2087  * policy args	   : key/value pairs passed to the policy
2088  *		     E.g. 'sequential_threshold 1024'
2089  *		     See cache-policies.txt for details.
2090  *
2091  * Optional feature arguments are:
2092  *   writethrough  : write through caching that prohibits cache block
2093  *		     content from being different from origin block content.
2094  *		     Without this argument, the default behaviour is to write
2095  *		     back cache block contents later for performance reasons,
2096  *		     so they may differ from the corresponding origin blocks.
2097  */
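/*
 * Illustrative table line only (device names and sizes are hypothetical,
 * not taken from this file): following the synopsis above, a 20GiB origin
 * cached with 512 sector (256KB) blocks in writethrough mode under the
 * 'smq' policy could be created with:
 *
 *   dmsetup create cached --table "0 41943040 cache /dev/mapper/ssd-meta \
 *     /dev/mapper/ssd-blocks /dev/sdb 512 1 writethrough smq 0"
 */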
2098 struct cache_args {
2099 	struct dm_target *ti;
2100 
2101 	struct dm_dev *metadata_dev;
2102 
2103 	struct dm_dev *cache_dev;
2104 	sector_t cache_sectors;
2105 
2106 	struct dm_dev *origin_dev;
2107 	sector_t origin_sectors;
2108 
2109 	uint32_t block_size;
2110 
2111 	const char *policy_name;
2112 	int policy_argc;
2113 	const char **policy_argv;
2114 
2115 	struct cache_features features;
2116 };
2117 
2118 static void destroy_cache_args(struct cache_args *ca)
2119 {
2120 	if (ca->metadata_dev)
2121 		dm_put_device(ca->ti, ca->metadata_dev);
2122 
2123 	if (ca->cache_dev)
2124 		dm_put_device(ca->ti, ca->cache_dev);
2125 
2126 	if (ca->origin_dev)
2127 		dm_put_device(ca->ti, ca->origin_dev);
2128 
2129 	kfree(ca);
2130 }
2131 
2132 static bool at_least_one_arg(struct dm_arg_set *as, char **error)
2133 {
2134 	if (!as->argc) {
2135 		*error = "Insufficient args";
2136 		return false;
2137 	}
2138 
2139 	return true;
2140 }
2141 
2142 static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
2143 			      char **error)
2144 {
2145 	int r;
2146 	sector_t metadata_dev_size;
2147 	char b[BDEVNAME_SIZE];
2148 
2149 	if (!at_least_one_arg(as, error))
2150 		return -EINVAL;
2151 
2152 	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2153 			  &ca->metadata_dev);
2154 	if (r) {
2155 		*error = "Error opening metadata device";
2156 		return r;
2157 	}
2158 
2159 	metadata_dev_size = get_dev_size(ca->metadata_dev);
2160 	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
2161 		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2162 		       bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS);
2163 
2164 	return 0;
2165 }
2166 
2167 static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
2168 			   char **error)
2169 {
2170 	int r;
2171 
2172 	if (!at_least_one_arg(as, error))
2173 		return -EINVAL;
2174 
2175 	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2176 			  &ca->cache_dev);
2177 	if (r) {
2178 		*error = "Error opening cache device";
2179 		return r;
2180 	}
2181 	ca->cache_sectors = get_dev_size(ca->cache_dev);
2182 
2183 	return 0;
2184 }
2185 
2186 static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
2187 			    char **error)
2188 {
2189 	int r;
2190 
2191 	if (!at_least_one_arg(as, error))
2192 		return -EINVAL;
2193 
2194 	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2195 			  &ca->origin_dev);
2196 	if (r) {
2197 		*error = "Error opening origin device";
2198 		return r;
2199 	}
2200 
2201 	ca->origin_sectors = get_dev_size(ca->origin_dev);
2202 	if (ca->ti->len > ca->origin_sectors) {
2203 		*error = "Device size larger than cached device";
2204 		return -EINVAL;
2205 	}
2206 
2207 	return 0;
2208 }
2209 
2210 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
2211 			    char **error)
2212 {
2213 	unsigned long block_size;
2214 
2215 	if (!at_least_one_arg(as, error))
2216 		return -EINVAL;
2217 
2218 	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
2219 	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2220 	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2221 	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2222 		*error = "Invalid data block size";
2223 		return -EINVAL;
2224 	}
2225 
2226 	if (block_size > ca->cache_sectors) {
2227 		*error = "Data block size is larger than the cache device";
2228 		return -EINVAL;
2229 	}
2230 
2231 	ca->block_size = block_size;
2232 
2233 	return 0;
2234 }
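/*
 * For reference, DATA_DEV_BLOCK_SIZE_MIN/MAX_SECTORS are defined near the
 * top of this file (not shown here); in mainline dm-cache they are 64 and
 * 2097152 sectors, i.e. the block size must be a multiple of 32KB between
 * 32KB and 1GB.  So an argument of '512' (256KB) passes the checks above,
 * while '100' fails the alignment test.
 */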
2235 
2236 static void init_features(struct cache_features *cf)
2237 {
2238 	cf->mode = CM_WRITE;
2239 	cf->io_mode = CM_IO_WRITEBACK;
2240 	cf->metadata_version = 1;
2241 }
2242 
2243 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2244 			  char **error)
2245 {
2246 	static const struct dm_arg _args[] = {
2247 		{0, 2, "Invalid number of cache feature arguments"},
2248 	};
2249 
2250 	int r;
2251 	unsigned argc;
2252 	const char *arg;
2253 	struct cache_features *cf = &ca->features;
2254 
2255 	init_features(cf);
2256 
2257 	r = dm_read_arg_group(_args, as, &argc, error);
2258 	if (r)
2259 		return -EINVAL;
2260 
2261 	while (argc--) {
2262 		arg = dm_shift_arg(as);
2263 
2264 		if (!strcasecmp(arg, "writeback"))
2265 			cf->io_mode = CM_IO_WRITEBACK;
2266 
2267 		else if (!strcasecmp(arg, "writethrough"))
2268 			cf->io_mode = CM_IO_WRITETHROUGH;
2269 
2270 		else if (!strcasecmp(arg, "passthrough"))
2271 			cf->io_mode = CM_IO_PASSTHROUGH;
2272 
2273 		else if (!strcasecmp(arg, "metadata2"))
2274 			cf->metadata_version = 2;
2275 
2276 		else {
2277 			*error = "Unrecognised cache feature requested";
2278 			return -EINVAL;
2279 		}
2280 	}
2281 
2282 	return 0;
2283 }
2284 
2285 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2286 			char **error)
2287 {
2288 	static const struct dm_arg _args[] = {
2289 		{0, 1024, "Invalid number of policy arguments"},
2290 	};
2291 
2292 	int r;
2293 
2294 	if (!at_least_one_arg(as, error))
2295 		return -EINVAL;
2296 
2297 	ca->policy_name = dm_shift_arg(as);
2298 
2299 	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2300 	if (r)
2301 		return -EINVAL;
2302 
2303 	ca->policy_argv = (const char **)as->argv;
2304 	dm_consume_args(as, ca->policy_argc);
2305 
2306 	return 0;
2307 }
2308 
2309 static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2310 			    char **error)
2311 {
2312 	int r;
2313 	struct dm_arg_set as;
2314 
2315 	as.argc = argc;
2316 	as.argv = argv;
2317 
2318 	r = parse_metadata_dev(ca, &as, error);
2319 	if (r)
2320 		return r;
2321 
2322 	r = parse_cache_dev(ca, &as, error);
2323 	if (r)
2324 		return r;
2325 
2326 	r = parse_origin_dev(ca, &as, error);
2327 	if (r)
2328 		return r;
2329 
2330 	r = parse_block_size(ca, &as, error);
2331 	if (r)
2332 		return r;
2333 
2334 	r = parse_features(ca, &as, error);
2335 	if (r)
2336 		return r;
2337 
2338 	r = parse_policy(ca, &as, error);
2339 	if (r)
2340 		return r;
2341 
2342 	return 0;
2343 }
2344 
2345 /*----------------------------------------------------------------*/
2346 
2347 static struct kmem_cache *migration_cache;
2348 
2349 #define NOT_CORE_OPTION 1
2350 
2351 static int process_config_option(struct cache *cache, const char *key, const char *value)
2352 {
2353 	unsigned long tmp;
2354 
2355 	if (!strcasecmp(key, "migration_threshold")) {
2356 		if (kstrtoul(value, 10, &tmp))
2357 			return -EINVAL;
2358 
2359 		cache->migration_threshold = tmp;
2360 		return 0;
2361 	}
2362 
2363 	return NOT_CORE_OPTION;
2364 }
2365 
2366 static int set_config_value(struct cache *cache, const char *key, const char *value)
2367 {
2368 	int r = process_config_option(cache, key, value);
2369 
2370 	if (r == NOT_CORE_OPTION)
2371 		r = policy_set_config_value(cache->policy, key, value);
2372 
2373 	if (r)
2374 		DMWARN("bad config value for %s: %s", key, value);
2375 
2376 	return r;
2377 }
2378 
2379 static int set_config_values(struct cache *cache, int argc, const char **argv)
2380 {
2381 	int r = 0;
2382 
2383 	if (argc & 1) {
2384 		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
2385 		return -EINVAL;
2386 	}
2387 
2388 	while (argc) {
2389 		r = set_config_value(cache, argv[0], argv[1]);
2390 		if (r)
2391 			break;
2392 
2393 		argc -= 2;
2394 		argv += 2;
2395 	}
2396 
2397 	return r;
2398 }
2399 
2400 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2401 			       char **error)
2402 {
2403 	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2404 							   cache->cache_size,
2405 							   cache->origin_sectors,
2406 							   cache->sectors_per_block);
2407 	if (IS_ERR(p)) {
2408 		*error = "Error creating cache's policy";
2409 		return PTR_ERR(p);
2410 	}
2411 	cache->policy = p;
2412 	BUG_ON(!cache->policy);
2413 
2414 	return 0;
2415 }
2416 
2417 /*
2418  * We want the discard block size to be at least the cache block size
2419  * and to have no more than 2^14 discard blocks across the origin.
2420  */
2421 #define MAX_DISCARD_BLOCKS (1 << 14)
2422 
2423 static bool too_many_discard_blocks(sector_t discard_block_size,
2424 				    sector_t origin_size)
2425 {
2426 	(void) sector_div(origin_size, discard_block_size);
2427 
2428 	return origin_size > MAX_DISCARD_BLOCKS;
2429 }
2430 
2431 static sector_t calculate_discard_block_size(sector_t cache_block_size,
2432 					     sector_t origin_size)
2433 {
2434 	sector_t discard_block_size = cache_block_size;
2435 
2436 	if (origin_size)
2437 		while (too_many_discard_blocks(discard_block_size, origin_size))
2438 			discard_block_size *= 2;
2439 
2440 	return discard_block_size;
2441 }
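/*
 * Worked example (sizes are illustrative): with 512 sector cache blocks and
 * a 2TiB (4294967296 sector) origin, a 512 sector discard block would give
 * 8388608 discard blocks, far more than 2^14.  The loop above doubles the
 * size until 262144 sectors (128MB), at which point the origin holds exactly
 * 16384 discard blocks and the size is accepted.
 */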
2442 
2443 static void set_cache_size(struct cache *cache, dm_cblock_t size)
2444 {
2445 	dm_block_t nr_blocks = from_cblock(size);
2446 
2447 	if (nr_blocks > (1 << 20) && cache->cache_size != size)
2448 		DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2449 			     "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
2450 			     "Please consider increasing the cache block size to reduce the overall cache block count.",
2451 			     (unsigned long long) nr_blocks);
2452 
2453 	cache->cache_size = size;
2454 }
2455 
2456 static int is_congested(struct dm_dev *dev, int bdi_bits)
2457 {
2458 	struct request_queue *q = bdev_get_queue(dev->bdev);
2459 	return bdi_congested(q->backing_dev_info, bdi_bits);
2460 }
2461 
2462 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2463 {
2464 	struct cache *cache = container_of(cb, struct cache, callbacks);
2465 
2466 	return is_congested(cache->origin_dev, bdi_bits) ||
2467 		is_congested(cache->cache_dev, bdi_bits);
2468 }
2469 
2470 #define DEFAULT_MIGRATION_THRESHOLD 2048
2471 
2472 static int cache_create(struct cache_args *ca, struct cache **result)
2473 {
2474 	int r = 0;
2475 	char **error = &ca->ti->error;
2476 	struct cache *cache;
2477 	struct dm_target *ti = ca->ti;
2478 	dm_block_t origin_blocks;
2479 	struct dm_cache_metadata *cmd;
2480 	bool may_format = ca->features.mode == CM_WRITE;
2481 
2482 	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2483 	if (!cache)
2484 		return -ENOMEM;
2485 
2486 	cache->ti = ca->ti;
2487 	ti->private = cache;
2488 	ti->num_flush_bios = 2;
2489 	ti->flush_supported = true;
2490 
2491 	ti->num_discard_bios = 1;
2492 	ti->discards_supported = true;
2493 	ti->split_discard_bios = false;
2494 
2495 	ti->per_io_data_size = sizeof(struct per_bio_data);
2496 
2497 	cache->features = ca->features;
2498 	if (writethrough_mode(cache)) {
2499 		/* Create bioset for writethrough bios issued to origin */
2500 		r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
2501 		if (r)
2502 			goto bad;
2503 	}
2504 
2505 	cache->callbacks.congested_fn = cache_is_congested;
2506 	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
2507 
2508 	cache->metadata_dev = ca->metadata_dev;
2509 	cache->origin_dev = ca->origin_dev;
2510 	cache->cache_dev = ca->cache_dev;
2511 
2512 	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2513 
2514 	origin_blocks = cache->origin_sectors = ca->origin_sectors;
2515 	origin_blocks = block_div(origin_blocks, ca->block_size);
2516 	cache->origin_blocks = to_oblock(origin_blocks);
2517 
2518 	cache->sectors_per_block = ca->block_size;
2519 	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2520 		r = -EINVAL;
2521 		goto bad;
2522 	}
2523 
2524 	if (ca->block_size & (ca->block_size - 1)) {
2525 		dm_block_t cache_size = ca->cache_sectors;
2526 
2527 		cache->sectors_per_block_shift = -1;
2528 		cache_size = block_div(cache_size, ca->block_size);
2529 		set_cache_size(cache, to_cblock(cache_size));
2530 	} else {
2531 		cache->sectors_per_block_shift = __ffs(ca->block_size);
2532 		set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
2533 	}
2534 
2535 	r = create_cache_policy(cache, ca, error);
2536 	if (r)
2537 		goto bad;
2538 
2539 	cache->policy_nr_args = ca->policy_argc;
2540 	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2541 
2542 	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2543 	if (r) {
2544 		*error = "Error setting cache policy's config values";
2545 		goto bad;
2546 	}
2547 
2548 	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2549 				     ca->block_size, may_format,
2550 				     dm_cache_policy_get_hint_size(cache->policy),
2551 				     ca->features.metadata_version);
2552 	if (IS_ERR(cmd)) {
2553 		*error = "Error creating metadata object";
2554 		r = PTR_ERR(cmd);
2555 		goto bad;
2556 	}
2557 	cache->cmd = cmd;
2558 	set_cache_mode(cache, CM_WRITE);
2559 	if (get_cache_mode(cache) != CM_WRITE) {
2560 		*error = "Unable to get write access to metadata, please check/repair metadata.";
2561 		r = -EINVAL;
2562 		goto bad;
2563 	}
2564 
2565 	if (passthrough_mode(cache)) {
2566 		bool all_clean;
2567 
2568 		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2569 		if (r) {
2570 			*error = "dm_cache_metadata_all_clean() failed";
2571 			goto bad;
2572 		}
2573 
2574 		if (!all_clean) {
2575 			*error = "Cannot enter passthrough mode unless all blocks are clean";
2576 			r = -EINVAL;
2577 			goto bad;
2578 		}
2579 
2580 		policy_allow_migrations(cache->policy, false);
2581 	}
2582 
2583 	spin_lock_init(&cache->lock);
2584 	bio_list_init(&cache->deferred_bios);
2585 	atomic_set(&cache->nr_allocated_migrations, 0);
2586 	atomic_set(&cache->nr_io_migrations, 0);
2587 	init_waitqueue_head(&cache->migration_wait);
2588 
2589 	r = -ENOMEM;
2590 	atomic_set(&cache->nr_dirty, 0);
2591 	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2592 	if (!cache->dirty_bitset) {
2593 		*error = "could not allocate dirty bitset";
2594 		goto bad;
2595 	}
2596 	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2597 
2598 	cache->discard_block_size =
2599 		calculate_discard_block_size(cache->sectors_per_block,
2600 					     cache->origin_sectors);
2601 	cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2602 							      cache->discard_block_size));
2603 	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2604 	if (!cache->discard_bitset) {
2605 		*error = "could not allocate discard bitset";
2606 		goto bad;
2607 	}
2608 	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2609 
2610 	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2611 	if (IS_ERR(cache->copier)) {
2612 		*error = "could not create kcopyd client";
2613 		r = PTR_ERR(cache->copier);
2614 		goto bad;
2615 	}
2616 
2617 	cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
2618 	if (!cache->wq) {
2619 		*error = "could not create workqueue for metadata object";
2620 		goto bad;
2621 	}
2622 	INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
2623 	INIT_WORK(&cache->migration_worker, check_migrations);
2624 	INIT_DELAYED_WORK(&cache->waker, do_waker);
2625 
2626 	cache->prison = dm_bio_prison_create_v2(cache->wq);
2627 	if (!cache->prison) {
2628 		*error = "could not create bio prison";
2629 		goto bad;
2630 	}
2631 
2632 	r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
2633 				   migration_cache);
2634 	if (r) {
2635 		*error = "Error creating cache's migration mempool";
2636 		goto bad;
2637 	}
2638 
2639 	cache->need_tick_bio = true;
2640 	cache->sized = false;
2641 	cache->invalidate = false;
2642 	cache->commit_requested = false;
2643 	cache->loaded_mappings = false;
2644 	cache->loaded_discards = false;
2645 
2646 	load_stats(cache);
2647 
2648 	atomic_set(&cache->stats.demotion, 0);
2649 	atomic_set(&cache->stats.promotion, 0);
2650 	atomic_set(&cache->stats.copies_avoided, 0);
2651 	atomic_set(&cache->stats.cache_cell_clash, 0);
2652 	atomic_set(&cache->stats.commit_count, 0);
2653 	atomic_set(&cache->stats.discard_count, 0);
2654 
2655 	spin_lock_init(&cache->invalidation_lock);
2656 	INIT_LIST_HEAD(&cache->invalidation_requests);
2657 
2658 	batcher_init(&cache->committer, commit_op, cache,
2659 		     issue_op, cache, cache->wq);
2660 	iot_init(&cache->tracker);
2661 
2662 	init_rwsem(&cache->background_work_lock);
2663 	prevent_background_work(cache);
2664 
2665 	*result = cache;
2666 	return 0;
2667 bad:
2668 	destroy(cache);
2669 	return r;
2670 }
2671 
2672 static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2673 {
2674 	unsigned i;
2675 	const char **copy;
2676 
2677 	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2678 	if (!copy)
2679 		return -ENOMEM;
2680 	for (i = 0; i < argc; i++) {
2681 		copy[i] = kstrdup(argv[i], GFP_KERNEL);
2682 		if (!copy[i]) {
2683 			while (i--)
2684 				kfree(copy[i]);
2685 			kfree(copy);
2686 			return -ENOMEM;
2687 		}
2688 	}
2689 
2690 	cache->nr_ctr_args = argc;
2691 	cache->ctr_args = copy;
2692 
2693 	return 0;
2694 }
2695 
2696 static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2697 {
2698 	int r = -EINVAL;
2699 	struct cache_args *ca;
2700 	struct cache *cache = NULL;
2701 
2702 	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2703 	if (!ca) {
2704 		ti->error = "Error allocating memory for cache";
2705 		return -ENOMEM;
2706 	}
2707 	ca->ti = ti;
2708 
2709 	r = parse_cache_args(ca, argc, argv, &ti->error);
2710 	if (r)
2711 		goto out;
2712 
2713 	r = cache_create(ca, &cache);
2714 	if (r)
2715 		goto out;
2716 
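	/*
	 * The first three ctr args are the device paths; cache_status()
	 * re-emits those from the dm_dev handles, so only the remaining
	 * args need to be kept for STATUSTYPE_TABLE output.
	 */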
2717 	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2718 	if (r) {
2719 		destroy(cache);
2720 		goto out;
2721 	}
2722 
2723 	ti->private = cache;
2724 out:
2725 	destroy_cache_args(ca);
2726 	return r;
2727 }
2728 
2729 /*----------------------------------------------------------------*/
2730 
2731 static int cache_map(struct dm_target *ti, struct bio *bio)
2732 {
2733 	struct cache *cache = ti->private;
2734 
2735 	int r;
2736 	bool commit_needed;
2737 	dm_oblock_t block = get_bio_block(cache, bio);
2738 
2739 	init_per_bio_data(bio);
2740 	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2741 		/*
2742 		 * This can only occur if the io goes to a partial block at
2743 		 * the end of the origin device.  We don't cache these.
2744 		 * Just remap to the origin and carry on.
2745 		 */
2746 		remap_to_origin(cache, bio);
2747 		accounted_begin(cache, bio);
2748 		return DM_MAPIO_REMAPPED;
2749 	}
2750 
2751 	if (discard_or_flush(bio)) {
2752 		defer_bio(cache, bio);
2753 		return DM_MAPIO_SUBMITTED;
2754 	}
2755 
2756 	r = map_bio(cache, bio, block, &commit_needed);
2757 	if (commit_needed)
2758 		schedule_commit(&cache->committer);
2759 
2760 	return r;
2761 }
2762 
2763 static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
2764 {
2765 	struct cache *cache = ti->private;
2766 	unsigned long flags;
2767 	struct per_bio_data *pb = get_per_bio_data(bio);
2768 
2769 	if (pb->tick) {
2770 		policy_tick(cache->policy, false);
2771 
2772 		spin_lock_irqsave(&cache->lock, flags);
2773 		cache->need_tick_bio = true;
2774 		spin_unlock_irqrestore(&cache->lock, flags);
2775 	}
2776 
2777 	bio_drop_shared_lock(cache, bio);
2778 	accounted_complete(cache, bio);
2779 
2780 	return DM_ENDIO_DONE;
2781 }
2782 
2783 static int write_dirty_bitset(struct cache *cache)
2784 {
2785 	int r;
2786 
2787 	if (get_cache_mode(cache) >= CM_READ_ONLY)
2788 		return -EINVAL;
2789 
2790 	r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
2791 	if (r)
2792 		metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
2793 
2794 	return r;
2795 }
2796 
2797 static int write_discard_bitset(struct cache *cache)
2798 {
2799 	unsigned i, r;
2800 
2801 	if (get_cache_mode(cache) >= CM_READ_ONLY)
2802 		return -EINVAL;
2803 
2804 	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2805 					   cache->discard_nr_blocks);
2806 	if (r) {
2807 		DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
2808 		metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
2809 		return r;
2810 	}
2811 
2812 	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2813 		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2814 					 is_discarded(cache, to_dblock(i)));
2815 		if (r) {
2816 			metadata_operation_failed(cache, "dm_cache_set_discard", r);
2817 			return r;
2818 		}
2819 	}
2820 
2821 	return 0;
2822 }
2823 
2824 static int write_hints(struct cache *cache)
2825 {
2826 	int r;
2827 
2828 	if (get_cache_mode(cache) >= CM_READ_ONLY)
2829 		return -EINVAL;
2830 
2831 	r = dm_cache_write_hints(cache->cmd, cache->policy);
2832 	if (r) {
2833 		metadata_operation_failed(cache, "dm_cache_write_hints", r);
2834 		return r;
2835 	}
2836 
2837 	return 0;
2838 }
2839 
2840 /*
2841  * returns true on success
2842  */
2843 static bool sync_metadata(struct cache *cache)
2844 {
2845 	int r1, r2, r3, r4;
2846 
2847 	r1 = write_dirty_bitset(cache);
2848 	if (r1)
2849 		DMERR("%s: could not write dirty bitset", cache_device_name(cache));
2850 
2851 	r2 = write_discard_bitset(cache);
2852 	if (r2)
2853 		DMERR("%s: could not write discard bitset", cache_device_name(cache));
2854 
2855 	save_stats(cache);
2856 
2857 	r3 = write_hints(cache);
2858 	if (r3)
2859 		DMERR("%s: could not write hints", cache_device_name(cache));
2860 
2861 	/*
2862 	 * If writing the above metadata failed, we still commit, but don't
2863 	 * set the clean shutdown flag.  This will effectively force every
2864 	 * dirty bit to be set on reload.
2865 	 */
2866 	r4 = commit(cache, !r1 && !r2 && !r3);
2867 	if (r4)
2868 		DMERR("%s: could not write cache metadata", cache_device_name(cache));
2869 
2870 	return !r1 && !r2 && !r3 && !r4;
2871 }
2872 
2873 static void cache_postsuspend(struct dm_target *ti)
2874 {
2875 	struct cache *cache = ti->private;
2876 
2877 	prevent_background_work(cache);
2878 	BUG_ON(atomic_read(&cache->nr_io_migrations));
2879 
2880 	cancel_delayed_work(&cache->waker);
2881 	flush_workqueue(cache->wq);
2882 	WARN_ON(cache->tracker.in_flight);
2883 
2884 	/*
2885 	 * If it's a flush suspend there won't be any deferred bios, so this
2886 	 * call is harmless.
2887 	 */
2888 	requeue_deferred_bios(cache);
2889 
2890 	if (get_cache_mode(cache) == CM_WRITE)
2891 		(void) sync_metadata(cache);
2892 }
2893 
2894 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2895 			bool dirty, uint32_t hint, bool hint_valid)
2896 {
2897 	int r;
2898 	struct cache *cache = context;
2899 
2900 	if (dirty) {
2901 		set_bit(from_cblock(cblock), cache->dirty_bitset);
2902 		atomic_inc(&cache->nr_dirty);
2903 	} else
2904 		clear_bit(from_cblock(cblock), cache->dirty_bitset);
2905 
2906 	r = policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
2907 	if (r)
2908 		return r;
2909 
2910 	return 0;
2911 }
2912 
2913 /*
2914  * The discard block size in the on disk metadata is not
2915  * necessarily the same as the one we're currently using.  So we have to
2916  * be careful to only set the discarded attribute if we know it
2917  * covers a complete block of the new size.
2918  */
2919 struct discard_load_info {
2920 	struct cache *cache;
2921 
2922 	/*
2923 	 * These blocks are sized using the on disk dblock size, rather
2924 	 * than the current one.
2925 	 */
2926 	dm_block_t block_size;
2927 	dm_block_t discard_begin, discard_end;
2928 };
2929 
2930 static void discard_load_info_init(struct cache *cache,
2931 				   struct discard_load_info *li)
2932 {
2933 	li->cache = cache;
2934 	li->discard_begin = li->discard_end = 0;
2935 }
2936 
2937 static void set_discard_range(struct discard_load_info *li)
2938 {
2939 	sector_t b, e;
2940 
2941 	if (li->discard_begin == li->discard_end)
2942 		return;
2943 
2944 	/*
2945 	 * Convert to sectors.
2946 	 */
2947 	b = li->discard_begin * li->block_size;
2948 	e = li->discard_end * li->block_size;
2949 
2950 	/*
2951 	 * Then convert back to the current dblock size.
2952 	 */
2953 	b = dm_sector_div_up(b, li->cache->discard_block_size);
2954 	sector_div(e, li->cache->discard_block_size);
2955 
2956 	/*
2957 	 * The origin may have shrunk, so we need to check we're still in
2958 	 * bounds.
2959 	 */
2960 	if (e > from_dblock(li->cache->discard_nr_blocks))
2961 		e = from_dblock(li->cache->discard_nr_blocks);
2962 
2963 	for (; b < e; b++)
2964 		set_discard(li->cache, to_dblock(b));
2965 }
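/*
 * Example of the rounding above (sizes are illustrative): if the on-disk
 * dblock size was 1024 sectors and a discarded run covered old blocks
 * [4, 6), i.e. sectors [4096, 6144), then with a current discard block size
 * of 2048 sectors b rounds up to 2 and e rounds down to 3, so only the new
 * dblock 2 (sectors 4096-6143), which is fully covered, is marked discarded.
 */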
2966 
2967 static int load_discard(void *context, sector_t discard_block_size,
2968 			dm_dblock_t dblock, bool discard)
2969 {
2970 	struct discard_load_info *li = context;
2971 
2972 	li->block_size = discard_block_size;
2973 
2974 	if (discard) {
2975 		if (from_dblock(dblock) == li->discard_end)
2976 			/*
2977 			 * We're already in a discard range, just extend it.
2978 			 */
2979 			li->discard_end = li->discard_end + 1ULL;
2980 
2981 		else {
2982 			/*
2983 			 * Emit the old range and start a new one.
2984 			 */
2985 			set_discard_range(li);
2986 			li->discard_begin = from_dblock(dblock);
2987 			li->discard_end = li->discard_begin + 1ULL;
2988 		}
2989 	} else {
2990 		set_discard_range(li);
2991 		li->discard_begin = li->discard_end = 0;
2992 	}
2993 
2994 	return 0;
2995 }
2996 
2997 static dm_cblock_t get_cache_dev_size(struct cache *cache)
2998 {
2999 	sector_t size = get_dev_size(cache->cache_dev);
3000 	(void) sector_div(size, cache->sectors_per_block);
3001 	return to_cblock(size);
3002 }
3003 
3004 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
3005 {
3006 	if (from_cblock(new_size) > from_cblock(cache->cache_size))
3007 		return true;
3008 
3009 	/*
3010 	 * We can't drop a dirty block when shrinking the cache.
3011 	 */
3012 	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
3013 		new_size = to_cblock(from_cblock(new_size) + 1);
3014 		if (is_dirty(cache, new_size)) {
3015 			DMERR("%s: unable to shrink cache; cache block %llu is dirty",
3016 			      cache_device_name(cache),
3017 			      (unsigned long long) from_cblock(new_size));
3018 			return false;
3019 		}
3020 	}
3021 
3022 	return true;
3023 }
3024 
3025 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
3026 {
3027 	int r;
3028 
3029 	r = dm_cache_resize(cache->cmd, new_size);
3030 	if (r) {
3031 		DMERR("%s: could not resize cache metadata", cache_device_name(cache));
3032 		metadata_operation_failed(cache, "dm_cache_resize", r);
3033 		return r;
3034 	}
3035 
3036 	set_cache_size(cache, new_size);
3037 
3038 	return 0;
3039 }
3040 
3041 static int cache_preresume(struct dm_target *ti)
3042 {
3043 	int r = 0;
3044 	struct cache *cache = ti->private;
3045 	dm_cblock_t csize = get_cache_dev_size(cache);
3046 
3047 	/*
3048 	 * Check to see if the cache has resized.
3049 	 */
3050 	if (!cache->sized) {
3051 		r = resize_cache_dev(cache, csize);
3052 		if (r)
3053 			return r;
3054 
3055 		cache->sized = true;
3056 
3057 	} else if (csize != cache->cache_size) {
3058 		if (!can_resize(cache, csize))
3059 			return -EINVAL;
3060 
3061 		r = resize_cache_dev(cache, csize);
3062 		if (r)
3063 			return r;
3064 	}
3065 
3066 	if (!cache->loaded_mappings) {
3067 		r = dm_cache_load_mappings(cache->cmd, cache->policy,
3068 					   load_mapping, cache);
3069 		if (r) {
3070 			DMERR("%s: could not load cache mappings", cache_device_name(cache));
3071 			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
3072 			return r;
3073 		}
3074 
3075 		cache->loaded_mappings = true;
3076 	}
3077 
3078 	if (!cache->loaded_discards) {
3079 		struct discard_load_info li;
3080 
3081 		/*
3082 		 * The discard bitset could have been resized, or the
3083 		 * discard block size changed.  To be safe we start by
3084 		 * setting every dblock to not discarded.
3085 		 */
3086 		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
3087 
3088 		discard_load_info_init(cache, &li);
3089 		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
3090 		if (r) {
3091 			DMERR("%s: could not load origin discards", cache_device_name(cache));
3092 			metadata_operation_failed(cache, "dm_cache_load_discards", r);
3093 			return r;
3094 		}
3095 		set_discard_range(&li);
3096 
3097 		cache->loaded_discards = true;
3098 	}
3099 
3100 	return r;
3101 }
3102 
3103 static void cache_resume(struct dm_target *ti)
3104 {
3105 	struct cache *cache = ti->private;
3106 
3107 	cache->need_tick_bio = true;
3108 	allow_background_work(cache);
3109 	do_waker(&cache->waker.work);
3110 }
3111 
3112 /*
3113  * Status format:
3114  *
3115  * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
3116  * <cache block size> <#used cache blocks>/<#total cache blocks>
3117  * <#read hits> <#read misses> <#write hits> <#write misses>
3118  * <#demotions> <#promotions> <#dirty>
3119  * <#features> <features>*
3120  * <#core args> <core args>
3121  * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
3122  */
3123 static void cache_status(struct dm_target *ti, status_type_t type,
3124 			 unsigned status_flags, char *result, unsigned maxlen)
3125 {
3126 	int r = 0;
3127 	unsigned i;
3128 	ssize_t sz = 0;
3129 	dm_block_t nr_free_blocks_metadata = 0;
3130 	dm_block_t nr_blocks_metadata = 0;
3131 	char buf[BDEVNAME_SIZE];
3132 	struct cache *cache = ti->private;
3133 	dm_cblock_t residency;
3134 	bool needs_check;
3135 
3136 	switch (type) {
3137 	case STATUSTYPE_INFO:
3138 		if (get_cache_mode(cache) == CM_FAIL) {
3139 			DMEMIT("Fail");
3140 			break;
3141 		}
3142 
3143 		/* Commit to ensure statistics aren't out-of-date */
3144 		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3145 			(void) commit(cache, false);
3146 
3147 		r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
3148 		if (r) {
3149 			DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
3150 			      cache_device_name(cache), r);
3151 			goto err;
3152 		}
3153 
3154 		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
3155 		if (r) {
3156 			DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
3157 			      cache_device_name(cache), r);
3158 			goto err;
3159 		}
3160 
3161 		residency = policy_residency(cache->policy);
3162 
3163 		DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
3164 		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
3165 		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3166 		       (unsigned long long)nr_blocks_metadata,
3167 		       (unsigned long long)cache->sectors_per_block,
3168 		       (unsigned long long) from_cblock(residency),
3169 		       (unsigned long long) from_cblock(cache->cache_size),
3170 		       (unsigned) atomic_read(&cache->stats.read_hit),
3171 		       (unsigned) atomic_read(&cache->stats.read_miss),
3172 		       (unsigned) atomic_read(&cache->stats.write_hit),
3173 		       (unsigned) atomic_read(&cache->stats.write_miss),
3174 		       (unsigned) atomic_read(&cache->stats.demotion),
3175 		       (unsigned) atomic_read(&cache->stats.promotion),
3176 		       (unsigned long) atomic_read(&cache->nr_dirty));
3177 
3178 		if (cache->features.metadata_version == 2)
3179 			DMEMIT("2 metadata2 ");
3180 		else
3181 			DMEMIT("1 ");
3182 
3183 		if (writethrough_mode(cache))
3184 			DMEMIT("writethrough ");
3185 
3186 		else if (passthrough_mode(cache))
3187 			DMEMIT("passthrough ");
3188 
3189 		else if (writeback_mode(cache))
3190 			DMEMIT("writeback ");
3191 
3192 		else {
3193 			DMERR("%s: internal error: unknown io mode: %d",
3194 			      cache_device_name(cache), (int) cache->features.io_mode);
3195 			goto err;
3196 		}
3197 
3198 		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
3199 
3200 		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
3201 		if (sz < maxlen) {
3202 			r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
3203 			if (r)
3204 				DMERR("%s: policy_emit_config_values returned %d",
3205 				      cache_device_name(cache), r);
3206 		}
3207 
3208 		if (get_cache_mode(cache) == CM_READ_ONLY)
3209 			DMEMIT("ro ");
3210 		else
3211 			DMEMIT("rw ");
3212 
3213 		r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
3214 
3215 		if (r || needs_check)
3216 			DMEMIT("needs_check ");
3217 		else
3218 			DMEMIT("- ");
3219 
3220 		break;
3221 
3222 	case STATUSTYPE_TABLE:
3223 		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3224 		DMEMIT("%s ", buf);
3225 		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3226 		DMEMIT("%s ", buf);
3227 		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3228 		DMEMIT("%s", buf);
3229 
3230 		for (i = 0; i < cache->nr_ctr_args - 1; i++)
3231 			DMEMIT(" %s", cache->ctr_args[i]);
3232 		if (cache->nr_ctr_args)
3233 			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
3234 	}
3235 
3236 	return;
3237 
3238 err:
3239 	DMEMIT("Error");
3240 }
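/*
 * An illustrative STATUSTYPE_INFO line as built above (every value is
 * invented, and the policy args section depends on the policy in use):
 *
 *   8 103/4096 512 14857/65536 251436 58305 47802 16137 14 214 0 \
 *   1 writeback 2 migration_threshold 2048 smq 0 rw -
 */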
3241 
3242 /*
3243  * Defines a range of cblocks: begin to (end - 1) are in the range; end is
3244  * the one-past-the-end value.
3245  */
3246 struct cblock_range {
3247 	dm_cblock_t begin;
3248 	dm_cblock_t end;
3249 };
3250 
3251 /*
3252  * A cache block range can take two forms:
3253  *
3254  * i) A single cblock, e.g. '3456'
3255  * ii) A begin and end cblock with a dash between, e.g. 123-234
3256  */
3257 static int parse_cblock_range(struct cache *cache, const char *str,
3258 			      struct cblock_range *result)
3259 {
3260 	char dummy;
3261 	uint64_t b, e;
3262 	int r;
3263 
3264 	/*
3265 	 * Try and parse form (ii) first.
3266 	 */
3267 	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
3268 	if (r < 0)
3269 		return r;
3270 
3271 	if (r == 2) {
3272 		result->begin = to_cblock(b);
3273 		result->end = to_cblock(e);
3274 		return 0;
3275 	}
3276 
3277 	/*
3278 	 * That didn't work, try form (i).
3279 	 */
3280 	r = sscanf(str, "%llu%c", &b, &dummy);
3281 	if (r < 0)
3282 		return r;
3283 
3284 	if (r == 1) {
3285 		result->begin = to_cblock(b);
3286 		result->end = to_cblock(from_cblock(result->begin) + 1u);
3287 		return 0;
3288 	}
3289 
3290 	DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
3291 	return -EINVAL;
3292 }
3293 
3294 static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
3295 {
3296 	uint64_t b = from_cblock(range->begin);
3297 	uint64_t e = from_cblock(range->end);
3298 	uint64_t n = from_cblock(cache->cache_size);
3299 
3300 	if (b >= n) {
3301 		DMERR("%s: begin cblock out of range: %llu >= %llu",
3302 		      cache_device_name(cache), b, n);
3303 		return -EINVAL;
3304 	}
3305 
3306 	if (e > n) {
3307 		DMERR("%s: end cblock out of range: %llu > %llu",
3308 		      cache_device_name(cache), e, n);
3309 		return -EINVAL;
3310 	}
3311 
3312 	if (b >= e) {
3313 		DMERR("%s: invalid cblock range: %llu >= %llu",
3314 		      cache_device_name(cache), b, e);
3315 		return -EINVAL;
3316 	}
3317 
3318 	return 0;
3319 }
3320 
3321 static inline dm_cblock_t cblock_succ(dm_cblock_t b)
3322 {
3323 	return to_cblock(from_cblock(b) + 1);
3324 }
3325 
3326 static int request_invalidation(struct cache *cache, struct cblock_range *range)
3327 {
3328 	int r = 0;
3329 
3330 	/*
3331 	 * We don't need to do any locking here because we know we're in
3332 	 * passthrough mode.  There is potential for a race between an
3333 	 * invalidation triggered by an io and an invalidation message.  This
3334 	 * is harmless; we must not worry if the policy call fails.
3335 	 */
3336 	while (range->begin != range->end) {
3337 		r = invalidate_cblock(cache, range->begin);
3338 		if (r)
3339 			return r;
3340 
3341 		range->begin = cblock_succ(range->begin);
3342 	}
3343 
3344 	cache->commit_requested = true;
3345 	return r;
3346 }
3347 
3348 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
3349 					      const char **cblock_ranges)
3350 {
3351 	int r = 0;
3352 	unsigned i;
3353 	struct cblock_range range;
3354 
3355 	if (!passthrough_mode(cache)) {
3356 		DMERR("%s: cache has to be in passthrough mode for invalidation",
3357 		      cache_device_name(cache));
3358 		return -EPERM;
3359 	}
3360 
3361 	for (i = 0; i < count; i++) {
3362 		r = parse_cblock_range(cache, cblock_ranges[i], &range);
3363 		if (r)
3364 			break;
3365 
3366 		r = validate_cblock_range(cache, &range);
3367 		if (r)
3368 			break;
3369 
3370 		/*
3371 		 * Pass begin and end origin blocks to the worker and wake it.
3372 		 */
3373 		r = request_invalidation(cache, &range);
3374 		if (r)
3375 			break;
3376 	}
3377 
3378 	return r;
3379 }
3380 
3381 /*
3382  * Supports
3383  *	"<key> <value>"
3384  * and
3385  *     "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
3386  *
3387  * The key migration_threshold is supported by the cache target core.
3388  */
3389 static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
3390 			 char *result, unsigned maxlen)
3391 {
3392 	struct cache *cache = ti->private;
3393 
3394 	if (!argc)
3395 		return -EINVAL;
3396 
3397 	if (get_cache_mode(cache) >= CM_READ_ONLY) {
3398 		DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
3399 		      cache_device_name(cache));
3400 		return -EOPNOTSUPP;
3401 	}
3402 
3403 	if (!strcasecmp(argv[0], "invalidate_cblocks"))
3404 		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3405 
3406 	if (argc != 2)
3407 		return -EINVAL;
3408 
3409 	return set_config_value(cache, argv[0], argv[1]);
3410 }
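/*
 * Message usage from userspace goes through dmsetup, e.g. (the device name
 * 'cached' is hypothetical):
 *
 *   dmsetup message cached 0 migration_threshold 4096
 *   dmsetup message cached 0 invalidate_cblocks 3456 123-234
 *
 * The invalidate_cblocks form requires the target to be in passthrough
 * mode, as enforced by process_invalidate_cblocks_message() above.
 */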
3411 
3412 static int cache_iterate_devices(struct dm_target *ti,
3413 				 iterate_devices_callout_fn fn, void *data)
3414 {
3415 	int r = 0;
3416 	struct cache *cache = ti->private;
3417 
3418 	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3419 	if (!r)
3420 		r = fn(ti, cache->origin_dev, 0, ti->len, data);
3421 
3422 	return r;
3423 }
3424 
3425 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3426 {
3427 	/*
3428 	 * FIXME: these limits may be incompatible with the cache device
3429 	 */
3430 	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
3431 					    cache->origin_sectors);
3432 	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
3433 }
3434 
3435 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3436 {
3437 	struct cache *cache = ti->private;
3438 	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3439 
3440 	/*
3441 	 * If the system-determined stacked limits are compatible with the
3442 	 * cache's blocksize (io_opt is a factor) do not override them.
3443 	 */
3444 	if (io_opt_sectors < cache->sectors_per_block ||
3445 	    do_div(io_opt_sectors, cache->sectors_per_block)) {
3446 		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
3447 		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
3448 	}
3449 	set_discard_limits(cache, limits);
3450 }
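/*
 * Example of the check above (values are illustrative): with 512 sector
 * cache blocks, a stacked io_opt of 2048 sectors (1MB) is kept because it
 * is a multiple of the block size, whereas 768 sectors (384KB) is not, so
 * io_min and io_opt would both be forced to 256KB.
 */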
3451 
3452 /*----------------------------------------------------------------*/
3453 
3454 static struct target_type cache_target = {
3455 	.name = "cache",
3456 	.version = {2, 0, 0},
3457 	.module = THIS_MODULE,
3458 	.ctr = cache_ctr,
3459 	.dtr = cache_dtr,
3460 	.map = cache_map,
3461 	.end_io = cache_end_io,
3462 	.postsuspend = cache_postsuspend,
3463 	.preresume = cache_preresume,
3464 	.resume = cache_resume,
3465 	.status = cache_status,
3466 	.message = cache_message,
3467 	.iterate_devices = cache_iterate_devices,
3468 	.io_hints = cache_io_hints,
3469 };
3470 
3471 static int __init dm_cache_init(void)
3472 {
3473 	int r;
3474 
3475 	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3476 	if (!migration_cache)
3477 		return -ENOMEM;
3478 
3479 	r = dm_register_target(&cache_target);
3480 	if (r) {
3481 		DMERR("cache target registration failed: %d", r);
3482 		kmem_cache_destroy(migration_cache);
3483 		return r;
3484 	}
3486 
3487 	return 0;
3488 }
3489 
3490 static void __exit dm_cache_exit(void)
3491 {
3492 	dm_unregister_target(&cache_target);
3493 	kmem_cache_destroy(migration_cache);
3494 }
3495 
3496 module_init(dm_cache_init);
3497 module_exit(dm_cache_exit);
3498 
3499 MODULE_DESCRIPTION(DM_NAME " cache target");
3500 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
3501 MODULE_LICENSE("GPL");
3502