1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2012 Red Hat. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8 #include "dm.h"
9 #include "dm-bio-prison-v2.h"
10 #include "dm-bio-record.h"
11 #include "dm-cache-metadata.h"
12 #include "dm-io-tracker.h"
13
14 #include <linux/dm-io.h>
15 #include <linux/dm-kcopyd.h>
16 #include <linux/jiffies.h>
17 #include <linux/init.h>
18 #include <linux/mempool.h>
19 #include <linux/module.h>
20 #include <linux/rwsem.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23
24 #define DM_MSG_PREFIX "cache"
25
26 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
27 "A percentage of time allocated for copying to and/or from cache");
28
29 /*----------------------------------------------------------------*/
30
31 /*
32 * Glossary:
33 *
34 * oblock: index of an origin block
35 * cblock: index of a cache block
36 * promotion: movement of a block from origin to cache
37 * demotion: movement of a block from cache to origin
38 * migration: movement of a block between the origin and cache device,
39 * either direction
40 */
41
42 /*----------------------------------------------------------------*/
43
44 /*
45 * Represents a chunk of future work. 'input' allows continuations to pass
46 * values between themselves, typically error values.
47 */
48 struct continuation {
49 struct work_struct ws;
50 blk_status_t input;
51 };
52
53 static inline void init_continuation(struct continuation *k,
54 void (*fn)(struct work_struct *))
55 {
56 INIT_WORK(&k->ws, fn);
57 k->input = 0;
58 }
59
60 static inline void queue_continuation(struct workqueue_struct *wq,
61 struct continuation *k)
62 {
63 queue_work(wq, &k->ws);
64 }
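/*
 * Typical continuation flow, as used by the migration code below, e.g.:
 *
 *	init_continuation(&mg->k, mg_upgrade_lock);
 *	copy(mg, is_policy_promote);
 *
 * The async completion callback (copy_complete() in this case) records any
 * error in k.input and calls queue_continuation(cache->wq, &mg->k), which
 * runs the next step (mg_upgrade_lock()) from the workqueue.  Each step
 * checks k.input to decide whether to carry on or fail the operation.
 */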
65
66 /*----------------------------------------------------------------*/
67
68 /*
69 * The batcher collects together pieces of work that need a particular
70 * operation to occur before they can proceed (typically a commit).
71 */
72 struct batcher {
73 /*
74 * The operation that everyone is waiting for.
75 */
76 blk_status_t (*commit_op)(void *context);
77 void *commit_context;
78
79 /*
80 * This is how bios should be issued once the commit op is complete
81 * (accounted_request).
82 */
83 void (*issue_op)(struct bio *bio, void *context);
84 void *issue_context;
85
86 /*
87 * Queued work gets put on here after commit.
88 */
89 struct workqueue_struct *wq;
90
91 spinlock_t lock;
92 struct list_head work_items;
93 struct bio_list bios;
94 struct work_struct commit_work;
95
96 bool commit_scheduled;
97 };
98
99 static void __commit(struct work_struct *_ws)
100 {
101 struct batcher *b = container_of(_ws, struct batcher, commit_work);
102 blk_status_t r;
103 struct list_head work_items;
104 struct work_struct *ws, *tmp;
105 struct continuation *k;
106 struct bio *bio;
107 struct bio_list bios;
108
109 INIT_LIST_HEAD(&work_items);
110 bio_list_init(&bios);
111
112 /*
113 * We have to grab these before the commit_op to avoid a race
114 * condition.
115 */
116 spin_lock_irq(&b->lock);
117 list_splice_init(&b->work_items, &work_items);
118 bio_list_merge(&bios, &b->bios);
119 bio_list_init(&b->bios);
120 b->commit_scheduled = false;
121 spin_unlock_irq(&b->lock);
122
123 r = b->commit_op(b->commit_context);
124
125 list_for_each_entry_safe(ws, tmp, &work_items, entry) {
126 k = container_of(ws, struct continuation, ws);
127 k->input = r;
128 INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */
129 queue_work(b->wq, ws);
130 }
131
132 while ((bio = bio_list_pop(&bios))) {
133 if (r) {
134 bio->bi_status = r;
135 bio_endio(bio);
136 } else
137 b->issue_op(bio, b->issue_context);
138 }
139 }
140
141 static void batcher_init(struct batcher *b,
142 blk_status_t (*commit_op)(void *),
143 void *commit_context,
144 void (*issue_op)(struct bio *bio, void *),
145 void *issue_context,
146 struct workqueue_struct *wq)
147 {
148 b->commit_op = commit_op;
149 b->commit_context = commit_context;
150 b->issue_op = issue_op;
151 b->issue_context = issue_context;
152 b->wq = wq;
153
154 spin_lock_init(&b->lock);
155 INIT_LIST_HEAD(&b->work_items);
156 bio_list_init(&b->bios);
157 INIT_WORK(&b->commit_work, __commit);
158 b->commit_scheduled = false;
159 }
160
161 static void async_commit(struct batcher *b)
162 {
163 queue_work(b->wq, &b->commit_work);
164 }
165
166 static void continue_after_commit(struct batcher *b, struct continuation *k)
167 {
168 bool commit_scheduled;
169
170 spin_lock_irq(&b->lock);
171 commit_scheduled = b->commit_scheduled;
172 list_add_tail(&k->ws.entry, &b->work_items);
173 spin_unlock_irq(&b->lock);
174
175 if (commit_scheduled)
176 async_commit(b);
177 }
178
179 /*
180 * Bios are errored if commit failed.
181 */
182 static void issue_after_commit(struct batcher *b, struct bio *bio)
183 {
184 bool commit_scheduled;
185
186 spin_lock_irq(&b->lock);
187 commit_scheduled = b->commit_scheduled;
188 bio_list_add(&b->bios, bio);
189 spin_unlock_irq(&b->lock);
190
191 if (commit_scheduled)
192 async_commit(b);
193 }
194
195 /*
196 * Call this if some urgent work is waiting for the commit to complete.
197 */
198 static void schedule_commit(struct batcher *b)
199 {
200 bool immediate;
201
202 spin_lock_irq(&b->lock);
203 immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
204 b->commit_scheduled = true;
205 spin_unlock_irq(&b->lock);
206
207 if (immediate)
208 async_commit(b);
209 }
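/*
 * Typical usage, via the cache->committer instance (see struct cache):
 *
 *	continue_after_commit(&cache->committer, &mg->k);
 *	schedule_commit(&cache->committer);
 *
 * or, for a bio that must only be issued once the metadata is committed:
 *
 *	issue_after_commit(&cache->committer, bio);
 *
 * __commit() then runs commit_op() once on the workqueue; queued
 * continuations get the commit result in k->input, and queued bios are
 * either issued via issue_op() or errored if the commit failed.
 */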
210
211 /*
212 * There are a couple of places where we let a bio run, but want to do some
213 * work before calling its endio function. We do this by temporarily
214 * changing the endio fn.
215 */
216 struct dm_hook_info {
217 bio_end_io_t *bi_end_io;
218 };
219
220 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
221 bio_end_io_t *bi_end_io, void *bi_private)
222 {
223 h->bi_end_io = bio->bi_end_io;
224
225 bio->bi_end_io = bi_end_io;
226 bio->bi_private = bi_private;
227 }
228
229 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
230 {
231 bio->bi_end_io = h->bi_end_io;
232 }
233
234 /*----------------------------------------------------------------*/
235
236 #define MIGRATION_POOL_SIZE 128
237 #define COMMIT_PERIOD HZ
238 #define MIGRATION_COUNT_WINDOW 10
239
240 /*
241 * The block size of the device holding cache data must be
242 * between 32KB and 1GB.
243 */
244 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
245 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
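/*
 * With 512 byte sectors (SECTOR_SHIFT == 9) these work out to 64 and
 * 2097152 sectors respectively.
 */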
246
247 enum cache_metadata_mode {
248 CM_WRITE, /* metadata may be changed */
249 CM_READ_ONLY, /* metadata may not be changed */
250 CM_FAIL
251 };
252
253 enum cache_io_mode {
254 /*
255 * Data is written to cached blocks only. These blocks are marked
256 * dirty. If you lose the cache device you will lose data.
257 * Potential performance increase for both reads and writes.
258 */
259 CM_IO_WRITEBACK,
260
261 /*
262 * Data is written to both cache and origin. Blocks are never
263 * dirty. Potential performance benefit for reads only.
264 */
265 CM_IO_WRITETHROUGH,
266
267 /*
268 * A degraded mode useful for various cache coherency situations
269 * (eg, rolling back snapshots). Reads and writes always go to the
270 * origin. If a write goes to a cached oblock, then the cache
271 * block is invalidated.
272 */
273 CM_IO_PASSTHROUGH
274 };
275
276 struct cache_features {
277 enum cache_metadata_mode mode;
278 enum cache_io_mode io_mode;
279 unsigned int metadata_version;
280 bool discard_passdown:1;
281 };
282
283 struct cache_stats {
284 atomic_t read_hit;
285 atomic_t read_miss;
286 atomic_t write_hit;
287 atomic_t write_miss;
288 atomic_t demotion;
289 atomic_t promotion;
290 atomic_t writeback;
291 atomic_t copies_avoided;
292 atomic_t cache_cell_clash;
293 atomic_t commit_count;
294 atomic_t discard_count;
295 };
296
297 struct cache {
298 struct dm_target *ti;
299 spinlock_t lock;
300
301 /*
302 * Fields for converting from sectors to blocks.
303 */
304 int sectors_per_block_shift;
305 sector_t sectors_per_block;
306
307 struct dm_cache_metadata *cmd;
308
309 /*
310 * Metadata is written to this device.
311 */
312 struct dm_dev *metadata_dev;
313
314 /*
315 * The slower of the two data devices. Typically a spindle.
316 */
317 struct dm_dev *origin_dev;
318
319 /*
320 * The faster of the two data devices. Typically an SSD.
321 */
322 struct dm_dev *cache_dev;
323
324 /*
325 * Size of the origin device in _complete_ blocks and native sectors.
326 */
327 dm_oblock_t origin_blocks;
328 sector_t origin_sectors;
329
330 /*
331 * Size of the cache device in blocks.
332 */
333 dm_cblock_t cache_size;
334
335 /*
336 * Invalidation fields.
337 */
338 spinlock_t invalidation_lock;
339 struct list_head invalidation_requests;
340
341 sector_t migration_threshold;
342 wait_queue_head_t migration_wait;
343 atomic_t nr_allocated_migrations;
344
345 /*
346 * The number of in flight migrations that are performing
347 * background io. eg, promotion, writeback.
348 */
349 atomic_t nr_io_migrations;
350
351 struct bio_list deferred_bios;
352
353 struct rw_semaphore quiesce_lock;
354
355 /*
356 * origin_blocks entries, discarded if set.
357 */
358 dm_dblock_t discard_nr_blocks;
359 unsigned long *discard_bitset;
360 uint32_t discard_block_size; /* a power of 2 times sectors per block */
361
362 /*
363 * Rather than reconstructing the table line for the status we just
364 * save it and regurgitate.
365 */
366 unsigned int nr_ctr_args;
367 const char **ctr_args;
368
369 struct dm_kcopyd_client *copier;
370 struct work_struct deferred_bio_worker;
371 struct work_struct migration_worker;
372 struct workqueue_struct *wq;
373 struct delayed_work waker;
374 struct dm_bio_prison_v2 *prison;
375
376 /*
377 * cache_size entries, dirty if set
378 */
379 unsigned long *dirty_bitset;
380 atomic_t nr_dirty;
381
382 unsigned int policy_nr_args;
383 struct dm_cache_policy *policy;
384
385 /*
386 * Cache features such as write-through.
387 */
388 struct cache_features features;
389
390 struct cache_stats stats;
391
392 bool need_tick_bio:1;
393 bool sized:1;
394 bool invalidate:1;
395 bool commit_requested:1;
396 bool loaded_mappings:1;
397 bool loaded_discards:1;
398
399 struct rw_semaphore background_work_lock;
400
401 struct batcher committer;
402 struct work_struct commit_ws;
403
404 struct dm_io_tracker tracker;
405
406 mempool_t migration_pool;
407
408 struct bio_set bs;
409 };
410
411 struct per_bio_data {
412 bool tick:1;
413 unsigned int req_nr:2;
414 struct dm_bio_prison_cell_v2 *cell;
415 struct dm_hook_info hook_info;
416 sector_t len;
417 };
418
419 struct dm_cache_migration {
420 struct continuation k;
421 struct cache *cache;
422
423 struct policy_work *op;
424 struct bio *overwrite_bio;
425 struct dm_bio_prison_cell_v2 *cell;
426
427 dm_cblock_t invalidate_cblock;
428 dm_oblock_t invalidate_oblock;
429 };
430
431 /*----------------------------------------------------------------*/
432
433 static bool writethrough_mode(struct cache *cache)
434 {
435 return cache->features.io_mode == CM_IO_WRITETHROUGH;
436 }
437
438 static bool writeback_mode(struct cache *cache)
439 {
440 return cache->features.io_mode == CM_IO_WRITEBACK;
441 }
442
443 static inline bool passthrough_mode(struct cache *cache)
444 {
445 return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
446 }
447
448 /*----------------------------------------------------------------*/
449
450 static void wake_deferred_bio_worker(struct cache *cache)
451 {
452 queue_work(cache->wq, &cache->deferred_bio_worker);
453 }
454
455 static void wake_migration_worker(struct cache *cache)
456 {
457 if (passthrough_mode(cache))
458 return;
459
460 queue_work(cache->wq, &cache->migration_worker);
461 }
462
463 /*----------------------------------------------------------------*/
464
465 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
466 {
467 return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
468 }
469
470 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
471 {
472 dm_bio_prison_free_cell_v2(cache->prison, cell);
473 }
474
475 static struct dm_cache_migration *alloc_migration(struct cache *cache)
476 {
477 struct dm_cache_migration *mg;
478
479 mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
480
481 memset(mg, 0, sizeof(*mg));
482
483 mg->cache = cache;
484 atomic_inc(&cache->nr_allocated_migrations);
485
486 return mg;
487 }
488
489 static void free_migration(struct dm_cache_migration *mg)
490 {
491 struct cache *cache = mg->cache;
492
493 if (atomic_dec_and_test(&cache->nr_allocated_migrations))
494 wake_up(&cache->migration_wait);
495
496 mempool_free(mg, &cache->migration_pool);
497 }
498
499 /*----------------------------------------------------------------*/
500
501 static inline dm_oblock_t oblock_succ(dm_oblock_t b)
502 {
503 return to_oblock(from_oblock(b) + 1ull);
504 }
505
506 static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key)
507 {
508 key->virtual = 0;
509 key->dev = 0;
510 key->block_begin = from_oblock(begin);
511 key->block_end = from_oblock(end);
512 }
513
514 /*
515 * We have two lock levels: level 0, which is used to prevent WRITEs, and
516 * level 1, which prevents *both* READs and WRITEs.
517 */
518 #define WRITE_LOCK_LEVEL 0
519 #define READ_WRITE_LOCK_LEVEL 1
520
521 static unsigned int lock_level(struct bio *bio)
522 {
523 return bio_data_dir(bio) == WRITE ?
524 WRITE_LOCK_LEVEL :
525 READ_WRITE_LOCK_LEVEL;
526 }
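/*
 * For example, mg_lock_writes() takes a level 0 exclusive lock while a
 * block is being copied, so reads of that block can continue; it locks at
 * level 1 straight away when an overwrite bio is used, and otherwise only
 * promotes to level 1 (mg_upgrade_lock()) just before updating the
 * metadata.
 */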
527
528 /*
529 *--------------------------------------------------------------
530 * Per bio data
531 *--------------------------------------------------------------
532 */
533
534 static struct per_bio_data *get_per_bio_data(struct bio *bio)
535 {
536 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
537
538 BUG_ON(!pb);
539 return pb;
540 }
541
542 static struct per_bio_data *init_per_bio_data(struct bio *bio)
543 {
544 struct per_bio_data *pb = get_per_bio_data(bio);
545
546 pb->tick = false;
547 pb->req_nr = dm_bio_get_target_bio_nr(bio);
548 pb->cell = NULL;
549 pb->len = 0;
550
551 return pb;
552 }
553
554 /*----------------------------------------------------------------*/
555
556 static void defer_bio(struct cache *cache, struct bio *bio)
557 {
558 spin_lock_irq(&cache->lock);
559 bio_list_add(&cache->deferred_bios, bio);
560 spin_unlock_irq(&cache->lock);
561
562 wake_deferred_bio_worker(cache);
563 }
564
565 static void defer_bios(struct cache *cache, struct bio_list *bios)
566 {
567 spin_lock_irq(&cache->lock);
568 bio_list_merge(&cache->deferred_bios, bios);
569 bio_list_init(bios);
570 spin_unlock_irq(&cache->lock);
571
572 wake_deferred_bio_worker(cache);
573 }
574
575 /*----------------------------------------------------------------*/
576
577 static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
578 {
579 bool r;
580 struct per_bio_data *pb;
581 struct dm_cell_key_v2 key;
582 dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
583 struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
584
585 cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
586
587 build_key(oblock, end, &key);
588 r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
589 if (!r) {
590 /*
591 * Failed to get the lock.
592 */
593 free_prison_cell(cache, cell_prealloc);
594 return r;
595 }
596
597 if (cell != cell_prealloc)
598 free_prison_cell(cache, cell_prealloc);
599
600 pb = get_per_bio_data(bio);
601 pb->cell = cell;
602
603 return r;
604 }
605
606 /*----------------------------------------------------------------*/
607
608 static bool is_dirty(struct cache *cache, dm_cblock_t b)
609 {
610 return test_bit(from_cblock(b), cache->dirty_bitset);
611 }
612
613 static void set_dirty(struct cache *cache, dm_cblock_t cblock)
614 {
615 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
616 atomic_inc(&cache->nr_dirty);
617 policy_set_dirty(cache->policy, cblock);
618 }
619 }
620
621 /*
622 * These two are called after migrations to force the policy and the
623 * dirty bitset to be in sync.
624 */
625 static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
626 {
627 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset))
628 atomic_inc(&cache->nr_dirty);
629 policy_set_dirty(cache->policy, cblock);
630 }
631
632 static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
633 {
634 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
635 if (atomic_dec_return(&cache->nr_dirty) == 0)
636 dm_table_event(cache->ti->table);
637 }
638
639 policy_clear_dirty(cache->policy, cblock);
640 }
641
642 /*----------------------------------------------------------------*/
643
644 static bool block_size_is_power_of_two(struct cache *cache)
645 {
646 return cache->sectors_per_block_shift >= 0;
647 }
648
649 static dm_block_t block_div(dm_block_t b, uint32_t n)
650 {
651 do_div(b, n);
652
653 return b;
654 }
655
656 static dm_block_t oblocks_per_dblock(struct cache *cache)
657 {
658 dm_block_t oblocks = cache->discard_block_size;
659
660 if (block_size_is_power_of_two(cache))
661 oblocks >>= cache->sectors_per_block_shift;
662 else
663 oblocks = block_div(oblocks, cache->sectors_per_block);
664
665 return oblocks;
666 }
667
668 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
669 {
670 return to_dblock(block_div(from_oblock(oblock),
671 oblocks_per_dblock(cache)));
672 }
673
674 static void set_discard(struct cache *cache, dm_dblock_t b)
675 {
676 BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
677 atomic_inc(&cache->stats.discard_count);
678
679 spin_lock_irq(&cache->lock);
680 set_bit(from_dblock(b), cache->discard_bitset);
681 spin_unlock_irq(&cache->lock);
682 }
683
684 static void clear_discard(struct cache *cache, dm_dblock_t b)
685 {
686 spin_lock_irq(&cache->lock);
687 clear_bit(from_dblock(b), cache->discard_bitset);
688 spin_unlock_irq(&cache->lock);
689 }
690
691 static bool is_discarded(struct cache *cache, dm_dblock_t b)
692 {
693 int r;
694
695 spin_lock_irq(&cache->lock);
696 r = test_bit(from_dblock(b), cache->discard_bitset);
697 spin_unlock_irq(&cache->lock);
698
699 return r;
700 }
701
702 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
703 {
704 int r;
705
706 spin_lock_irq(&cache->lock);
707 r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
708 cache->discard_bitset);
709 spin_unlock_irq(&cache->lock);
710
711 return r;
712 }
713
714 /*
715 * -------------------------------------------------------------
716 * Remapping
717 *--------------------------------------------------------------
718 */
719 static void remap_to_origin(struct cache *cache, struct bio *bio)
720 {
721 bio_set_dev(bio, cache->origin_dev->bdev);
722 }
723
724 static void remap_to_cache(struct cache *cache, struct bio *bio,
725 dm_cblock_t cblock)
726 {
727 sector_t bi_sector = bio->bi_iter.bi_sector;
728 sector_t block = from_cblock(cblock);
729
730 bio_set_dev(bio, cache->cache_dev->bdev);
731 if (!block_size_is_power_of_two(cache))
732 bio->bi_iter.bi_sector =
733 (block * cache->sectors_per_block) +
734 sector_div(bi_sector, cache->sectors_per_block);
735 else
736 bio->bi_iter.bi_sector =
737 (block << cache->sectors_per_block_shift) |
738 (bi_sector & (cache->sectors_per_block - 1));
739 }
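/*
 * e.g. with sectors_per_block = 128 (so sectors_per_block_shift = 7), a
 * bio at origin sector 1300 remapped to cblock 5 lands at cache sector
 * (5 << 7) | (1300 & 127) = 640 | 20 = 660.
 */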
740
741 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
742 {
743 struct per_bio_data *pb;
744
745 spin_lock_irq(&cache->lock);
746 if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
747 bio_op(bio) != REQ_OP_DISCARD) {
748 pb = get_per_bio_data(bio);
749 pb->tick = true;
750 cache->need_tick_bio = false;
751 }
752 spin_unlock_irq(&cache->lock);
753 }
754
755 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
756 dm_oblock_t oblock)
757 {
758 // FIXME: check_if_tick_bio_needed() is called way too much through this interface
759 check_if_tick_bio_needed(cache, bio);
760 remap_to_origin(cache, bio);
761 if (bio_data_dir(bio) == WRITE)
762 clear_discard(cache, oblock_to_dblock(cache, oblock));
763 }
764
765 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
766 dm_oblock_t oblock, dm_cblock_t cblock)
767 {
768 check_if_tick_bio_needed(cache, bio);
769 remap_to_cache(cache, bio, cblock);
770 if (bio_data_dir(bio) == WRITE) {
771 set_dirty(cache, cblock);
772 clear_discard(cache, oblock_to_dblock(cache, oblock));
773 }
774 }
775
776 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
777 {
778 sector_t block_nr = bio->bi_iter.bi_sector;
779
780 if (!block_size_is_power_of_two(cache))
781 (void) sector_div(block_nr, cache->sectors_per_block);
782 else
783 block_nr >>= cache->sectors_per_block_shift;
784
785 return to_oblock(block_nr);
786 }
787
788 static bool accountable_bio(struct cache *cache, struct bio *bio)
789 {
790 return bio_op(bio) != REQ_OP_DISCARD;
791 }
792
793 static void accounted_begin(struct cache *cache, struct bio *bio)
794 {
795 struct per_bio_data *pb;
796
797 if (accountable_bio(cache, bio)) {
798 pb = get_per_bio_data(bio);
799 pb->len = bio_sectors(bio);
800 dm_iot_io_begin(&cache->tracker, pb->len);
801 }
802 }
803
804 static void accounted_complete(struct cache *cache, struct bio *bio)
805 {
806 struct per_bio_data *pb = get_per_bio_data(bio);
807
808 dm_iot_io_end(&cache->tracker, pb->len);
809 }
810
811 static void accounted_request(struct cache *cache, struct bio *bio)
812 {
813 accounted_begin(cache, bio);
814 dm_submit_bio_remap(bio, NULL);
815 }
816
817 static void issue_op(struct bio *bio, void *context)
818 {
819 struct cache *cache = context;
820
821 accounted_request(cache, bio);
822 }
823
824 /*
825 * When running in writethrough mode we need to send writes to clean blocks
826 * to both the cache and origin devices. Clone the bio and send both in parallel.
827 */
828 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
829 dm_oblock_t oblock, dm_cblock_t cblock)
830 {
831 struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
832 GFP_NOIO, &cache->bs);
833
834 BUG_ON(!origin_bio);
835
836 bio_chain(origin_bio, bio);
837
838 if (bio_data_dir(origin_bio) == WRITE)
839 clear_discard(cache, oblock_to_dblock(cache, oblock));
840 submit_bio(origin_bio);
841
842 remap_to_cache(cache, bio, cblock);
843 }
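/*
 * Note that bio_chain() above makes the original bio wait for origin_bio,
 * so its endio doesn't run until the write to the origin has completed too.
 */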
844
845 /*
846 *--------------------------------------------------------------
847 * Failure modes
848 *--------------------------------------------------------------
849 */
850 static enum cache_metadata_mode get_cache_mode(struct cache *cache)
851 {
852 return cache->features.mode;
853 }
854
855 static const char *cache_device_name(struct cache *cache)
856 {
857 return dm_table_device_name(cache->ti->table);
858 }
859
860 static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
861 {
862 static const char *descs[] = {
863 "write",
864 "read-only",
865 "fail"
866 };
867
868 dm_table_event(cache->ti->table);
869 DMINFO("%s: switching cache to %s mode",
870 cache_device_name(cache), descs[(int)mode]);
871 }
872
873 static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
874 {
875 bool needs_check;
876 enum cache_metadata_mode old_mode = get_cache_mode(cache);
877
878 if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
879 DMERR("%s: unable to read needs_check flag, setting failure mode.",
880 cache_device_name(cache));
881 new_mode = CM_FAIL;
882 }
883
884 if (new_mode == CM_WRITE && needs_check) {
885 DMERR("%s: unable to switch cache to write mode until repaired.",
886 cache_device_name(cache));
887 if (old_mode != new_mode)
888 new_mode = old_mode;
889 else
890 new_mode = CM_READ_ONLY;
891 }
892
893 /* Never move out of fail mode */
894 if (old_mode == CM_FAIL)
895 new_mode = CM_FAIL;
896
897 switch (new_mode) {
898 case CM_FAIL:
899 case CM_READ_ONLY:
900 dm_cache_metadata_set_read_only(cache->cmd);
901 break;
902
903 case CM_WRITE:
904 dm_cache_metadata_set_read_write(cache->cmd);
905 break;
906 }
907
908 cache->features.mode = new_mode;
909
910 if (new_mode != old_mode)
911 notify_mode_switch(cache, new_mode);
912 }
913
914 static void abort_transaction(struct cache *cache)
915 {
916 const char *dev_name = cache_device_name(cache);
917
918 if (get_cache_mode(cache) >= CM_READ_ONLY)
919 return;
920
921 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
922 if (dm_cache_metadata_abort(cache->cmd)) {
923 DMERR("%s: failed to abort metadata transaction", dev_name);
924 set_cache_mode(cache, CM_FAIL);
925 }
926
927 if (dm_cache_metadata_set_needs_check(cache->cmd)) {
928 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
929 set_cache_mode(cache, CM_FAIL);
930 }
931 }
932
933 static void metadata_operation_failed(struct cache *cache, const char *op, int r)
934 {
935 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
936 cache_device_name(cache), op, r);
937 abort_transaction(cache);
938 set_cache_mode(cache, CM_READ_ONLY);
939 }
940
941 /*----------------------------------------------------------------*/
942
943 static void load_stats(struct cache *cache)
944 {
945 struct dm_cache_statistics stats;
946
947 dm_cache_metadata_get_stats(cache->cmd, &stats);
948 atomic_set(&cache->stats.read_hit, stats.read_hits);
949 atomic_set(&cache->stats.read_miss, stats.read_misses);
950 atomic_set(&cache->stats.write_hit, stats.write_hits);
951 atomic_set(&cache->stats.write_miss, stats.write_misses);
952 }
953
954 static void save_stats(struct cache *cache)
955 {
956 struct dm_cache_statistics stats;
957
958 if (get_cache_mode(cache) >= CM_READ_ONLY)
959 return;
960
961 stats.read_hits = atomic_read(&cache->stats.read_hit);
962 stats.read_misses = atomic_read(&cache->stats.read_miss);
963 stats.write_hits = atomic_read(&cache->stats.write_hit);
964 stats.write_misses = atomic_read(&cache->stats.write_miss);
965
966 dm_cache_metadata_set_stats(cache->cmd, &stats);
967 }
968
969 static void update_stats(struct cache_stats *stats, enum policy_operation op)
970 {
971 switch (op) {
972 case POLICY_PROMOTE:
973 atomic_inc(&stats->promotion);
974 break;
975
976 case POLICY_DEMOTE:
977 atomic_inc(&stats->demotion);
978 break;
979
980 case POLICY_WRITEBACK:
981 atomic_inc(&stats->writeback);
982 break;
983 }
984 }
985
986 /*
987 *---------------------------------------------------------------------
988 * Migration processing
989 *
990 * Migration covers moving data from the origin device to the cache, or
991 * vice versa.
992 *---------------------------------------------------------------------
993 */
994 static void inc_io_migrations(struct cache *cache)
995 {
996 atomic_inc(&cache->nr_io_migrations);
997 }
998
999 static void dec_io_migrations(struct cache *cache)
1000 {
1001 atomic_dec(&cache->nr_io_migrations);
1002 }
1003
1004 static bool discard_or_flush(struct bio *bio)
1005 {
1006 return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
1007 }
1008
1009 static void calc_discard_block_range(struct cache *cache, struct bio *bio,
1010 dm_dblock_t *b, dm_dblock_t *e)
1011 {
1012 sector_t sb = bio->bi_iter.bi_sector;
1013 sector_t se = bio_end_sector(bio);
1014
1015 *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
1016
1017 if (se - sb < cache->discard_block_size)
1018 *e = *b;
1019 else
1020 *e = to_dblock(block_div(se, cache->discard_block_size));
1021 }
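/*
 * Only whole discard blocks covered by the bio are included: e.g. with a
 * discard_block_size of 2048 sectors, a discard of sectors [3000, 10000)
 * yields b = 2 and e = 4, i.e. dblocks 2 and 3.
 */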
1022
1023 /*----------------------------------------------------------------*/
1024
1025 static void prevent_background_work(struct cache *cache)
1026 {
1027 lockdep_off();
1028 down_write(&cache->background_work_lock);
1029 lockdep_on();
1030 }
1031
1032 static void allow_background_work(struct cache *cache)
1033 {
1034 lockdep_off();
1035 up_write(&cache->background_work_lock);
1036 lockdep_on();
1037 }
1038
1039 static bool background_work_begin(struct cache *cache)
1040 {
1041 bool r;
1042
1043 lockdep_off();
1044 r = down_read_trylock(&cache->background_work_lock);
1045 lockdep_on();
1046
1047 return r;
1048 }
1049
1050 static void background_work_end(struct cache *cache)
1051 {
1052 lockdep_off();
1053 up_read(&cache->background_work_lock);
1054 lockdep_on();
1055 }
1056
1057 /*----------------------------------------------------------------*/
1058
1059 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1060 {
1061 return (bio_data_dir(bio) == WRITE) &&
1062 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1063 }
1064
1065 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
1066 {
1067 return writeback_mode(cache) &&
1068 (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
1069 }
1070
1071 static void quiesce(struct dm_cache_migration *mg,
1072 void (*continuation)(struct work_struct *))
1073 {
1074 init_continuation(&mg->k, continuation);
1075 dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
1076 }
1077
1078 static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
1079 {
1080 struct continuation *k = container_of(ws, struct continuation, ws);
1081
1082 return container_of(k, struct dm_cache_migration, k);
1083 }
1084
1085 static void copy_complete(int read_err, unsigned long write_err, void *context)
1086 {
1087 struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
1088
1089 if (read_err || write_err)
1090 mg->k.input = BLK_STS_IOERR;
1091
1092 queue_continuation(mg->cache->wq, &mg->k);
1093 }
1094
1095 static void copy(struct dm_cache_migration *mg, bool promote)
1096 {
1097 struct dm_io_region o_region, c_region;
1098 struct cache *cache = mg->cache;
1099
1100 o_region.bdev = cache->origin_dev->bdev;
1101 o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
1102 o_region.count = cache->sectors_per_block;
1103
1104 c_region.bdev = cache->cache_dev->bdev;
1105 c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
1106 c_region.count = cache->sectors_per_block;
1107
1108 if (promote)
1109 dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
1110 else
1111 dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
1112 }
1113
1114 static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
1115 {
1116 struct per_bio_data *pb = get_per_bio_data(bio);
1117
1118 if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
1119 free_prison_cell(cache, pb->cell);
1120 pb->cell = NULL;
1121 }
1122
1123 static void overwrite_endio(struct bio *bio)
1124 {
1125 struct dm_cache_migration *mg = bio->bi_private;
1126 struct cache *cache = mg->cache;
1127 struct per_bio_data *pb = get_per_bio_data(bio);
1128
1129 dm_unhook_bio(&pb->hook_info, bio);
1130
1131 if (bio->bi_status)
1132 mg->k.input = bio->bi_status;
1133
1134 queue_continuation(cache->wq, &mg->k);
1135 }
1136
1137 static void overwrite(struct dm_cache_migration *mg,
1138 void (*continuation)(struct work_struct *))
1139 {
1140 struct bio *bio = mg->overwrite_bio;
1141 struct per_bio_data *pb = get_per_bio_data(bio);
1142
1143 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1144
1145 /*
1146 * The overwrite bio is part of the copy operation, as such it does
1147 * not set/clear discard or dirty flags.
1148 */
1149 if (mg->op->op == POLICY_PROMOTE)
1150 remap_to_cache(mg->cache, bio, mg->op->cblock);
1151 else
1152 remap_to_origin(mg->cache, bio);
1153
1154 init_continuation(&mg->k, continuation);
1155 accounted_request(mg->cache, bio);
1156 }
1157
1158 /*
1159 * Migration steps:
1160 *
1161 * 1) exclusive lock preventing WRITEs
1162 * 2) quiesce
1163 * 3) copy or issue overwrite bio
1164 * 4) upgrade to exclusive lock preventing READs and WRITEs
1165 * 5) quiesce
1166 * 6) update metadata and commit
1167 * 7) unlock
1168 */
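/*
 * In terms of the functions below, the work is driven as a chain of
 * continuations, roughly:
 *
 *	mg_start() -> mg_lock_writes() -> mg_copy()
 *	  -> overwrite() or mg_full_copy()/copy()
 *	  -> mg_upgrade_lock() -> mg_update_metadata() -> mg_complete()
 *
 * with quiesce()/init_continuation() bridging the asynchronous steps.
 */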
1169 static void mg_complete(struct dm_cache_migration *mg, bool success)
1170 {
1171 struct bio_list bios;
1172 struct cache *cache = mg->cache;
1173 struct policy_work *op = mg->op;
1174 dm_cblock_t cblock = op->cblock;
1175
1176 if (success)
1177 update_stats(&cache->stats, op->op);
1178
1179 switch (op->op) {
1180 case POLICY_PROMOTE:
1181 clear_discard(cache, oblock_to_dblock(cache, op->oblock));
1182 policy_complete_background_work(cache->policy, op, success);
1183
1184 if (mg->overwrite_bio) {
1185 if (success)
1186 force_set_dirty(cache, cblock);
1187 else if (mg->k.input)
1188 mg->overwrite_bio->bi_status = mg->k.input;
1189 else
1190 mg->overwrite_bio->bi_status = BLK_STS_IOERR;
1191 bio_endio(mg->overwrite_bio);
1192 } else {
1193 if (success)
1194 force_clear_dirty(cache, cblock);
1195 dec_io_migrations(cache);
1196 }
1197 break;
1198
1199 case POLICY_DEMOTE:
1200 /*
1201 * We clear dirty here to update the nr_dirty counter.
1202 */
1203 if (success)
1204 force_clear_dirty(cache, cblock);
1205 policy_complete_background_work(cache->policy, op, success);
1206 dec_io_migrations(cache);
1207 break;
1208
1209 case POLICY_WRITEBACK:
1210 if (success)
1211 force_clear_dirty(cache, cblock);
1212 policy_complete_background_work(cache->policy, op, success);
1213 dec_io_migrations(cache);
1214 break;
1215 }
1216
1217 bio_list_init(&bios);
1218 if (mg->cell) {
1219 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1220 free_prison_cell(cache, mg->cell);
1221 }
1222
1223 free_migration(mg);
1224 defer_bios(cache, &bios);
1225 wake_migration_worker(cache);
1226
1227 background_work_end(cache);
1228 }
1229
1230 static void mg_success(struct work_struct *ws)
1231 {
1232 struct dm_cache_migration *mg = ws_to_mg(ws);
1233
1234 mg_complete(mg, mg->k.input == 0);
1235 }
1236
1237 static void mg_update_metadata(struct work_struct *ws)
1238 {
1239 int r;
1240 struct dm_cache_migration *mg = ws_to_mg(ws);
1241 struct cache *cache = mg->cache;
1242 struct policy_work *op = mg->op;
1243
1244 switch (op->op) {
1245 case POLICY_PROMOTE:
1246 r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);
1247 if (r) {
1248 DMERR_LIMIT("%s: migration failed; couldn't insert mapping",
1249 cache_device_name(cache));
1250 metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
1251
1252 mg_complete(mg, false);
1253 return;
1254 }
1255 mg_complete(mg, true);
1256 break;
1257
1258 case POLICY_DEMOTE:
1259 r = dm_cache_remove_mapping(cache->cmd, op->cblock);
1260 if (r) {
1261 DMERR_LIMIT("%s: migration failed; couldn't update on disk metadata",
1262 cache_device_name(cache));
1263 metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1264
1265 mg_complete(mg, false);
1266 return;
1267 }
1268
1269 /*
1270 * It would be nice if we only had to commit when a REQ_FLUSH
1271 * comes through. But there's one scenario that we have to
1272 * look out for:
1273 *
1274 * - vblock x in a cache block
1275 * - demotion occurs
1276 * - cache block gets reallocated and overwritten
1277 * - crash
1278 *
1279 * When we recover, because there was no commit the cache will
1280 * rollback to having the data for vblock x in the cache block.
1281 * But the cache block has since been overwritten, so it'll end
1282 * up pointing to data that was never in 'x' during the history
1283 * of the device.
1284 *
1285 * To avoid this issue we require a commit as part of the
1286 * demotion operation.
1287 */
1288 init_continuation(&mg->k, mg_success);
1289 continue_after_commit(&cache->committer, &mg->k);
1290 schedule_commit(&cache->committer);
1291 break;
1292
1293 case POLICY_WRITEBACK:
1294 mg_complete(mg, true);
1295 break;
1296 }
1297 }
1298
1299 static void mg_update_metadata_after_copy(struct work_struct *ws)
1300 {
1301 struct dm_cache_migration *mg = ws_to_mg(ws);
1302
1303 /*
1304 * Did the copy succeed?
1305 */
1306 if (mg->k.input)
1307 mg_complete(mg, false);
1308 else
1309 mg_update_metadata(ws);
1310 }
1311
1312 static void mg_upgrade_lock(struct work_struct *ws)
1313 {
1314 int r;
1315 struct dm_cache_migration *mg = ws_to_mg(ws);
1316
1317 /*
1318 * Did the copy succeed?
1319 */
1320 if (mg->k.input)
1321 mg_complete(mg, false);
1322
1323 else {
1324 /*
1325 * Now we want the lock to prevent both reads and writes.
1326 */
1327 r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
1328 READ_WRITE_LOCK_LEVEL);
1329 if (r < 0)
1330 mg_complete(mg, false);
1331
1332 else if (r)
1333 quiesce(mg, mg_update_metadata);
1334
1335 else
1336 mg_update_metadata(ws);
1337 }
1338 }
1339
1340 static void mg_full_copy(struct work_struct *ws)
1341 {
1342 struct dm_cache_migration *mg = ws_to_mg(ws);
1343 struct cache *cache = mg->cache;
1344 struct policy_work *op = mg->op;
1345 bool is_policy_promote = (op->op == POLICY_PROMOTE);
1346
1347 if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
1348 is_discarded_oblock(cache, op->oblock)) {
1349 mg_upgrade_lock(ws);
1350 return;
1351 }
1352
1353 init_continuation(&mg->k, mg_upgrade_lock);
1354 copy(mg, is_policy_promote);
1355 }
1356
1357 static void mg_copy(struct work_struct *ws)
1358 {
1359 struct dm_cache_migration *mg = ws_to_mg(ws);
1360
1361 if (mg->overwrite_bio) {
1362 /*
1363 * No exclusive lock was held when we last checked if the bio
1364 * was optimisable. So we have to check again in case things
1365 * have changed (eg, the block may no longer be discarded).
1366 */
1367 if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
1368 /*
1369 * Fallback to a real full copy after doing some tidying up.
1370 */
1371 bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
1372
1373 BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
1374 mg->overwrite_bio = NULL;
1375 inc_io_migrations(mg->cache);
1376 mg_full_copy(ws);
1377 return;
1378 }
1379
1380 /*
1381 * It's safe to do this here, even though it's new data
1382 * because all IO has been locked out of the block.
1383 *
1384 * mg_lock_writes() already took READ_WRITE_LOCK_LEVEL
1385 * so _not_ using mg_upgrade_lock() as the continuation.
1386 */
1387 overwrite(mg, mg_update_metadata_after_copy);
1388
1389 } else
1390 mg_full_copy(ws);
1391 }
1392
1393 static int mg_lock_writes(struct dm_cache_migration *mg)
1394 {
1395 int r;
1396 struct dm_cell_key_v2 key;
1397 struct cache *cache = mg->cache;
1398 struct dm_bio_prison_cell_v2 *prealloc;
1399
1400 prealloc = alloc_prison_cell(cache);
1401
1402 /*
1403 * Prevent writes to the block, but allow reads to continue.
1404 * Unless we're using an overwrite bio, in which case we lock
1405 * everything.
1406 */
1407 build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
1408 r = dm_cell_lock_v2(cache->prison, &key,
1409 mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
1410 prealloc, &mg->cell);
1411 if (r < 0) {
1412 free_prison_cell(cache, prealloc);
1413 mg_complete(mg, false);
1414 return r;
1415 }
1416
1417 if (mg->cell != prealloc)
1418 free_prison_cell(cache, prealloc);
1419
1420 if (r == 0)
1421 mg_copy(&mg->k.ws);
1422 else
1423 quiesce(mg, mg_copy);
1424
1425 return 0;
1426 }
1427
1428 static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
1429 {
1430 struct dm_cache_migration *mg;
1431
1432 if (!background_work_begin(cache)) {
1433 policy_complete_background_work(cache->policy, op, false);
1434 return -EPERM;
1435 }
1436
1437 mg = alloc_migration(cache);
1438
1439 mg->op = op;
1440 mg->overwrite_bio = bio;
1441
1442 if (!bio)
1443 inc_io_migrations(cache);
1444
1445 return mg_lock_writes(mg);
1446 }
1447
1448 /*
1449 *--------------------------------------------------------------
1450 * invalidation processing
1451 *--------------------------------------------------------------
1452 */
1453
1454 static void invalidate_complete(struct dm_cache_migration *mg, bool success)
1455 {
1456 struct bio_list bios;
1457 struct cache *cache = mg->cache;
1458
1459 bio_list_init(&bios);
1460 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1461 free_prison_cell(cache, mg->cell);
1462
1463 if (!success && mg->overwrite_bio)
1464 bio_io_error(mg->overwrite_bio);
1465
1466 free_migration(mg);
1467 defer_bios(cache, &bios);
1468
1469 background_work_end(cache);
1470 }
1471
1472 static void invalidate_completed(struct work_struct *ws)
1473 {
1474 struct dm_cache_migration *mg = ws_to_mg(ws);
1475
1476 invalidate_complete(mg, !mg->k.input);
1477 }
1478
1479 static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
1480 {
1481 int r;
1482
1483 r = policy_invalidate_mapping(cache->policy, cblock);
1484 if (!r) {
1485 r = dm_cache_remove_mapping(cache->cmd, cblock);
1486 if (r) {
1487 DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
1488 cache_device_name(cache));
1489 metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1490 }
1491
1492 } else if (r == -ENODATA) {
1493 /*
1494 * Harmless, already unmapped.
1495 */
1496 r = 0;
1497
1498 } else
1499 DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache));
1500
1501 return r;
1502 }
1503
1504 static void invalidate_remove(struct work_struct *ws)
1505 {
1506 int r;
1507 struct dm_cache_migration *mg = ws_to_mg(ws);
1508 struct cache *cache = mg->cache;
1509
1510 r = invalidate_cblock(cache, mg->invalidate_cblock);
1511 if (r) {
1512 invalidate_complete(mg, false);
1513 return;
1514 }
1515
1516 init_continuation(&mg->k, invalidate_completed);
1517 continue_after_commit(&cache->committer, &mg->k);
1518 remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
1519 mg->overwrite_bio = NULL;
1520 schedule_commit(&cache->committer);
1521 }
1522
1523 static int invalidate_lock(struct dm_cache_migration *mg)
1524 {
1525 int r;
1526 struct dm_cell_key_v2 key;
1527 struct cache *cache = mg->cache;
1528 struct dm_bio_prison_cell_v2 *prealloc;
1529
1530 prealloc = alloc_prison_cell(cache);
1531
1532 build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
1533 r = dm_cell_lock_v2(cache->prison, &key,
1534 READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
1535 if (r < 0) {
1536 free_prison_cell(cache, prealloc);
1537 invalidate_complete(mg, false);
1538 return r;
1539 }
1540
1541 if (mg->cell != prealloc)
1542 free_prison_cell(cache, prealloc);
1543
1544 if (r)
1545 quiesce(mg, invalidate_remove);
1546
1547 else {
1548 /*
1549 * We can't call invalidate_remove() directly here because we
1550 * might still be in request context.
1551 */
1552 init_continuation(&mg->k, invalidate_remove);
1553 queue_work(cache->wq, &mg->k.ws);
1554 }
1555
1556 return 0;
1557 }
1558
1559 static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
1560 dm_oblock_t oblock, struct bio *bio)
1561 {
1562 struct dm_cache_migration *mg;
1563
1564 if (!background_work_begin(cache))
1565 return -EPERM;
1566
1567 mg = alloc_migration(cache);
1568
1569 mg->overwrite_bio = bio;
1570 mg->invalidate_cblock = cblock;
1571 mg->invalidate_oblock = oblock;
1572
1573 return invalidate_lock(mg);
1574 }
1575
1576 /*
1577 *--------------------------------------------------------------
1578 * bio processing
1579 *--------------------------------------------------------------
1580 */
1581
1582 enum busy {
1583 IDLE,
1584 BUSY
1585 };
1586
1587 static enum busy spare_migration_bandwidth(struct cache *cache)
1588 {
1589 bool idle = dm_iot_idle_for(&cache->tracker, HZ);
1590 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
1591 cache->sectors_per_block;
1592
1593 if (idle && current_volume <= cache->migration_threshold)
1594 return IDLE;
1595 else
1596 return BUSY;
1597 }
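/*
 * e.g. with 128 sector blocks and three io migrations already in flight,
 * starting another would put current_volume at 4 * 128 = 512 sectors; we
 * only report IDLE if that fits within migration_threshold and the
 * tracker has seen no accounted io for the last second (HZ).
 */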
1598
1599 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1600 {
1601 atomic_inc(bio_data_dir(bio) == READ ?
1602 &cache->stats.read_hit : &cache->stats.write_hit);
1603 }
1604
1605 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1606 {
1607 atomic_inc(bio_data_dir(bio) == READ ?
1608 &cache->stats.read_miss : &cache->stats.write_miss);
1609 }
1610
1611 /*----------------------------------------------------------------*/
1612
1613 static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
1614 bool *commit_needed)
1615 {
1616 int r, data_dir;
1617 bool rb, background_queued;
1618 dm_cblock_t cblock;
1619
1620 *commit_needed = false;
1621
1622 rb = bio_detain_shared(cache, block, bio);
1623 if (!rb) {
1624 /*
1625 * An exclusive lock is held for this block, so we have to
1626 * wait. We set the commit_needed flag so the current
1627 * transaction will be committed asap, allowing this lock
1628 * to be dropped.
1629 */
1630 *commit_needed = true;
1631 return DM_MAPIO_SUBMITTED;
1632 }
1633
1634 data_dir = bio_data_dir(bio);
1635
1636 if (optimisable_bio(cache, bio, block)) {
1637 struct policy_work *op = NULL;
1638
1639 r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op);
1640 if (unlikely(r && r != -ENOENT)) {
1641 DMERR_LIMIT("%s: policy_lookup_with_work() failed with r = %d",
1642 cache_device_name(cache), r);
1643 bio_io_error(bio);
1644 return DM_MAPIO_SUBMITTED;
1645 }
1646
1647 if (r == -ENOENT && op) {
1648 bio_drop_shared_lock(cache, bio);
1649 BUG_ON(op->op != POLICY_PROMOTE);
1650 mg_start(cache, op, bio);
1651 return DM_MAPIO_SUBMITTED;
1652 }
1653 } else {
1654 r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued);
1655 if (unlikely(r && r != -ENOENT)) {
1656 DMERR_LIMIT("%s: policy_lookup() failed with r = %d",
1657 cache_device_name(cache), r);
1658 bio_io_error(bio);
1659 return DM_MAPIO_SUBMITTED;
1660 }
1661
1662 if (background_queued)
1663 wake_migration_worker(cache);
1664 }
1665
1666 if (r == -ENOENT) {
1667 struct per_bio_data *pb = get_per_bio_data(bio);
1668
1669 /*
1670 * Miss.
1671 */
1672 inc_miss_counter(cache, bio);
1673 if (pb->req_nr == 0) {
1674 accounted_begin(cache, bio);
1675 remap_to_origin_clear_discard(cache, bio, block);
1676 } else {
1677 /*
1678 * This is a duplicate writethrough io that is no
1679 * longer needed because the block has been demoted.
1680 */
1681 bio_endio(bio);
1682 return DM_MAPIO_SUBMITTED;
1683 }
1684 } else {
1685 /*
1686 * Hit.
1687 */
1688 inc_hit_counter(cache, bio);
1689
1690 /*
1691 * Passthrough always maps to the origin, invalidating any
1692 * cache blocks that are written to.
1693 */
1694 if (passthrough_mode(cache)) {
1695 if (bio_data_dir(bio) == WRITE) {
1696 bio_drop_shared_lock(cache, bio);
1697 atomic_inc(&cache->stats.demotion);
1698 invalidate_start(cache, cblock, block, bio);
1699 } else
1700 remap_to_origin_clear_discard(cache, bio, block);
1701 } else {
1702 if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
1703 !is_dirty(cache, cblock)) {
1704 remap_to_origin_and_cache(cache, bio, block, cblock);
1705 accounted_begin(cache, bio);
1706 } else
1707 remap_to_cache_dirty(cache, bio, block, cblock);
1708 }
1709 }
1710
1711 /*
1712 * dm core turns FUA requests into a separate payload and FLUSH req.
1713 */
1714 if (bio->bi_opf & REQ_FUA) {
1715 /*
1716 * issue_after_commit will call accounted_begin a second time. So
1717 * we call accounted_complete() to avoid double accounting.
1718 */
1719 accounted_complete(cache, bio);
1720 issue_after_commit(&cache->committer, bio);
1721 *commit_needed = true;
1722 return DM_MAPIO_SUBMITTED;
1723 }
1724
1725 return DM_MAPIO_REMAPPED;
1726 }
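/*
 * To summarise the remapping above: misses go to the origin (or complete
 * immediately if they're a redundant writethrough duplicate); hits go to
 * the cache, except that passthrough mode always uses the origin
 * (invalidating the cblock on writes) and writethrough mode sends writes
 * to clean blocks to both devices.
 */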
1727
1728 static bool process_bio(struct cache *cache, struct bio *bio)
1729 {
1730 bool commit_needed;
1731
1732 if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
1733 dm_submit_bio_remap(bio, NULL);
1734
1735 return commit_needed;
1736 }
1737
1738 /*
1739 * A non-zero return indicates read_only or fail_io mode.
1740 */
1741 static int commit(struct cache *cache, bool clean_shutdown)
1742 {
1743 int r;
1744
1745 if (get_cache_mode(cache) >= CM_READ_ONLY)
1746 return -EINVAL;
1747
1748 atomic_inc(&cache->stats.commit_count);
1749 r = dm_cache_commit(cache->cmd, clean_shutdown);
1750 if (r)
1751 metadata_operation_failed(cache, "dm_cache_commit", r);
1752
1753 return r;
1754 }
1755
1756 /*
1757 * Used by the batcher.
1758 */
1759 static blk_status_t commit_op(void *context)
1760 {
1761 struct cache *cache = context;
1762
1763 if (dm_cache_changed_this_transaction(cache->cmd))
1764 return errno_to_blk_status(commit(cache, false));
1765
1766 return 0;
1767 }
1768
1769 /*----------------------------------------------------------------*/
1770
1771 static bool process_flush_bio(struct cache *cache, struct bio *bio)
1772 {
1773 struct per_bio_data *pb = get_per_bio_data(bio);
1774
1775 if (!pb->req_nr)
1776 remap_to_origin(cache, bio);
1777 else
1778 remap_to_cache(cache, bio, 0);
1779
1780 issue_after_commit(&cache->committer, bio);
1781 return true;
1782 }
1783
1784 static bool process_discard_bio(struct cache *cache, struct bio *bio)
1785 {
1786 dm_dblock_t b, e;
1787
1788 /*
1789 * FIXME: do we need to lock the region? Or can we just assume the
1790 * user won't be so foolish as to issue discard concurrently with
1791 * other IO?
1792 */
1793 calc_discard_block_range(cache, bio, &b, &e);
1794 while (b != e) {
1795 set_discard(cache, b);
1796 b = to_dblock(from_dblock(b) + 1);
1797 }
1798
1799 if (cache->features.discard_passdown) {
1800 remap_to_origin(cache, bio);
1801 dm_submit_bio_remap(bio, NULL);
1802 } else
1803 bio_endio(bio);
1804
1805 return false;
1806 }
1807
1808 static void process_deferred_bios(struct work_struct *ws)
1809 {
1810 struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
1811
1812 bool commit_needed = false;
1813 struct bio_list bios;
1814 struct bio *bio;
1815
1816 bio_list_init(&bios);
1817
1818 spin_lock_irq(&cache->lock);
1819 bio_list_merge(&bios, &cache->deferred_bios);
1820 bio_list_init(&cache->deferred_bios);
1821 spin_unlock_irq(&cache->lock);
1822
1823 while ((bio = bio_list_pop(&bios))) {
1824 if (bio->bi_opf & REQ_PREFLUSH)
1825 commit_needed = process_flush_bio(cache, bio) || commit_needed;
1826
1827 else if (bio_op(bio) == REQ_OP_DISCARD)
1828 commit_needed = process_discard_bio(cache, bio) || commit_needed;
1829
1830 else
1831 commit_needed = process_bio(cache, bio) || commit_needed;
1832 cond_resched();
1833 }
1834
1835 if (commit_needed)
1836 schedule_commit(&cache->committer);
1837 }
1838
1839 /*
1840 *--------------------------------------------------------------
1841 * Main worker loop
1842 *--------------------------------------------------------------
1843 */
1844 static void requeue_deferred_bios(struct cache *cache)
1845 {
1846 struct bio *bio;
1847 struct bio_list bios;
1848
1849 bio_list_init(&bios);
1850 bio_list_merge(&bios, &cache->deferred_bios);
1851 bio_list_init(&cache->deferred_bios);
1852
1853 while ((bio = bio_list_pop(&bios))) {
1854 bio->bi_status = BLK_STS_DM_REQUEUE;
1855 bio_endio(bio);
1856 cond_resched();
1857 }
1858 }
1859
1860 /*
1861 * We want to commit periodically so that not too much
1862 * unwritten metadata builds up.
1863 */
1864 static void do_waker(struct work_struct *ws)
1865 {
1866 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1867
1868 policy_tick(cache->policy, true);
1869 wake_migration_worker(cache);
1870 schedule_commit(&cache->committer);
1871 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1872 }
1873
1874 static void check_migrations(struct work_struct *ws)
1875 {
1876 int r;
1877 struct policy_work *op;
1878 struct cache *cache = container_of(ws, struct cache, migration_worker);
1879 enum busy b;
1880
1881 for (;;) {
1882 b = spare_migration_bandwidth(cache);
1883
1884 r = policy_get_background_work(cache->policy, b == IDLE, &op);
1885 if (r == -ENODATA)
1886 break;
1887
1888 if (r) {
1889 DMERR_LIMIT("%s: policy_get_background_work failed",
1890 cache_device_name(cache));
1891 break;
1892 }
1893
1894 r = mg_start(cache, op, NULL);
1895 if (r)
1896 break;
1897
1898 cond_resched();
1899 }
1900 }
1901
1902 /*
1903 *--------------------------------------------------------------
1904 * Target methods
1905 *--------------------------------------------------------------
1906 */
1907
1908 /*
1909 * This function gets called on the error paths of the constructor, so we
1910 * have to cope with a partially initialised struct.
1911 */
1912 static void __destroy(struct cache *cache)
1913 {
1914 mempool_exit(&cache->migration_pool);
1915
1916 if (cache->prison)
1917 dm_bio_prison_destroy_v2(cache->prison);
1918
1919 if (cache->wq)
1920 destroy_workqueue(cache->wq);
1921
1922 if (cache->dirty_bitset)
1923 free_bitset(cache->dirty_bitset);
1924
1925 if (cache->discard_bitset)
1926 free_bitset(cache->discard_bitset);
1927
1928 if (cache->copier)
1929 dm_kcopyd_client_destroy(cache->copier);
1930
1931 if (cache->cmd)
1932 dm_cache_metadata_close(cache->cmd);
1933
1934 if (cache->metadata_dev)
1935 dm_put_device(cache->ti, cache->metadata_dev);
1936
1937 if (cache->origin_dev)
1938 dm_put_device(cache->ti, cache->origin_dev);
1939
1940 if (cache->cache_dev)
1941 dm_put_device(cache->ti, cache->cache_dev);
1942
1943 if (cache->policy)
1944 dm_cache_policy_destroy(cache->policy);
1945
1946 bioset_exit(&cache->bs);
1947
1948 kfree(cache);
1949 }
1950
1951 static void destroy(struct cache *cache)
1952 {
1953 unsigned int i;
1954
1955 cancel_delayed_work_sync(&cache->waker);
1956
1957 for (i = 0; i < cache->nr_ctr_args ; i++)
1958 kfree(cache->ctr_args[i]);
1959 kfree(cache->ctr_args);
1960
1961 __destroy(cache);
1962 }
1963
1964 static void cache_dtr(struct dm_target *ti)
1965 {
1966 struct cache *cache = ti->private;
1967
1968 destroy(cache);
1969 }
1970
1971 static sector_t get_dev_size(struct dm_dev *dev)
1972 {
1973 return bdev_nr_sectors(dev->bdev);
1974 }
1975
1976 /*----------------------------------------------------------------*/
1977
1978 /*
1979 * Construct a cache device mapping.
1980 *
1981 * cache <metadata dev> <cache dev> <origin dev> <block size>
1982 * <#feature args> [<feature arg>]*
1983 * <policy> <#policy args> [<policy arg>]*
1984 *
1985 * metadata dev : fast device holding the persistent metadata
1986 * cache dev : fast device holding cached data blocks
1987 * origin dev : slow device holding original data blocks
1988 * block size : cache unit size in sectors
1989 *
1990 * #feature args : number of feature arguments passed
1991 * feature args : writethrough. (The default is writeback.)
1992 *
1993 * policy : the replacement policy to use
1994 * #policy args : an even number of policy arguments corresponding
1995 * to key/value pairs passed to the policy
1996 * policy args : key/value pairs passed to the policy
1997 * E.g. 'sequential_threshold 1024'
1998 * See cache-policies.txt for details.
1999 *
2000 * Optional feature arguments are:
2001 * writethrough : write through caching that prohibits cache block
2002 * content from being different from origin block content.
2003 * Without this argument, the default behaviour is to write
2004 * back cache block contents later for performance reasons,
2005 * so they may differ from the corresponding origin blocks.
2006 */
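/*
 * Illustrative example only (the device names, sizes and the 'smq'
 * policy choice below are assumptions, not taken from this target):
 *
 *   dmsetup create my_cache --table \
 *     '0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-blocks \
 *      /dev/mapper/slow 512 1 writethrough smq 0'
 *
 * i.e. a 20GiB mapping with 512 sector (256KiB) cache blocks, one
 * feature argument (writethrough) and the smq policy with no policy
 * arguments.
 */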
2007 struct cache_args {
2008 struct dm_target *ti;
2009
2010 struct dm_dev *metadata_dev;
2011
2012 struct dm_dev *cache_dev;
2013 sector_t cache_sectors;
2014
2015 struct dm_dev *origin_dev;
2016
2017 uint32_t block_size;
2018
2019 const char *policy_name;
2020 int policy_argc;
2021 const char **policy_argv;
2022
2023 struct cache_features features;
2024 };
2025
2026 static void destroy_cache_args(struct cache_args *ca)
2027 {
2028 if (ca->metadata_dev)
2029 dm_put_device(ca->ti, ca->metadata_dev);
2030
2031 if (ca->cache_dev)
2032 dm_put_device(ca->ti, ca->cache_dev);
2033
2034 if (ca->origin_dev)
2035 dm_put_device(ca->ti, ca->origin_dev);
2036
2037 kfree(ca);
2038 }
2039
2040 static bool at_least_one_arg(struct dm_arg_set *as, char **error)
2041 {
2042 if (!as->argc) {
2043 *error = "Insufficient args";
2044 return false;
2045 }
2046
2047 return true;
2048 }
2049
2050 static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
2051 char **error)
2052 {
2053 int r;
2054 sector_t metadata_dev_size;
2055
2056 if (!at_least_one_arg(as, error))
2057 return -EINVAL;
2058
2059 r = dm_get_device(ca->ti, dm_shift_arg(as),
2060 BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->metadata_dev);
2061 if (r) {
2062 *error = "Error opening metadata device";
2063 return r;
2064 }
2065
2066 metadata_dev_size = get_dev_size(ca->metadata_dev);
2067 if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
2068 DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
2069 ca->metadata_dev->bdev, DM_CACHE_METADATA_MAX_SECTORS);
2070
2071 return 0;
2072 }
2073
2074 static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
2075 char **error)
2076 {
2077 int r;
2078
2079 if (!at_least_one_arg(as, error))
2080 return -EINVAL;
2081
2082 r = dm_get_device(ca->ti, dm_shift_arg(as),
2083 BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->cache_dev);
2084 if (r) {
2085 *error = "Error opening cache device";
2086 return r;
2087 }
2088 ca->cache_sectors = get_dev_size(ca->cache_dev);
2089
2090 return 0;
2091 }
2092
2093 static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
2094 char **error)
2095 {
2096 sector_t origin_sectors;
2097 int r;
2098
2099 if (!at_least_one_arg(as, error))
2100 return -EINVAL;
2101
2102 r = dm_get_device(ca->ti, dm_shift_arg(as),
2103 BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->origin_dev);
2104 if (r) {
2105 *error = "Error opening origin device";
2106 return r;
2107 }
2108
2109 origin_sectors = get_dev_size(ca->origin_dev);
2110 if (ca->ti->len > origin_sectors) {
2111 *error = "Device size larger than cached device";
2112 return -EINVAL;
2113 }
2114
2115 return 0;
2116 }
2117
2118 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
2119 char **error)
2120 {
2121 unsigned long block_size;
2122
2123 if (!at_least_one_arg(as, error))
2124 return -EINVAL;
2125
2126 if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
2127 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2128 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2129 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2130 *error = "Invalid data block size";
2131 return -EINVAL;
2132 }
2133
2134 if (block_size > ca->cache_sectors) {
2135 *error = "Data block size is larger than the cache device";
2136 return -EINVAL;
2137 }
2138
2139 ca->block_size = block_size;
2140
2141 return 0;
2142 }
2143
2144 static void init_features(struct cache_features *cf)
2145 {
2146 cf->mode = CM_WRITE;
2147 cf->io_mode = CM_IO_WRITEBACK;
2148 cf->metadata_version = 1;
2149 cf->discard_passdown = true;
2150 }
2151
2152 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2153 char **error)
2154 {
2155 static const struct dm_arg _args[] = {
2156 {0, 3, "Invalid number of cache feature arguments"},
2157 };
2158
2159 int r, mode_ctr = 0;
2160 unsigned int argc;
2161 const char *arg;
2162 struct cache_features *cf = &ca->features;
2163
2164 init_features(cf);
2165
2166 r = dm_read_arg_group(_args, as, &argc, error);
2167 if (r)
2168 return -EINVAL;
2169
2170 while (argc--) {
2171 arg = dm_shift_arg(as);
2172
2173 if (!strcasecmp(arg, "writeback")) {
2174 cf->io_mode = CM_IO_WRITEBACK;
2175 mode_ctr++;
2176 }
2177
2178 else if (!strcasecmp(arg, "writethrough")) {
2179 cf->io_mode = CM_IO_WRITETHROUGH;
2180 mode_ctr++;
2181 }
2182
2183 else if (!strcasecmp(arg, "passthrough")) {
2184 cf->io_mode = CM_IO_PASSTHROUGH;
2185 mode_ctr++;
2186 }
2187
2188 else if (!strcasecmp(arg, "metadata2"))
2189 cf->metadata_version = 2;
2190
2191 else if (!strcasecmp(arg, "no_discard_passdown"))
2192 cf->discard_passdown = false;
2193
2194 else {
2195 *error = "Unrecognised cache feature requested";
2196 return -EINVAL;
2197 }
2198 }
2199
2200 if (mode_ctr > 1) {
2201 *error = "Duplicate cache io_mode features requested";
2202 return -EINVAL;
2203 }
2204
2205 return 0;
2206 }
2207
2208 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2209 char **error)
2210 {
2211 static const struct dm_arg _args[] = {
2212 {0, 1024, "Invalid number of policy arguments"},
2213 };
2214
2215 int r;
2216
2217 if (!at_least_one_arg(as, error))
2218 return -EINVAL;
2219
2220 ca->policy_name = dm_shift_arg(as);
2221
2222 r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2223 if (r)
2224 return -EINVAL;
2225
2226 ca->policy_argv = (const char **)as->argv;
2227 dm_consume_args(as, ca->policy_argc);
2228
2229 return 0;
2230 }
2231
2232 static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2233 char **error)
2234 {
2235 int r;
2236 struct dm_arg_set as;
2237
2238 as.argc = argc;
2239 as.argv = argv;
2240
2241 r = parse_metadata_dev(ca, &as, error);
2242 if (r)
2243 return r;
2244
2245 r = parse_cache_dev(ca, &as, error);
2246 if (r)
2247 return r;
2248
2249 r = parse_origin_dev(ca, &as, error);
2250 if (r)
2251 return r;
2252
2253 r = parse_block_size(ca, &as, error);
2254 if (r)
2255 return r;
2256
2257 r = parse_features(ca, &as, error);
2258 if (r)
2259 return r;
2260
2261 r = parse_policy(ca, &as, error);
2262 if (r)
2263 return r;
2264
2265 return 0;
2266 }
2267
2268 /*----------------------------------------------------------------*/
2269
2270 static struct kmem_cache *migration_cache;
2271
2272 #define NOT_CORE_OPTION 1
2273
2274 static int process_config_option(struct cache *cache, const char *key, const char *value)
2275 {
2276 unsigned long tmp;
2277
2278 if (!strcasecmp(key, "migration_threshold")) {
2279 if (kstrtoul(value, 10, &tmp))
2280 return -EINVAL;
2281
2282 cache->migration_threshold = tmp;
2283 return 0;
2284 }
2285
2286 return NOT_CORE_OPTION;
2287 }
2288
2289 static int set_config_value(struct cache *cache, const char *key, const char *value)
2290 {
2291 int r = process_config_option(cache, key, value);
2292
2293 if (r == NOT_CORE_OPTION)
2294 r = policy_set_config_value(cache->policy, key, value);
2295
2296 if (r)
2297 DMWARN("bad config value for %s: %s", key, value);
2298
2299 return r;
2300 }
2301
2302 static int set_config_values(struct cache *cache, int argc, const char **argv)
2303 {
2304 int r = 0;
2305
2306 if (argc & 1) {
2307 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
2308 return -EINVAL;
2309 }
2310
2311 while (argc) {
2312 r = set_config_value(cache, argv[0], argv[1]);
2313 if (r)
2314 break;
2315
2316 argc -= 2;
2317 argv += 2;
2318 }
2319
2320 return r;
2321 }
2322
2323 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2324 char **error)
2325 {
2326 struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2327 cache->cache_size,
2328 cache->origin_sectors,
2329 cache->sectors_per_block);
2330 if (IS_ERR(p)) {
2331 *error = "Error creating cache's policy";
2332 return PTR_ERR(p);
2333 }
2334 cache->policy = p;
2335 BUG_ON(!cache->policy);
2336
2337 return 0;
2338 }
2339
2340 /*
2341 * We want the discard block size to be at least the size of the cache
2342 * block size and have no more than 2^14 discard blocks across the origin.
2343 */
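/*
 * Worked example with illustrative numbers: for a 512 sector cache
 * block and a 2^31 sector (1TiB) origin, 2^31 / 512 = 4M discard
 * blocks, far more than 2^14, so calculate_discard_block_size() below
 * keeps doubling until 2^31 / discard_block_size <= 2^14, giving a
 * discard block of 2^17 sectors (64MiB).
 */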
2344 #define MAX_DISCARD_BLOCKS (1 << 14)
2345
2346 static bool too_many_discard_blocks(sector_t discard_block_size,
2347 sector_t origin_size)
2348 {
2349 (void) sector_div(origin_size, discard_block_size);
2350
2351 return origin_size > MAX_DISCARD_BLOCKS;
2352 }
2353
2354 static sector_t calculate_discard_block_size(sector_t cache_block_size,
2355 sector_t origin_size)
2356 {
2357 sector_t discard_block_size = cache_block_size;
2358
2359 if (origin_size)
2360 while (too_many_discard_blocks(discard_block_size, origin_size))
2361 discard_block_size *= 2;
2362
2363 return discard_block_size;
2364 }
2365
2366 static void set_cache_size(struct cache *cache, dm_cblock_t size)
2367 {
2368 dm_block_t nr_blocks = from_cblock(size);
2369
2370 if (nr_blocks > (1 << 20) && cache->cache_size != size)
2371 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2372 "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
2373 "Please consider increasing the cache block size to reduce the overall cache block count.",
2374 (unsigned long long) nr_blocks);
2375
2376 cache->cache_size = size;
2377 }
2378
2379 #define DEFAULT_MIGRATION_THRESHOLD 2048
2380
2381 static int cache_create(struct cache_args *ca, struct cache **result)
2382 {
2383 int r = 0;
2384 char **error = &ca->ti->error;
2385 struct cache *cache;
2386 struct dm_target *ti = ca->ti;
2387 dm_block_t origin_blocks;
2388 struct dm_cache_metadata *cmd;
2389 bool may_format = ca->features.mode == CM_WRITE;
2390
2391 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2392 if (!cache)
2393 return -ENOMEM;
2394
2395 cache->ti = ca->ti;
2396 ti->private = cache;
2397 ti->accounts_remapped_io = true;
2398 ti->num_flush_bios = 2;
2399 ti->flush_supported = true;
2400
2401 ti->num_discard_bios = 1;
2402 ti->discards_supported = true;
2403
2404 ti->per_io_data_size = sizeof(struct per_bio_data);
2405
2406 cache->features = ca->features;
2407 if (writethrough_mode(cache)) {
2408 /* Create bioset for writethrough bios issued to origin */
2409 r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
2410 if (r)
2411 goto bad;
2412 }
2413
2414 cache->metadata_dev = ca->metadata_dev;
2415 cache->origin_dev = ca->origin_dev;
2416 cache->cache_dev = ca->cache_dev;
2417
2418 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2419
2420 origin_blocks = cache->origin_sectors = ti->len;
2421 origin_blocks = block_div(origin_blocks, ca->block_size);
2422 cache->origin_blocks = to_oblock(origin_blocks);
2423
2424 cache->sectors_per_block = ca->block_size;
2425 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2426 r = -EINVAL;
2427 goto bad;
2428 }
2429
2430 if (ca->block_size & (ca->block_size - 1)) {
2431 dm_block_t cache_size = ca->cache_sectors;
2432
2433 cache->sectors_per_block_shift = -1;
2434 cache_size = block_div(cache_size, ca->block_size);
2435 set_cache_size(cache, to_cblock(cache_size));
2436 } else {
2437 cache->sectors_per_block_shift = __ffs(ca->block_size);
2438 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
2439 }
2440
2441 r = create_cache_policy(cache, ca, error);
2442 if (r)
2443 goto bad;
2444
2445 cache->policy_nr_args = ca->policy_argc;
2446 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2447
2448 r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2449 if (r) {
2450 *error = "Error setting cache policy's config values";
2451 goto bad;
2452 }
2453
2454 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2455 ca->block_size, may_format,
2456 dm_cache_policy_get_hint_size(cache->policy),
2457 ca->features.metadata_version);
2458 if (IS_ERR(cmd)) {
2459 *error = "Error creating metadata object";
2460 r = PTR_ERR(cmd);
2461 goto bad;
2462 }
2463 cache->cmd = cmd;
2464 set_cache_mode(cache, CM_WRITE);
2465 if (get_cache_mode(cache) != CM_WRITE) {
2466 *error = "Unable to get write access to metadata, please check/repair metadata.";
2467 r = -EINVAL;
2468 goto bad;
2469 }
2470
2471 if (passthrough_mode(cache)) {
2472 bool all_clean;
2473
2474 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2475 if (r) {
2476 *error = "dm_cache_metadata_all_clean() failed";
2477 goto bad;
2478 }
2479
2480 if (!all_clean) {
2481 *error = "Cannot enter passthrough mode unless all blocks are clean";
2482 r = -EINVAL;
2483 goto bad;
2484 }
2485
2486 policy_allow_migrations(cache->policy, false);
2487 }
2488
2489 spin_lock_init(&cache->lock);
2490 bio_list_init(&cache->deferred_bios);
2491 atomic_set(&cache->nr_allocated_migrations, 0);
2492 atomic_set(&cache->nr_io_migrations, 0);
2493 init_waitqueue_head(&cache->migration_wait);
2494
2495 r = -ENOMEM;
2496 atomic_set(&cache->nr_dirty, 0);
2497 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2498 if (!cache->dirty_bitset) {
2499 *error = "could not allocate dirty bitset";
2500 goto bad;
2501 }
2502 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2503
2504 cache->discard_block_size =
2505 calculate_discard_block_size(cache->sectors_per_block,
2506 cache->origin_sectors);
2507 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2508 cache->discard_block_size));
2509 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2510 if (!cache->discard_bitset) {
2511 *error = "could not allocate discard bitset";
2512 goto bad;
2513 }
2514 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2515
2516 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2517 if (IS_ERR(cache->copier)) {
2518 *error = "could not create kcopyd client";
2519 r = PTR_ERR(cache->copier);
2520 goto bad;
2521 }
2522
2523 cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
2524 if (!cache->wq) {
2525 *error = "could not create workqueue for metadata object";
2526 goto bad;
2527 }
2528 INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
2529 INIT_WORK(&cache->migration_worker, check_migrations);
2530 INIT_DELAYED_WORK(&cache->waker, do_waker);
2531
2532 cache->prison = dm_bio_prison_create_v2(cache->wq);
2533 if (!cache->prison) {
2534 *error = "could not create bio prison";
2535 goto bad;
2536 }
2537
2538 r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
2539 migration_cache);
2540 if (r) {
2541 *error = "Error creating cache's migration mempool";
2542 goto bad;
2543 }
2544
2545 cache->need_tick_bio = true;
2546 cache->sized = false;
2547 cache->invalidate = false;
2548 cache->commit_requested = false;
2549 cache->loaded_mappings = false;
2550 cache->loaded_discards = false;
2551
2552 load_stats(cache);
2553
2554 atomic_set(&cache->stats.demotion, 0);
2555 atomic_set(&cache->stats.promotion, 0);
2556 atomic_set(&cache->stats.copies_avoided, 0);
2557 atomic_set(&cache->stats.cache_cell_clash, 0);
2558 atomic_set(&cache->stats.commit_count, 0);
2559 atomic_set(&cache->stats.discard_count, 0);
2560
2561 spin_lock_init(&cache->invalidation_lock);
2562 INIT_LIST_HEAD(&cache->invalidation_requests);
2563
2564 batcher_init(&cache->committer, commit_op, cache,
2565 issue_op, cache, cache->wq);
2566 dm_iot_init(&cache->tracker);
2567
2568 init_rwsem(&cache->background_work_lock);
2569 prevent_background_work(cache);
2570
2571 *result = cache;
2572 return 0;
2573 bad:
2574 __destroy(cache);
2575 return r;
2576 }
2577
2578 static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2579 {
2580 unsigned int i;
2581 const char **copy;
2582
2583 copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2584 if (!copy)
2585 return -ENOMEM;
2586 for (i = 0; i < argc; i++) {
2587 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2588 if (!copy[i]) {
2589 while (i--)
2590 kfree(copy[i]);
2591 kfree(copy);
2592 return -ENOMEM;
2593 }
2594 }
2595
2596 cache->nr_ctr_args = argc;
2597 cache->ctr_args = copy;
2598
2599 return 0;
2600 }
2601
2602 static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2603 {
2604 int r = -EINVAL;
2605 struct cache_args *ca;
2606 struct cache *cache = NULL;
2607
2608 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2609 if (!ca) {
2610 ti->error = "Error allocating memory for cache";
2611 return -ENOMEM;
2612 }
2613 ca->ti = ti;
2614
2615 r = parse_cache_args(ca, argc, argv, &ti->error);
2616 if (r)
2617 goto out;
2618
2619 r = cache_create(ca, &cache);
2620 if (r)
2621 goto out;
2622
2623 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2624 if (r) {
2625 __destroy(cache);
2626 goto out;
2627 }
2628
2629 ti->private = cache;
2630 out:
2631 destroy_cache_args(ca);
2632 return r;
2633 }
2634
2635 /*----------------------------------------------------------------*/
2636
2637 static int cache_map(struct dm_target *ti, struct bio *bio)
2638 {
2639 struct cache *cache = ti->private;
2640
2641 int r;
2642 bool commit_needed;
2643 dm_oblock_t block = get_bio_block(cache, bio);
2644
2645 init_per_bio_data(bio);
2646 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2647 /*
2648 * This can only occur if the io goes to a partial block at
2649 * the end of the origin device. We don't cache these.
2650 * Just remap to the origin and carry on.
2651 */
2652 remap_to_origin(cache, bio);
2653 accounted_begin(cache, bio);
2654 return DM_MAPIO_REMAPPED;
2655 }
2656
2657 if (discard_or_flush(bio)) {
2658 defer_bio(cache, bio);
2659 return DM_MAPIO_SUBMITTED;
2660 }
2661
2662 r = map_bio(cache, bio, block, &commit_needed);
2663 if (commit_needed)
2664 schedule_commit(&cache->committer);
2665
2666 return r;
2667 }
2668
2669 static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
2670 {
2671 struct cache *cache = ti->private;
2672 unsigned long flags;
2673 struct per_bio_data *pb = get_per_bio_data(bio);
2674
2675 if (pb->tick) {
2676 policy_tick(cache->policy, false);
2677
2678 spin_lock_irqsave(&cache->lock, flags);
2679 cache->need_tick_bio = true;
2680 spin_unlock_irqrestore(&cache->lock, flags);
2681 }
2682
2683 bio_drop_shared_lock(cache, bio);
2684 accounted_complete(cache, bio);
2685
2686 return DM_ENDIO_DONE;
2687 }
2688
2689 static int write_dirty_bitset(struct cache *cache)
2690 {
2691 int r;
2692
2693 if (get_cache_mode(cache) >= CM_READ_ONLY)
2694 return -EINVAL;
2695
2696 r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
2697 if (r)
2698 metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
2699
2700 return r;
2701 }
2702
2703 static int write_discard_bitset(struct cache *cache)
2704 {
2705 unsigned int i, r;
2706
2707 if (get_cache_mode(cache) >= CM_READ_ONLY)
2708 return -EINVAL;
2709
2710 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2711 cache->discard_nr_blocks);
2712 if (r) {
2713 DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
2714 metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
2715 return r;
2716 }
2717
2718 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2719 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2720 is_discarded(cache, to_dblock(i)));
2721 if (r) {
2722 metadata_operation_failed(cache, "dm_cache_set_discard", r);
2723 return r;
2724 }
2725 }
2726
2727 return 0;
2728 }
2729
2730 static int write_hints(struct cache *cache)
2731 {
2732 int r;
2733
2734 if (get_cache_mode(cache) >= CM_READ_ONLY)
2735 return -EINVAL;
2736
2737 r = dm_cache_write_hints(cache->cmd, cache->policy);
2738 if (r) {
2739 metadata_operation_failed(cache, "dm_cache_write_hints", r);
2740 return r;
2741 }
2742
2743 return 0;
2744 }
2745
2746 /*
2747 * returns true on success
2748 */
2749 static bool sync_metadata(struct cache *cache)
2750 {
2751 int r1, r2, r3, r4;
2752
2753 r1 = write_dirty_bitset(cache);
2754 if (r1)
2755 DMERR("%s: could not write dirty bitset", cache_device_name(cache));
2756
2757 r2 = write_discard_bitset(cache);
2758 if (r2)
2759 DMERR("%s: could not write discard bitset", cache_device_name(cache));
2760
2761 save_stats(cache);
2762
2763 r3 = write_hints(cache);
2764 if (r3)
2765 DMERR("%s: could not write hints", cache_device_name(cache));
2766
2767 /*
2768 * If writing the above metadata failed, we still commit, but don't
2769 * set the clean shutdown flag. This will effectively force every
2770 * dirty bit to be set on reload.
2771 */
2772 r4 = commit(cache, !r1 && !r2 && !r3);
2773 if (r4)
2774 DMERR("%s: could not write cache metadata", cache_device_name(cache));
2775
2776 return !r1 && !r2 && !r3 && !r4;
2777 }
2778
2779 static void cache_postsuspend(struct dm_target *ti)
2780 {
2781 struct cache *cache = ti->private;
2782
2783 prevent_background_work(cache);
2784 BUG_ON(atomic_read(&cache->nr_io_migrations));
2785
2786 cancel_delayed_work_sync(&cache->waker);
2787 drain_workqueue(cache->wq);
2788 WARN_ON(cache->tracker.in_flight);
2789
2790 /*
2791 * If it's a flush suspend there won't be any deferred bios, so this
2792 * call is harmless.
2793 */
2794 requeue_deferred_bios(cache);
2795
2796 if (get_cache_mode(cache) == CM_WRITE)
2797 (void) sync_metadata(cache);
2798 }
2799
2800 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2801 bool dirty, uint32_t hint, bool hint_valid)
2802 {
2803 struct cache *cache = context;
2804
2805 if (dirty) {
2806 set_bit(from_cblock(cblock), cache->dirty_bitset);
2807 atomic_inc(&cache->nr_dirty);
2808 } else
2809 clear_bit(from_cblock(cblock), cache->dirty_bitset);
2810
2811 return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
2812 }
2813
2814 /*
2815 * The discard block size in the on disk metadata is not
2816 * necessarily the same as we're currently using. So we have to
2817 * be careful to only set the discarded attribute if we know it
2818 * covers a complete block of the new size.
2819 */
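/*
 * For instance (illustrative sizes): if the on disk discard blocks were
 * 2048 sectors and we are now using 8192 sector dblocks, a discarded
 * on disk run covering sectors [8192, 16384) maps exactly onto new
 * dblock 1 and is kept, whereas a run covering only part of a new
 * dblock is dropped, because set_discard_range() rounds the start up
 * and the end down to whole new-size blocks.
 */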
2820 struct discard_load_info {
2821 struct cache *cache;
2822
2823 /*
2824 * These blocks are sized using the on disk dblock size, rather
2825 * than the current one.
2826 */
2827 dm_block_t block_size;
2828 dm_block_t discard_begin, discard_end;
2829 };
2830
2831 static void discard_load_info_init(struct cache *cache,
2832 struct discard_load_info *li)
2833 {
2834 li->cache = cache;
2835 li->discard_begin = li->discard_end = 0;
2836 }
2837
2838 static void set_discard_range(struct discard_load_info *li)
2839 {
2840 sector_t b, e;
2841
2842 if (li->discard_begin == li->discard_end)
2843 return;
2844
2845 /*
2846 * Convert to sectors.
2847 */
2848 b = li->discard_begin * li->block_size;
2849 e = li->discard_end * li->block_size;
2850
2851 /*
2852 * Then convert back to the current dblock size.
2853 */
2854 b = dm_sector_div_up(b, li->cache->discard_block_size);
2855 sector_div(e, li->cache->discard_block_size);
2856
2857 /*
2858 * The origin may have shrunk, so we need to check we're still in
2859 * bounds.
2860 */
2861 if (e > from_dblock(li->cache->discard_nr_blocks))
2862 e = from_dblock(li->cache->discard_nr_blocks);
2863
2864 for (; b < e; b++)
2865 set_discard(li->cache, to_dblock(b));
2866 }
2867
2868 static int load_discard(void *context, sector_t discard_block_size,
2869 dm_dblock_t dblock, bool discard)
2870 {
2871 struct discard_load_info *li = context;
2872
2873 li->block_size = discard_block_size;
2874
2875 if (discard) {
2876 if (from_dblock(dblock) == li->discard_end)
2877 /*
2878 * We're already in a discard range, just extend it.
2879 */
2880 li->discard_end = li->discard_end + 1ULL;
2881
2882 else {
2883 /*
2884 * Emit the old range and start a new one.
2885 */
2886 set_discard_range(li);
2887 li->discard_begin = from_dblock(dblock);
2888 li->discard_end = li->discard_begin + 1ULL;
2889 }
2890 } else {
2891 set_discard_range(li);
2892 li->discard_begin = li->discard_end = 0;
2893 }
2894
2895 return 0;
2896 }
2897
2898 static dm_cblock_t get_cache_dev_size(struct cache *cache)
2899 {
2900 sector_t size = get_dev_size(cache->cache_dev);
2901 (void) sector_div(size, cache->sectors_per_block);
2902 return to_cblock(size);
2903 }
2904
2905 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2906 {
2907 if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
2908 DMERR("%s: unable to extend cache due to missing cache table reload",
2909 cache_device_name(cache));
2910 return false;
2911 }
2912
2913 /*
2914 * We can't drop a dirty block when shrinking the cache.
2915 */
2916 if (cache->loaded_mappings) {
2917 new_size = to_cblock(find_next_bit(cache->dirty_bitset,
2918 from_cblock(cache->cache_size),
2919 from_cblock(new_size)));
2920 if (new_size != cache->cache_size) {
2921 DMERR("%s: unable to shrink cache; cache block %llu is dirty",
2922 cache_device_name(cache),
2923 (unsigned long long) from_cblock(new_size));
2924 return false;
2925 }
2926 }
2927
2928 return true;
2929 }
2930
2931 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2932 {
2933 int r;
2934
2935 r = dm_cache_resize(cache->cmd, new_size);
2936 if (r) {
2937 DMERR("%s: could not resize cache metadata", cache_device_name(cache));
2938 metadata_operation_failed(cache, "dm_cache_resize", r);
2939 return r;
2940 }
2941
2942 set_cache_size(cache, new_size);
2943
2944 return 0;
2945 }
2946
2947 static int cache_preresume(struct dm_target *ti)
2948 {
2949 int r = 0;
2950 struct cache *cache = ti->private;
2951 dm_cblock_t csize = get_cache_dev_size(cache);
2952
2953 /*
2954 * Check to see if the cache has resized.
2955 */
2956 if (!cache->sized || csize != cache->cache_size) {
2957 if (!can_resize(cache, csize))
2958 return -EINVAL;
2959
2960 r = resize_cache_dev(cache, csize);
2961 if (r)
2962 return r;
2963
2964 cache->sized = true;
2965 }
2966
2967 if (!cache->loaded_mappings) {
2968 r = dm_cache_load_mappings(cache->cmd, cache->policy,
2969 load_mapping, cache);
2970 if (r) {
2971 DMERR("%s: could not load cache mappings", cache_device_name(cache));
2972 metadata_operation_failed(cache, "dm_cache_load_mappings", r);
2973 return r;
2974 }
2975
2976 cache->loaded_mappings = true;
2977 }
2978
2979 if (!cache->loaded_discards) {
2980 struct discard_load_info li;
2981
2982 /*
2983 * The discard bitset could have been resized, or the
2984 * discard block size changed. To be safe we start by
2985 * setting every dblock to not discarded.
2986 */
2987 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2988
2989 discard_load_info_init(cache, &li);
2990 r = dm_cache_load_discards(cache->cmd, load_discard, &li);
2991 if (r) {
2992 DMERR("%s: could not load origin discards", cache_device_name(cache));
2993 metadata_operation_failed(cache, "dm_cache_load_discards", r);
2994 return r;
2995 }
2996 set_discard_range(&li);
2997
2998 cache->loaded_discards = true;
2999 }
3000
3001 return r;
3002 }
3003
3004 static void cache_resume(struct dm_target *ti)
3005 {
3006 struct cache *cache = ti->private;
3007
3008 cache->need_tick_bio = true;
3009 allow_background_work(cache);
3010 do_waker(&cache->waker.work);
3011 }
3012
3013 static void emit_flags(struct cache *cache, char *result,
3014 unsigned int maxlen, ssize_t *sz_ptr)
3015 {
3016 ssize_t sz = *sz_ptr;
3017 struct cache_features *cf = &cache->features;
3018 unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
3019
3020 DMEMIT("%u ", count);
3021
3022 if (cf->metadata_version == 2)
3023 DMEMIT("metadata2 ");
3024
3025 if (writethrough_mode(cache))
3026 DMEMIT("writethrough ");
3027
3028 else if (passthrough_mode(cache))
3029 DMEMIT("passthrough ");
3030
3031 else if (writeback_mode(cache))
3032 DMEMIT("writeback ");
3033
3034 else {
3035 DMEMIT("unknown ");
3036 DMERR("%s: internal error: unknown io mode: %d",
3037 cache_device_name(cache), (int) cf->io_mode);
3038 }
3039
3040 if (!cf->discard_passdown)
3041 DMEMIT("no_discard_passdown ");
3042
3043 *sz_ptr = sz;
3044 }
3045
3046 /*
3047 * Status format:
3048 *
3049 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
3050 * <cache block size> <#used cache blocks>/<#total cache blocks>
3051 * <#read hits> <#read misses> <#write hits> <#write misses>
3052 * <#demotions> <#promotions> <#dirty>
3053 * <#features> <features>*
3054 * <#core args> <core args>
3055 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
3056 */
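/*
 * As a rough, made-up illustration, an INFO line for a healthy
 * writethrough cache using the smq policy might look something like:
 *
 *   8 27/4096 512 139/81920 321 1988 177 4250 12 34 0 1 writethrough \
 *   2 migration_threshold 2048 smq 0 rw -
 *
 * (the numbers are invented; the exact fields are produced by the
 * DMEMIT calls in cache_status() below).
 */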
3057 static void cache_status(struct dm_target *ti, status_type_t type,
3058 unsigned int status_flags, char *result, unsigned int maxlen)
3059 {
3060 int r = 0;
3061 unsigned int i;
3062 ssize_t sz = 0;
3063 dm_block_t nr_free_blocks_metadata = 0;
3064 dm_block_t nr_blocks_metadata = 0;
3065 char buf[BDEVNAME_SIZE];
3066 struct cache *cache = ti->private;
3067 dm_cblock_t residency;
3068 bool needs_check;
3069
3070 switch (type) {
3071 case STATUSTYPE_INFO:
3072 if (get_cache_mode(cache) == CM_FAIL) {
3073 DMEMIT("Fail");
3074 break;
3075 }
3076
3077 /* Commit to ensure statistics aren't out-of-date */
3078 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3079 (void) commit(cache, false);
3080
3081 r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
3082 if (r) {
3083 DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
3084 cache_device_name(cache), r);
3085 goto err;
3086 }
3087
3088 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
3089 if (r) {
3090 DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
3091 cache_device_name(cache), r);
3092 goto err;
3093 }
3094
3095 residency = policy_residency(cache->policy);
3096
3097 DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
3098 (unsigned int)DM_CACHE_METADATA_BLOCK_SIZE,
3099 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3100 (unsigned long long)nr_blocks_metadata,
3101 (unsigned long long)cache->sectors_per_block,
3102 (unsigned long long) from_cblock(residency),
3103 (unsigned long long) from_cblock(cache->cache_size),
3104 (unsigned int) atomic_read(&cache->stats.read_hit),
3105 (unsigned int) atomic_read(&cache->stats.read_miss),
3106 (unsigned int) atomic_read(&cache->stats.write_hit),
3107 (unsigned int) atomic_read(&cache->stats.write_miss),
3108 (unsigned int) atomic_read(&cache->stats.demotion),
3109 (unsigned int) atomic_read(&cache->stats.promotion),
3110 (unsigned long) atomic_read(&cache->nr_dirty));
3111
3112 emit_flags(cache, result, maxlen, &sz);
3113
3114 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
3115
3116 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
3117 if (sz < maxlen) {
3118 r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
3119 if (r)
3120 DMERR("%s: policy_emit_config_values returned %d",
3121 cache_device_name(cache), r);
3122 }
3123
3124 if (get_cache_mode(cache) == CM_READ_ONLY)
3125 DMEMIT("ro ");
3126 else
3127 DMEMIT("rw ");
3128
3129 r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
3130
3131 if (r || needs_check)
3132 DMEMIT("needs_check ");
3133 else
3134 DMEMIT("- ");
3135
3136 break;
3137
3138 case STATUSTYPE_TABLE:
3139 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3140 DMEMIT("%s ", buf);
3141 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3142 DMEMIT("%s ", buf);
3143 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3144 DMEMIT("%s", buf);
3145
3146 for (i = 0; i < cache->nr_ctr_args - 1; i++)
3147 DMEMIT(" %s", cache->ctr_args[i]);
3148 if (cache->nr_ctr_args)
3149 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
3150 break;
3151
3152 case STATUSTYPE_IMA:
3153 DMEMIT_TARGET_NAME_VERSION(ti->type);
3154 if (get_cache_mode(cache) == CM_FAIL)
3155 DMEMIT(",metadata_mode=fail");
3156 else if (get_cache_mode(cache) == CM_READ_ONLY)
3157 DMEMIT(",metadata_mode=ro");
3158 else
3159 DMEMIT(",metadata_mode=rw");
3160
3161 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3162 DMEMIT(",cache_metadata_device=%s", buf);
3163 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3164 DMEMIT(",cache_device=%s", buf);
3165 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3166 DMEMIT(",cache_origin_device=%s", buf);
3167 DMEMIT(",writethrough=%c", writethrough_mode(cache) ? 'y' : 'n');
3168 DMEMIT(",writeback=%c", writeback_mode(cache) ? 'y' : 'n');
3169 DMEMIT(",passthrough=%c", passthrough_mode(cache) ? 'y' : 'n');
3170 DMEMIT(",metadata2=%c", cache->features.metadata_version == 2 ? 'y' : 'n');
3171 DMEMIT(",no_discard_passdown=%c", cache->features.discard_passdown ? 'n' : 'y');
3172 DMEMIT(";");
3173 break;
3174 }
3175
3176 return;
3177
3178 err:
3179 DMEMIT("Error");
3180 }
3181
3182 /*
3183 * Defines a range of cblocks: begin to (end - 1) are in the range; end is
3184 * the one-past-the-end value.
3185 */
3186 struct cblock_range {
3187 dm_cblock_t begin;
3188 dm_cblock_t end;
3189 };
3190
3191 /*
3192 * A cache block range can take two forms:
3193 *
3194 * i) A single cblock, eg. '3456'
3195 * ii) A begin and end cblock with a dash between, eg. 123-234
3196 */
3197 static int parse_cblock_range(struct cache *cache, const char *str,
3198 struct cblock_range *result)
3199 {
3200 char dummy;
3201 uint64_t b, e;
3202 int r;
3203
3204 /*
3205 * Try and parse form (ii) first.
3206 */
3207 r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
3208 if (r < 0)
3209 return r;
3210
3211 if (r == 2) {
3212 result->begin = to_cblock(b);
3213 result->end = to_cblock(e);
3214 return 0;
3215 }
3216
3217 /*
3218 * That didn't work, try form (i).
3219 */
3220 r = sscanf(str, "%llu%c", &b, &dummy);
3221 if (r < 0)
3222 return r;
3223
3224 if (r == 1) {
3225 result->begin = to_cblock(b);
3226 result->end = to_cblock(from_cblock(result->begin) + 1u);
3227 return 0;
3228 }
3229
3230 DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
3231 return -EINVAL;
3232 }
3233
3234 static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
3235 {
3236 uint64_t b = from_cblock(range->begin);
3237 uint64_t e = from_cblock(range->end);
3238 uint64_t n = from_cblock(cache->cache_size);
3239
3240 if (b >= n) {
3241 DMERR("%s: begin cblock out of range: %llu >= %llu",
3242 cache_device_name(cache), b, n);
3243 return -EINVAL;
3244 }
3245
3246 if (e > n) {
3247 DMERR("%s: end cblock out of range: %llu > %llu",
3248 cache_device_name(cache), e, n);
3249 return -EINVAL;
3250 }
3251
3252 if (b >= e) {
3253 DMERR("%s: invalid cblock range: %llu >= %llu",
3254 cache_device_name(cache), b, e);
3255 return -EINVAL;
3256 }
3257
3258 return 0;
3259 }
3260
3261 static inline dm_cblock_t cblock_succ(dm_cblock_t b)
3262 {
3263 return to_cblock(from_cblock(b) + 1);
3264 }
3265
3266 static int request_invalidation(struct cache *cache, struct cblock_range *range)
3267 {
3268 int r = 0;
3269
3270 /*
3271 * We don't need to do any locking here because we know we're in
3272 * passthrough mode. There is potential for a race between an
3273 * invalidation triggered by an io and an invalidation message. This
3274 * is harmless, we must not worry if the policy call fails.
3275 */
3276 while (range->begin != range->end) {
3277 r = invalidate_cblock(cache, range->begin);
3278 if (r)
3279 return r;
3280
3281 range->begin = cblock_succ(range->begin);
3282 }
3283
3284 cache->commit_requested = true;
3285 return r;
3286 }
3287
3288 static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
3289 const char **cblock_ranges)
3290 {
3291 int r = 0;
3292 unsigned int i;
3293 struct cblock_range range;
3294
3295 if (!passthrough_mode(cache)) {
3296 DMERR("%s: cache has to be in passthrough mode for invalidation",
3297 cache_device_name(cache));
3298 return -EPERM;
3299 }
3300
3301 for (i = 0; i < count; i++) {
3302 r = parse_cblock_range(cache, cblock_ranges[i], &range);
3303 if (r)
3304 break;
3305
3306 r = validate_cblock_range(cache, &range);
3307 if (r)
3308 break;
3309
3310 /*
3311 * Pass begin and end origin blocks to the worker and wake it.
3312 */
3313 r = request_invalidation(cache, &range);
3314 if (r)
3315 break;
3316 }
3317
3318 return r;
3319 }
3320
3321 /*
3322 * Supports
3323 * "<key> <value>"
3324 * and
3325 * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
3326 *
3327 * The key migration_threshold is supported by the cache target core.
3328 */
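/*
 * For example (the device name and cblock numbers are illustrative):
 *
 *   dmsetup message my_cache 0 migration_threshold 4096
 *   dmsetup message my_cache 0 invalidate_cblocks 2345 3300-3330
 *
 * The invalidate_cblocks form is only accepted while the cache is in
 * passthrough mode.
 */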
3329 static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
3330 char *result, unsigned int maxlen)
3331 {
3332 struct cache *cache = ti->private;
3333
3334 if (!argc)
3335 return -EINVAL;
3336
3337 if (get_cache_mode(cache) >= CM_READ_ONLY) {
3338 DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
3339 cache_device_name(cache));
3340 return -EOPNOTSUPP;
3341 }
3342
3343 if (!strcasecmp(argv[0], "invalidate_cblocks"))
3344 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3345
3346 if (argc != 2)
3347 return -EINVAL;
3348
3349 return set_config_value(cache, argv[0], argv[1]);
3350 }
3351
3352 static int cache_iterate_devices(struct dm_target *ti,
3353 iterate_devices_callout_fn fn, void *data)
3354 {
3355 int r = 0;
3356 struct cache *cache = ti->private;
3357
3358 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3359 if (!r)
3360 r = fn(ti, cache->origin_dev, 0, ti->len, data);
3361
3362 return r;
3363 }
3364
3365 /*
3366 * If discard_passdown was enabled verify that the origin device
3367 * supports discards. Disable discard_passdown if not.
3368 */
3369 static void disable_passdown_if_not_supported(struct cache *cache)
3370 {
3371 struct block_device *origin_bdev = cache->origin_dev->bdev;
3372 struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
3373 const char *reason = NULL;
3374
3375 if (!cache->features.discard_passdown)
3376 return;
3377
3378 if (!bdev_max_discard_sectors(origin_bdev))
3379 reason = "discard unsupported";
3380
3381 else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
3382 reason = "max discard sectors smaller than a block";
3383
3384 if (reason) {
3385 DMWARN("Origin device (%pg) %s: Disabling discard passdown.",
3386 origin_bdev, reason);
3387 cache->features.discard_passdown = false;
3388 }
3389 }
3390
3391 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3392 {
3393 struct block_device *origin_bdev = cache->origin_dev->bdev;
3394 struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
3395
3396 if (!cache->features.discard_passdown) {
3397 /* No passdown is done so setting own virtual limits */
3398 limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
3399 cache->origin_sectors);
3400 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
3401 return;
3402 }
3403
3404 /*
3405 * cache_iterate_devices() is stacking both origin and fast device limits
3406 * but discards aren't passed to fast device, so inherit origin's limits.
3407 */
3408 limits->max_discard_sectors = origin_limits->max_discard_sectors;
3409 limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
3410 limits->discard_granularity = origin_limits->discard_granularity;
3411 limits->discard_alignment = origin_limits->discard_alignment;
3412 limits->discard_misaligned = origin_limits->discard_misaligned;
3413 }
3414
3415 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3416 {
3417 struct cache *cache = ti->private;
3418 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3419
3420 /*
3421 * If the system-determined stacked limits are compatible with the
3422 * cache's blocksize (io_opt is a factor) do not override them.
3423 */
3424 if (io_opt_sectors < cache->sectors_per_block ||
3425 do_div(io_opt_sectors, cache->sectors_per_block)) {
3426 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
3427 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
3428 }
3429
3430 disable_passdown_if_not_supported(cache);
3431 set_discard_limits(cache, limits);
3432 }
3433
3434 /*----------------------------------------------------------------*/
3435
3436 static struct target_type cache_target = {
3437 .name = "cache",
3438 .version = {2, 2, 0},
3439 .module = THIS_MODULE,
3440 .ctr = cache_ctr,
3441 .dtr = cache_dtr,
3442 .map = cache_map,
3443 .end_io = cache_end_io,
3444 .postsuspend = cache_postsuspend,
3445 .preresume = cache_preresume,
3446 .resume = cache_resume,
3447 .status = cache_status,
3448 .message = cache_message,
3449 .iterate_devices = cache_iterate_devices,
3450 .io_hints = cache_io_hints,
3451 };
3452
3453 static int __init dm_cache_init(void)
3454 {
3455 int r;
3456
3457 migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3458 if (!migration_cache)
3459 return -ENOMEM;
3460
3461 r = dm_register_target(&cache_target);
3462 if (r) {
3463 kmem_cache_destroy(migration_cache);
3464 return r;
3465 }
3466
3467 return 0;
3468 }
3469
3470 static void __exit dm_cache_exit(void)
3471 {
3472 dm_unregister_target(&cache_target);
3473 kmem_cache_destroy(migration_cache);
3474 }
3475
3476 module_init(dm_cache_init);
3477 module_exit(dm_cache_exit);
3478
3479 MODULE_DESCRIPTION(DM_NAME " cache target");
3480 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
3481 MODULE_LICENSE("GPL");
3482