11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * Copyright (C) 2001-2002 Sistina Software (UK) Limited. 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * This file is released under the GPL. 51da177e4SLinus Torvalds */ 61da177e4SLinus Torvalds 71da177e4SLinus Torvalds #include <linux/blkdev.h> 81da177e4SLinus Torvalds #include <linux/device-mapper.h> 990fa1527SMikulas Patocka #include <linux/delay.h> 101da177e4SLinus Torvalds #include <linux/fs.h> 111da177e4SLinus Torvalds #include <linux/init.h> 121da177e4SLinus Torvalds #include <linux/kdev_t.h> 131da177e4SLinus Torvalds #include <linux/list.h> 14f79ae415SNikos Tsironis #include <linux/list_bl.h> 151da177e4SLinus Torvalds #include <linux/mempool.h> 161da177e4SLinus Torvalds #include <linux/module.h> 171da177e4SLinus Torvalds #include <linux/slab.h> 181da177e4SLinus Torvalds #include <linux/vmalloc.h> 196f3c3f0aSvignesh babu #include <linux/log2.h> 20a765e20eSAlasdair G Kergon #include <linux/dm-kcopyd.h> 211da177e4SLinus Torvalds 22b735fedeSMikulas Patocka #include "dm.h" 23b735fedeSMikulas Patocka 24aea53d92SJonathan Brassow #include "dm-exception-store.h" 251da177e4SLinus Torvalds 2672d94861SAlasdair G Kergon #define DM_MSG_PREFIX "snapshots" 2772d94861SAlasdair G Kergon 28d698aa45SMikulas Patocka static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; 29d698aa45SMikulas Patocka 30d698aa45SMikulas Patocka #define dm_target_is_snapshot_merge(ti) \ 31d698aa45SMikulas Patocka ((ti)->type->name == dm_snapshot_merge_target_name) 32d698aa45SMikulas Patocka 331da177e4SLinus Torvalds /* 34cd45daffSMikulas Patocka * The size of the mempool used to track chunks in use. 
35cd45daffSMikulas Patocka */ 36cd45daffSMikulas Patocka #define MIN_IOS 256 37cd45daffSMikulas Patocka 38ccc45ea8SJonathan Brassow #define DM_TRACKED_CHUNK_HASH_SIZE 16 39ccc45ea8SJonathan Brassow #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ 40ccc45ea8SJonathan Brassow (DM_TRACKED_CHUNK_HASH_SIZE - 1)) 41ccc45ea8SJonathan Brassow 42191437a5SJon Brassow struct dm_exception_table { 43ccc45ea8SJonathan Brassow uint32_t hash_mask; 44ccc45ea8SJonathan Brassow unsigned hash_shift; 45f79ae415SNikos Tsironis struct hlist_bl_head *table; 46ccc45ea8SJonathan Brassow }; 47ccc45ea8SJonathan Brassow 48ccc45ea8SJonathan Brassow struct dm_snapshot { 494ad8d880SNikos Tsironis struct rw_semaphore lock; 50ccc45ea8SJonathan Brassow 51ccc45ea8SJonathan Brassow struct dm_dev *origin; 52fc56f6fbSMike Snitzer struct dm_dev *cow; 53fc56f6fbSMike Snitzer 54fc56f6fbSMike Snitzer struct dm_target *ti; 55ccc45ea8SJonathan Brassow 56ccc45ea8SJonathan Brassow /* List of snapshots per Origin */ 57ccc45ea8SJonathan Brassow struct list_head list; 58ccc45ea8SJonathan Brassow 59d8ddb1cfSMike Snitzer /* 60d8ddb1cfSMike Snitzer * You can't use a snapshot if this is 0 (e.g. if full). 61d8ddb1cfSMike Snitzer * A snapshot-merge target never clears this. 62d8ddb1cfSMike Snitzer */ 63ccc45ea8SJonathan Brassow int valid; 64ccc45ea8SJonathan Brassow 6576c44f6dSMikulas Patocka /* 6676c44f6dSMikulas Patocka * The snapshot overflowed because of a write to the snapshot device. 6776c44f6dSMikulas Patocka * We don't have to invalidate the snapshot in this case, but we need 6876c44f6dSMikulas Patocka * to prevent further writes. 
6976c44f6dSMikulas Patocka */ 7076c44f6dSMikulas Patocka int snapshot_overflowed; 7176c44f6dSMikulas Patocka 72ccc45ea8SJonathan Brassow /* Origin writes don't trigger exceptions until this is set */ 73ccc45ea8SJonathan Brassow int active; 74ccc45ea8SJonathan Brassow 75ccc45ea8SJonathan Brassow atomic_t pending_exceptions_count; 76ccc45ea8SJonathan Brassow 773f1637f2SNikos Tsironis spinlock_t pe_allocation_lock; 783f1637f2SNikos Tsironis 793f1637f2SNikos Tsironis /* Protected by "pe_allocation_lock" */ 80230c83afSMikulas Patocka sector_t exception_start_sequence; 81230c83afSMikulas Patocka 82230c83afSMikulas Patocka /* Protected by kcopyd single-threaded callback */ 83230c83afSMikulas Patocka sector_t exception_complete_sequence; 84230c83afSMikulas Patocka 85230c83afSMikulas Patocka /* 86230c83afSMikulas Patocka * A list of pending exceptions that completed out of order. 87230c83afSMikulas Patocka * Protected by kcopyd single-threaded callback. 88230c83afSMikulas Patocka */ 893db2776dSDavid Jeffery struct rb_root out_of_order_tree; 90230c83afSMikulas Patocka 916f1c819cSKent Overstreet mempool_t pending_pool; 92924e600dSMike Snitzer 93191437a5SJon Brassow struct dm_exception_table pending; 94191437a5SJon Brassow struct dm_exception_table complete; 95ccc45ea8SJonathan Brassow 96ccc45ea8SJonathan Brassow /* 97ccc45ea8SJonathan Brassow * pe_lock protects all pending_exception operations and access 98ccc45ea8SJonathan Brassow * as well as the snapshot_bios list. 
99ccc45ea8SJonathan Brassow */ 100ccc45ea8SJonathan Brassow spinlock_t pe_lock; 101ccc45ea8SJonathan Brassow 102924e600dSMike Snitzer /* Chunks with outstanding reads */ 103924e600dSMike Snitzer spinlock_t tracked_chunk_lock; 104924e600dSMike Snitzer struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; 105924e600dSMike Snitzer 106ccc45ea8SJonathan Brassow /* The on disk metadata handler */ 107ccc45ea8SJonathan Brassow struct dm_exception_store *store; 108ccc45ea8SJonathan Brassow 109b2155578SMikulas Patocka unsigned in_progress; 110b2155578SMikulas Patocka struct wait_queue_head in_progress_wait; 111721b1d98SNikos Tsironis 112ccc45ea8SJonathan Brassow struct dm_kcopyd_client *kcopyd_client; 113ccc45ea8SJonathan Brassow 114924e600dSMike Snitzer /* Wait for events based on state_bits */ 115924e600dSMike Snitzer unsigned long state_bits; 116924e600dSMike Snitzer 117924e600dSMike Snitzer /* Range of chunks currently being merged. */ 118924e600dSMike Snitzer chunk_t first_merging_chunk; 119924e600dSMike Snitzer int num_merging_chunks; 1201e03f97eSMikulas Patocka 121d8ddb1cfSMike Snitzer /* 122d8ddb1cfSMike Snitzer * The merge operation failed if this flag is set. 123d8ddb1cfSMike Snitzer * Failure modes are handled as follows: 124d8ddb1cfSMike Snitzer * - I/O error reading the header 125d8ddb1cfSMike Snitzer * => don't load the target; abort. 126d8ddb1cfSMike Snitzer * - Header does not have "valid" flag set 127d8ddb1cfSMike Snitzer * => use the origin; forget about the snapshot. 128d8ddb1cfSMike Snitzer * - I/O error when reading exceptions 129d8ddb1cfSMike Snitzer * => don't load the target; abort. 130d8ddb1cfSMike Snitzer * (We can't use the intermediate origin state.) 131d8ddb1cfSMike Snitzer * - I/O error while merging 132d8ddb1cfSMike Snitzer * => stop merging; set merge_failed; process I/O normally. 
133d8ddb1cfSMike Snitzer */ 1342e602385SMike Snitzer bool merge_failed:1; 1352e602385SMike Snitzer 1362e602385SMike Snitzer bool discard_zeroes_cow:1; 1372e602385SMike Snitzer bool discard_passdown_origin:1; 138d8ddb1cfSMike Snitzer 1399fe86254SMikulas Patocka /* 1409fe86254SMikulas Patocka * Incoming bios that overlap with chunks being merged must wait 1419fe86254SMikulas Patocka * for them to be committed. 1429fe86254SMikulas Patocka */ 1439fe86254SMikulas Patocka struct bio_list bios_queued_during_merge; 144ccc45ea8SJonathan Brassow }; 145ccc45ea8SJonathan Brassow 1461e03f97eSMikulas Patocka /* 1471e03f97eSMikulas Patocka * state_bits: 1481e03f97eSMikulas Patocka * RUNNING_MERGE - Merge operation is in progress. 1491e03f97eSMikulas Patocka * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped; 1501e03f97eSMikulas Patocka * cleared afterwards. 1511e03f97eSMikulas Patocka */ 1521e03f97eSMikulas Patocka #define RUNNING_MERGE 0 1531e03f97eSMikulas Patocka #define SHUTDOWN_MERGE 1 1541e03f97eSMikulas Patocka 155721b1d98SNikos Tsironis /* 156721b1d98SNikos Tsironis * Maximum number of chunks being copied on write. 157721b1d98SNikos Tsironis * 158721b1d98SNikos Tsironis * The value was decided experimentally as a trade-off between memory 159721b1d98SNikos Tsironis * consumption, stalling the kernel's workqueues and maintaining a high enough 160721b1d98SNikos Tsironis * throughput. 
161721b1d98SNikos Tsironis */ 162721b1d98SNikos Tsironis #define DEFAULT_COW_THRESHOLD 2048 163721b1d98SNikos Tsironis 164b2155578SMikulas Patocka static unsigned cow_threshold = DEFAULT_COW_THRESHOLD; 165b2155578SMikulas Patocka module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644); 166721b1d98SNikos Tsironis MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write"); 167721b1d98SNikos Tsironis 168df5d2e90SMikulas Patocka DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, 169df5d2e90SMikulas Patocka "A percentage of time allocated for copy on write"); 170df5d2e90SMikulas Patocka 171c2411045SMikulas Patocka struct dm_dev *dm_snap_origin(struct dm_snapshot *s) 172c2411045SMikulas Patocka { 173c2411045SMikulas Patocka return s->origin; 174c2411045SMikulas Patocka } 175c2411045SMikulas Patocka EXPORT_SYMBOL(dm_snap_origin); 176c2411045SMikulas Patocka 177fc56f6fbSMike Snitzer struct dm_dev *dm_snap_cow(struct dm_snapshot *s) 178fc56f6fbSMike Snitzer { 179fc56f6fbSMike Snitzer return s->cow; 180fc56f6fbSMike Snitzer } 181fc56f6fbSMike Snitzer EXPORT_SYMBOL(dm_snap_cow); 182fc56f6fbSMike Snitzer 183ccc45ea8SJonathan Brassow static sector_t chunk_to_sector(struct dm_exception_store *store, 184ccc45ea8SJonathan Brassow chunk_t chunk) 185ccc45ea8SJonathan Brassow { 186ccc45ea8SJonathan Brassow return chunk << store->chunk_shift; 187ccc45ea8SJonathan Brassow } 188ccc45ea8SJonathan Brassow 189ccc45ea8SJonathan Brassow static int bdev_equal(struct block_device *lhs, struct block_device *rhs) 190ccc45ea8SJonathan Brassow { 191ccc45ea8SJonathan Brassow /* 192ccc45ea8SJonathan Brassow * There is only ever one instance of a particular block 193ccc45ea8SJonathan Brassow * device so we can compare pointers safely. 
194ccc45ea8SJonathan Brassow */ 195ccc45ea8SJonathan Brassow return lhs == rhs; 196ccc45ea8SJonathan Brassow } 197ccc45ea8SJonathan Brassow 198028867acSAlasdair G Kergon struct dm_snap_pending_exception { 1991d4989c8SJon Brassow struct dm_exception e; 2001da177e4SLinus Torvalds 2011da177e4SLinus Torvalds /* 2021da177e4SLinus Torvalds * Origin buffers waiting for this to complete are held 2031da177e4SLinus Torvalds * in a bio list 2041da177e4SLinus Torvalds */ 2051da177e4SLinus Torvalds struct bio_list origin_bios; 2061da177e4SLinus Torvalds struct bio_list snapshot_bios; 2071da177e4SLinus Torvalds 2081da177e4SLinus Torvalds /* Pointer back to snapshot context */ 2091da177e4SLinus Torvalds struct dm_snapshot *snap; 2101da177e4SLinus Torvalds 2111da177e4SLinus Torvalds /* 2121da177e4SLinus Torvalds * 1 indicates the exception has already been sent to 2131da177e4SLinus Torvalds * kcopyd. 2141da177e4SLinus Torvalds */ 2151da177e4SLinus Torvalds int started; 216a6e50b40SMikulas Patocka 217230c83afSMikulas Patocka /* There was copying error. */ 218230c83afSMikulas Patocka int copy_error; 219230c83afSMikulas Patocka 220230c83afSMikulas Patocka /* A sequence number, it is used for in-order completion. */ 221230c83afSMikulas Patocka sector_t exception_sequence; 222230c83afSMikulas Patocka 2233db2776dSDavid Jeffery struct rb_node out_of_order_node; 224230c83afSMikulas Patocka 225a6e50b40SMikulas Patocka /* 226a6e50b40SMikulas Patocka * For writing a complete chunk, bypassing the copy. 
227a6e50b40SMikulas Patocka */ 228a6e50b40SMikulas Patocka struct bio *full_bio; 229a6e50b40SMikulas Patocka bio_end_io_t *full_bio_end_io; 2301da177e4SLinus Torvalds }; 2311da177e4SLinus Torvalds 2321da177e4SLinus Torvalds /* 2331da177e4SLinus Torvalds * Hash table mapping origin volumes to lists of snapshots and 2341da177e4SLinus Torvalds * a lock to protect it 2351da177e4SLinus Torvalds */ 236e18b890bSChristoph Lameter static struct kmem_cache *exception_cache; 237e18b890bSChristoph Lameter static struct kmem_cache *pending_cache; 2381da177e4SLinus Torvalds 239cd45daffSMikulas Patocka struct dm_snap_tracked_chunk { 240cd45daffSMikulas Patocka struct hlist_node node; 241cd45daffSMikulas Patocka chunk_t chunk; 242cd45daffSMikulas Patocka }; 243cd45daffSMikulas Patocka 244ee18026aSMikulas Patocka static void init_tracked_chunk(struct bio *bio) 245ee18026aSMikulas Patocka { 246ee18026aSMikulas Patocka struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); 247ee18026aSMikulas Patocka INIT_HLIST_NODE(&c->node); 248ee18026aSMikulas Patocka } 249ee18026aSMikulas Patocka 250ee18026aSMikulas Patocka static bool is_bio_tracked(struct bio *bio) 251ee18026aSMikulas Patocka { 252ee18026aSMikulas Patocka struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); 253ee18026aSMikulas Patocka return !hlist_unhashed(&c->node); 254ee18026aSMikulas Patocka } 255ee18026aSMikulas Patocka 256ee18026aSMikulas Patocka static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) 257cd45daffSMikulas Patocka { 25842bc954fSMikulas Patocka struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); 259cd45daffSMikulas Patocka 260cd45daffSMikulas Patocka c->chunk = chunk; 261cd45daffSMikulas Patocka 2629aa0c0e6SMikulas Patocka spin_lock_irq(&s->tracked_chunk_lock); 263cd45daffSMikulas Patocka hlist_add_head(&c->node, 264cd45daffSMikulas Patocka 
&s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); 2659aa0c0e6SMikulas Patocka spin_unlock_irq(&s->tracked_chunk_lock); 266cd45daffSMikulas Patocka } 267cd45daffSMikulas Patocka 268ee18026aSMikulas Patocka static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) 269cd45daffSMikulas Patocka { 270ee18026aSMikulas Patocka struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); 271cd45daffSMikulas Patocka unsigned long flags; 272cd45daffSMikulas Patocka 273cd45daffSMikulas Patocka spin_lock_irqsave(&s->tracked_chunk_lock, flags); 274cd45daffSMikulas Patocka hlist_del(&c->node); 275cd45daffSMikulas Patocka spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); 276cd45daffSMikulas Patocka } 277cd45daffSMikulas Patocka 278a8d41b59SMikulas Patocka static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) 279a8d41b59SMikulas Patocka { 280a8d41b59SMikulas Patocka struct dm_snap_tracked_chunk *c; 281a8d41b59SMikulas Patocka int found = 0; 282a8d41b59SMikulas Patocka 283a8d41b59SMikulas Patocka spin_lock_irq(&s->tracked_chunk_lock); 284a8d41b59SMikulas Patocka 285b67bfe0dSSasha Levin hlist_for_each_entry(c, 286a8d41b59SMikulas Patocka &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { 287a8d41b59SMikulas Patocka if (c->chunk == chunk) { 288a8d41b59SMikulas Patocka found = 1; 289a8d41b59SMikulas Patocka break; 290a8d41b59SMikulas Patocka } 291a8d41b59SMikulas Patocka } 292a8d41b59SMikulas Patocka 293a8d41b59SMikulas Patocka spin_unlock_irq(&s->tracked_chunk_lock); 294a8d41b59SMikulas Patocka 295a8d41b59SMikulas Patocka return found; 296a8d41b59SMikulas Patocka } 297a8d41b59SMikulas Patocka 2981da177e4SLinus Torvalds /* 299615d1eb9SMike Snitzer * This conflicting I/O is extremely improbable in the caller, 300615d1eb9SMike Snitzer * so msleep(1) is sufficient and there is no need for a wait queue. 
301615d1eb9SMike Snitzer */ 302615d1eb9SMike Snitzer static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) 303615d1eb9SMike Snitzer { 304615d1eb9SMike Snitzer while (__chunk_is_tracked(s, chunk)) 305615d1eb9SMike Snitzer msleep(1); 306615d1eb9SMike Snitzer } 307615d1eb9SMike Snitzer 308615d1eb9SMike Snitzer /* 3091da177e4SLinus Torvalds * One of these per registered origin, held in the snapshot_origins hash 3101da177e4SLinus Torvalds */ 3111da177e4SLinus Torvalds struct origin { 3121da177e4SLinus Torvalds /* The origin device */ 3131da177e4SLinus Torvalds struct block_device *bdev; 3141da177e4SLinus Torvalds 3151da177e4SLinus Torvalds struct list_head hash_list; 3161da177e4SLinus Torvalds 3171da177e4SLinus Torvalds /* List of snapshots for this origin */ 3181da177e4SLinus Torvalds struct list_head snapshots; 3191da177e4SLinus Torvalds }; 3201da177e4SLinus Torvalds 3211da177e4SLinus Torvalds /* 322b735fedeSMikulas Patocka * This structure is allocated for each origin target 323b735fedeSMikulas Patocka */ 324b735fedeSMikulas Patocka struct dm_origin { 325b735fedeSMikulas Patocka struct dm_dev *dev; 326b735fedeSMikulas Patocka struct dm_target *ti; 327b735fedeSMikulas Patocka unsigned split_boundary; 328b735fedeSMikulas Patocka struct list_head hash_list; 329b735fedeSMikulas Patocka }; 330b735fedeSMikulas Patocka 331b735fedeSMikulas Patocka /* 3321da177e4SLinus Torvalds * Size of the hash table for origin volumes. 
If we make this 3331da177e4SLinus Torvalds * the size of the minors list then it should be nearly perfect 3341da177e4SLinus Torvalds */ 3351da177e4SLinus Torvalds #define ORIGIN_HASH_SIZE 256 3361da177e4SLinus Torvalds #define ORIGIN_MASK 0xFF 3371da177e4SLinus Torvalds static struct list_head *_origins; 338b735fedeSMikulas Patocka static struct list_head *_dm_origins; 3391da177e4SLinus Torvalds static struct rw_semaphore _origins_lock; 3401da177e4SLinus Torvalds 34173dfd078SMikulas Patocka static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); 34273dfd078SMikulas Patocka static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock); 34373dfd078SMikulas Patocka static uint64_t _pending_exceptions_done_count; 34473dfd078SMikulas Patocka 3451da177e4SLinus Torvalds static int init_origin_hash(void) 3461da177e4SLinus Torvalds { 3471da177e4SLinus Torvalds int i; 3481da177e4SLinus Torvalds 3496da2ec56SKees Cook _origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head), 3501da177e4SLinus Torvalds GFP_KERNEL); 3511da177e4SLinus Torvalds if (!_origins) { 352b735fedeSMikulas Patocka DMERR("unable to allocate memory for _origins"); 3531da177e4SLinus Torvalds return -ENOMEM; 3541da177e4SLinus Torvalds } 3551da177e4SLinus Torvalds for (i = 0; i < ORIGIN_HASH_SIZE; i++) 3561da177e4SLinus Torvalds INIT_LIST_HEAD(_origins + i); 357b735fedeSMikulas Patocka 3586da2ec56SKees Cook _dm_origins = kmalloc_array(ORIGIN_HASH_SIZE, 3596da2ec56SKees Cook sizeof(struct list_head), 360b735fedeSMikulas Patocka GFP_KERNEL); 361b735fedeSMikulas Patocka if (!_dm_origins) { 362b735fedeSMikulas Patocka DMERR("unable to allocate memory for _dm_origins"); 363b735fedeSMikulas Patocka kfree(_origins); 364b735fedeSMikulas Patocka return -ENOMEM; 365b735fedeSMikulas Patocka } 366b735fedeSMikulas Patocka for (i = 0; i < ORIGIN_HASH_SIZE; i++) 367b735fedeSMikulas Patocka INIT_LIST_HEAD(_dm_origins + i); 368b735fedeSMikulas Patocka 3691da177e4SLinus Torvalds init_rwsem(&_origins_lock); 
3701da177e4SLinus Torvalds 3711da177e4SLinus Torvalds return 0; 3721da177e4SLinus Torvalds } 3731da177e4SLinus Torvalds 3741da177e4SLinus Torvalds static void exit_origin_hash(void) 3751da177e4SLinus Torvalds { 3761da177e4SLinus Torvalds kfree(_origins); 377b735fedeSMikulas Patocka kfree(_dm_origins); 3781da177e4SLinus Torvalds } 3791da177e4SLinus Torvalds 380028867acSAlasdair G Kergon static unsigned origin_hash(struct block_device *bdev) 3811da177e4SLinus Torvalds { 3821da177e4SLinus Torvalds return bdev->bd_dev & ORIGIN_MASK; 3831da177e4SLinus Torvalds } 3841da177e4SLinus Torvalds 3851da177e4SLinus Torvalds static struct origin *__lookup_origin(struct block_device *origin) 3861da177e4SLinus Torvalds { 3871da177e4SLinus Torvalds struct list_head *ol; 3881da177e4SLinus Torvalds struct origin *o; 3891da177e4SLinus Torvalds 3901da177e4SLinus Torvalds ol = &_origins[origin_hash(origin)]; 3911da177e4SLinus Torvalds list_for_each_entry (o, ol, hash_list) 3921da177e4SLinus Torvalds if (bdev_equal(o->bdev, origin)) 3931da177e4SLinus Torvalds return o; 3941da177e4SLinus Torvalds 3951da177e4SLinus Torvalds return NULL; 3961da177e4SLinus Torvalds } 3971da177e4SLinus Torvalds 3981da177e4SLinus Torvalds static void __insert_origin(struct origin *o) 3991da177e4SLinus Torvalds { 4001da177e4SLinus Torvalds struct list_head *sl = &_origins[origin_hash(o->bdev)]; 4011da177e4SLinus Torvalds list_add_tail(&o->hash_list, sl); 4021da177e4SLinus Torvalds } 4031da177e4SLinus Torvalds 404b735fedeSMikulas Patocka static struct dm_origin *__lookup_dm_origin(struct block_device *origin) 405b735fedeSMikulas Patocka { 406b735fedeSMikulas Patocka struct list_head *ol; 407b735fedeSMikulas Patocka struct dm_origin *o; 408b735fedeSMikulas Patocka 409b735fedeSMikulas Patocka ol = &_dm_origins[origin_hash(origin)]; 410b735fedeSMikulas Patocka list_for_each_entry (o, ol, hash_list) 411b735fedeSMikulas Patocka if (bdev_equal(o->dev->bdev, origin)) 412b735fedeSMikulas Patocka return o; 
413b735fedeSMikulas Patocka 414b735fedeSMikulas Patocka return NULL; 415b735fedeSMikulas Patocka } 416b735fedeSMikulas Patocka 417b735fedeSMikulas Patocka static void __insert_dm_origin(struct dm_origin *o) 418b735fedeSMikulas Patocka { 419b735fedeSMikulas Patocka struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)]; 420b735fedeSMikulas Patocka list_add_tail(&o->hash_list, sl); 421b735fedeSMikulas Patocka } 422b735fedeSMikulas Patocka 423b735fedeSMikulas Patocka static void __remove_dm_origin(struct dm_origin *o) 424b735fedeSMikulas Patocka { 425b735fedeSMikulas Patocka list_del(&o->hash_list); 426b735fedeSMikulas Patocka } 427b735fedeSMikulas Patocka 4281da177e4SLinus Torvalds /* 429c1f0c183SMike Snitzer * _origins_lock must be held when calling this function. 430c1f0c183SMike Snitzer * Returns number of snapshots registered using the supplied cow device, plus: 431c1f0c183SMike Snitzer * snap_src - a snapshot suitable for use as a source of exception handover 432c1f0c183SMike Snitzer * snap_dest - a snapshot capable of receiving exception handover. 4339d3b15c4SMikulas Patocka * snap_merge - an existing snapshot-merge target linked to the same origin. 4349d3b15c4SMikulas Patocka * There can be at most one snapshot-merge target. The parameter is optional. 435c1f0c183SMike Snitzer * 4369d3b15c4SMikulas Patocka * Possible return values and states of snap_src and snap_dest. 
437c1f0c183SMike Snitzer * 0: NULL, NULL - first new snapshot 438c1f0c183SMike Snitzer * 1: snap_src, NULL - normal snapshot 439c1f0c183SMike Snitzer * 2: snap_src, snap_dest - waiting for handover 440c1f0c183SMike Snitzer * 2: snap_src, NULL - handed over, waiting for old to be deleted 441c1f0c183SMike Snitzer * 1: NULL, snap_dest - source got destroyed without handover 442c1f0c183SMike Snitzer */ 443c1f0c183SMike Snitzer static int __find_snapshots_sharing_cow(struct dm_snapshot *snap, 444c1f0c183SMike Snitzer struct dm_snapshot **snap_src, 4459d3b15c4SMikulas Patocka struct dm_snapshot **snap_dest, 4469d3b15c4SMikulas Patocka struct dm_snapshot **snap_merge) 447c1f0c183SMike Snitzer { 448c1f0c183SMike Snitzer struct dm_snapshot *s; 449c1f0c183SMike Snitzer struct origin *o; 450c1f0c183SMike Snitzer int count = 0; 451c1f0c183SMike Snitzer int active; 452c1f0c183SMike Snitzer 453c1f0c183SMike Snitzer o = __lookup_origin(snap->origin->bdev); 454c1f0c183SMike Snitzer if (!o) 455c1f0c183SMike Snitzer goto out; 456c1f0c183SMike Snitzer 457c1f0c183SMike Snitzer list_for_each_entry(s, &o->snapshots, list) { 4589d3b15c4SMikulas Patocka if (dm_target_is_snapshot_merge(s->ti) && snap_merge) 4599d3b15c4SMikulas Patocka *snap_merge = s; 460c1f0c183SMike Snitzer if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) 461c1f0c183SMike Snitzer continue; 462c1f0c183SMike Snitzer 4634ad8d880SNikos Tsironis down_read(&s->lock); 464c1f0c183SMike Snitzer active = s->active; 4654ad8d880SNikos Tsironis up_read(&s->lock); 466c1f0c183SMike Snitzer 467c1f0c183SMike Snitzer if (active) { 468c1f0c183SMike Snitzer if (snap_src) 469c1f0c183SMike Snitzer *snap_src = s; 470c1f0c183SMike Snitzer } else if (snap_dest) 471c1f0c183SMike Snitzer *snap_dest = s; 472c1f0c183SMike Snitzer 473c1f0c183SMike Snitzer count++; 474c1f0c183SMike Snitzer } 475c1f0c183SMike Snitzer 476c1f0c183SMike Snitzer out: 477c1f0c183SMike Snitzer return count; 478c1f0c183SMike Snitzer } 479c1f0c183SMike Snitzer 480c1f0c183SMike 
Snitzer /* 481c1f0c183SMike Snitzer * On success, returns 1 if this snapshot is a handover destination, 482c1f0c183SMike Snitzer * otherwise returns 0. 483c1f0c183SMike Snitzer */ 484c1f0c183SMike Snitzer static int __validate_exception_handover(struct dm_snapshot *snap) 485c1f0c183SMike Snitzer { 486c1f0c183SMike Snitzer struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 4879d3b15c4SMikulas Patocka struct dm_snapshot *snap_merge = NULL; 488c1f0c183SMike Snitzer 489c1f0c183SMike Snitzer /* Does snapshot need exceptions handed over to it? */ 4909d3b15c4SMikulas Patocka if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, 4919d3b15c4SMikulas Patocka &snap_merge) == 2) || 492c1f0c183SMike Snitzer snap_dest) { 493c1f0c183SMike Snitzer snap->ti->error = "Snapshot cow pairing for exception " 494c1f0c183SMike Snitzer "table handover failed"; 495c1f0c183SMike Snitzer return -EINVAL; 496c1f0c183SMike Snitzer } 497c1f0c183SMike Snitzer 498c1f0c183SMike Snitzer /* 499c1f0c183SMike Snitzer * If no snap_src was found, snap cannot become a handover 500c1f0c183SMike Snitzer * destination. 501c1f0c183SMike Snitzer */ 502c1f0c183SMike Snitzer if (!snap_src) 503c1f0c183SMike Snitzer return 0; 504c1f0c183SMike Snitzer 5059d3b15c4SMikulas Patocka /* 5069d3b15c4SMikulas Patocka * Non-snapshot-merge handover? 5079d3b15c4SMikulas Patocka */ 5089d3b15c4SMikulas Patocka if (!dm_target_is_snapshot_merge(snap->ti)) 5099d3b15c4SMikulas Patocka return 1; 5109d3b15c4SMikulas Patocka 5119d3b15c4SMikulas Patocka /* 5129d3b15c4SMikulas Patocka * Do not allow more than one merging snapshot. 
5139d3b15c4SMikulas Patocka */ 5149d3b15c4SMikulas Patocka if (snap_merge) { 5159d3b15c4SMikulas Patocka snap->ti->error = "A snapshot is already merging."; 5169d3b15c4SMikulas Patocka return -EINVAL; 5179d3b15c4SMikulas Patocka } 5189d3b15c4SMikulas Patocka 5191e03f97eSMikulas Patocka if (!snap_src->store->type->prepare_merge || 5201e03f97eSMikulas Patocka !snap_src->store->type->commit_merge) { 5211e03f97eSMikulas Patocka snap->ti->error = "Snapshot exception store does not " 5221e03f97eSMikulas Patocka "support snapshot-merge."; 5231e03f97eSMikulas Patocka return -EINVAL; 5241e03f97eSMikulas Patocka } 5251e03f97eSMikulas Patocka 526c1f0c183SMike Snitzer return 1; 527c1f0c183SMike Snitzer } 528c1f0c183SMike Snitzer 529c1f0c183SMike Snitzer static void __insert_snapshot(struct origin *o, struct dm_snapshot *s) 530c1f0c183SMike Snitzer { 531c1f0c183SMike Snitzer struct dm_snapshot *l; 532c1f0c183SMike Snitzer 533c1f0c183SMike Snitzer /* Sort the list according to chunk size, largest-first smallest-last */ 534c1f0c183SMike Snitzer list_for_each_entry(l, &o->snapshots, list) 535c1f0c183SMike Snitzer if (l->store->chunk_size < s->store->chunk_size) 536c1f0c183SMike Snitzer break; 537c1f0c183SMike Snitzer list_add_tail(&s->list, &l->list); 538c1f0c183SMike Snitzer } 539c1f0c183SMike Snitzer 540c1f0c183SMike Snitzer /* 5411da177e4SLinus Torvalds * Make a note of the snapshot and its origin so we can look it 5421da177e4SLinus Torvalds * up when the origin has a write on it. 543c1f0c183SMike Snitzer * 544c1f0c183SMike Snitzer * Also validate snapshot exception store handovers. 545c1f0c183SMike Snitzer * On success, returns 1 if this registration is a handover destination, 546c1f0c183SMike Snitzer * otherwise returns 0. 
5471da177e4SLinus Torvalds */ 5481da177e4SLinus Torvalds static int register_snapshot(struct dm_snapshot *snap) 5491da177e4SLinus Torvalds { 550c1f0c183SMike Snitzer struct origin *o, *new_o = NULL; 5511da177e4SLinus Torvalds struct block_device *bdev = snap->origin->bdev; 552c1f0c183SMike Snitzer int r = 0; 5531da177e4SLinus Torvalds 55460c856c8SMikulas Patocka new_o = kmalloc(sizeof(*new_o), GFP_KERNEL); 55560c856c8SMikulas Patocka if (!new_o) 55660c856c8SMikulas Patocka return -ENOMEM; 55760c856c8SMikulas Patocka 5581da177e4SLinus Torvalds down_write(&_origins_lock); 5591da177e4SLinus Torvalds 560c1f0c183SMike Snitzer r = __validate_exception_handover(snap); 561c1f0c183SMike Snitzer if (r < 0) { 562c1f0c183SMike Snitzer kfree(new_o); 563c1f0c183SMike Snitzer goto out; 564c1f0c183SMike Snitzer } 565c1f0c183SMike Snitzer 566c1f0c183SMike Snitzer o = __lookup_origin(bdev); 56760c856c8SMikulas Patocka if (o) 56860c856c8SMikulas Patocka kfree(new_o); 56960c856c8SMikulas Patocka else { 5701da177e4SLinus Torvalds /* New origin */ 57160c856c8SMikulas Patocka o = new_o; 5721da177e4SLinus Torvalds 5731da177e4SLinus Torvalds /* Initialise the struct */ 5741da177e4SLinus Torvalds INIT_LIST_HEAD(&o->snapshots); 5751da177e4SLinus Torvalds o->bdev = bdev; 5761da177e4SLinus Torvalds 5771da177e4SLinus Torvalds __insert_origin(o); 5781da177e4SLinus Torvalds } 5791da177e4SLinus Torvalds 580c1f0c183SMike Snitzer __insert_snapshot(o, snap); 581c1f0c183SMike Snitzer 582c1f0c183SMike Snitzer out: 583c1f0c183SMike Snitzer up_write(&_origins_lock); 584c1f0c183SMike Snitzer 585c1f0c183SMike Snitzer return r; 586c1f0c183SMike Snitzer } 587c1f0c183SMike Snitzer 588c1f0c183SMike Snitzer /* 589c1f0c183SMike Snitzer * Move snapshot to correct place in list according to chunk size. 
590c1f0c183SMike Snitzer */ 591c1f0c183SMike Snitzer static void reregister_snapshot(struct dm_snapshot *s) 592c1f0c183SMike Snitzer { 593c1f0c183SMike Snitzer struct block_device *bdev = s->origin->bdev; 594c1f0c183SMike Snitzer 595c1f0c183SMike Snitzer down_write(&_origins_lock); 596c1f0c183SMike Snitzer 597c1f0c183SMike Snitzer list_del(&s->list); 598c1f0c183SMike Snitzer __insert_snapshot(__lookup_origin(bdev), s); 5991da177e4SLinus Torvalds 6001da177e4SLinus Torvalds up_write(&_origins_lock); 6011da177e4SLinus Torvalds } 6021da177e4SLinus Torvalds 6031da177e4SLinus Torvalds static void unregister_snapshot(struct dm_snapshot *s) 6041da177e4SLinus Torvalds { 6051da177e4SLinus Torvalds struct origin *o; 6061da177e4SLinus Torvalds 6071da177e4SLinus Torvalds down_write(&_origins_lock); 6081da177e4SLinus Torvalds o = __lookup_origin(s->origin->bdev); 6091da177e4SLinus Torvalds 6101da177e4SLinus Torvalds list_del(&s->list); 611c1f0c183SMike Snitzer if (o && list_empty(&o->snapshots)) { 6121da177e4SLinus Torvalds list_del(&o->hash_list); 6131da177e4SLinus Torvalds kfree(o); 6141da177e4SLinus Torvalds } 6151da177e4SLinus Torvalds 6161da177e4SLinus Torvalds up_write(&_origins_lock); 6171da177e4SLinus Torvalds } 6181da177e4SLinus Torvalds 6191da177e4SLinus Torvalds /* 6201da177e4SLinus Torvalds * Implementation of the exception hash tables. 621d74f81f8SMilan Broz * The lowest hash_shift bits of the chunk number are ignored, allowing 622d74f81f8SMilan Broz * some consecutive chunks to be grouped together. 6231da177e4SLinus Torvalds */ 624f79ae415SNikos Tsironis static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk); 625f79ae415SNikos Tsironis 626f79ae415SNikos Tsironis /* Lock to protect access to the completed and pending exception hash tables. 
 */
struct dm_exception_table_lock {
	/* Bit-locked bucket of the completed-exception table for one chunk */
	struct hlist_bl_head *complete_slot;
	/* Bit-locked bucket of the pending-exception table for the same chunk */
	struct hlist_bl_head *pending_slot;
};

/*
 * Work out which bucket of the completed and pending exception tables
 * the given chunk hashes into, so both can later be locked together.
 */
static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
					 struct dm_exception_table_lock *lock)
{
	struct dm_exception_table *complete = &s->complete;
	struct dm_exception_table *pending = &s->pending;

	lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
	lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
}

/*
 * Lock both buckets.  Order is complete-then-pending; the unlock below
 * releases in the reverse order.
 */
static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
{
	hlist_bl_lock(lock->complete_slot);
	hlist_bl_lock(lock->pending_slot);
}

static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
{
	hlist_bl_unlock(lock->pending_slot);
	hlist_bl_unlock(lock->complete_slot);
}

/*
 * Initialise an exception table with 'size' buckets.
 * NOTE: hash_mask = size - 1 is only a valid mask when size is a power
 * of two — callers in view round size with rounddown_pow_of_two().
 * Returns 0 on success or -ENOMEM if the bucket array cannot be
 * allocated.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_HLIST_BL_HEAD(et->table + i);

	return 0;
}

/*
 * Free every exception still hashed in the table (back to the given
 * slab cache) and then free the bucket array itself.
 */
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct hlist_bl_head *slot;
	struct dm_exception *ex;
	struct hlist_bl_node *pos, *n;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		/* _safe variant: entries are freed while walking the chain */
		hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

/*
 * Bucket index for a chunk: the low hash_shift bits are discarded so
 * consecutive chunks share a bucket.
 */
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

/* Unhash an exception; the caller remains responsible for freeing it. */
static void dm_remove_exception(struct dm_exception *e)
{
	hlist_bl_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct hlist_bl_head *slot;
	struct hlist_bl_node *pos;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	hlist_bl_for_each_entry(e, pos, slot, hash_list)
		/*
		 * An entry covers the run [old_chunk,
		 * old_chunk + dm_consecutive_chunk_count(e)], so a range
		 * test is needed rather than a simple equality check.
		 */
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

/*
 * Allocate a completed exception from the slab cache.  If a GFP_NOIO
 * allocation fails, retry once with GFP_ATOMIC (may dip into reserves);
 * may still return NULL.
 */
static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

/*
 * Allocate a pending exception from the snapshot's mempool (GFP_NOIO,
 * so it cannot fail permanently) and account for it in
 * pending_exceptions_count.  Paired with free_pending_exception().
 */
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, &s->pending_pool);
	/*
	 * Order the mempool_free() before the counter decrement —
	 * presumably so that a waiter observing the count reaching zero
	 * also sees the element returned to the pool; confirm against
	 * the code that waits on pending_exceptions_count (not in view).
	 */
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}

/*
 * Insert a completed exception into its hash bucket, keeping each
 * bucket's chain sorted by old_chunk.  Where possible the new entry is
 * merged into an adjacent existing entry (consecutive-chunk encoding)
 * and the new_e structure is freed instead of inserted.
 */
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct hlist_bl_head *l;
	struct hlist_bl_node *pos;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	hlist_bl_for_each_entry(e, pos, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		/* Found the first entry beyond new_e: insert before it */
		if (new_e->old_chunk < e->old_chunk)
			break;
	}

out:
	if (!e) {
		/*
		 * Either the table doesn't support consecutive chunks or slot
		 * l is empty.
		 */
		hlist_bl_add_head(&new_e->hash_list, l);
	} else if (new_e->old_chunk < e->old_chunk) {
		/* Add before an existing exception */
		hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
	} else {
		/* Add to l's tail: e is the last exception in this slot */
		hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
	}
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_exception_table_lock lock;
	struct dm_snapshot *s = context;	/* registered as opaque context */
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	/*
	 * Although there is no need to lock access to the exception tables
	 * here, if we don't then hlist_bl_add_head(), called by
	 * dm_insert_exception(), will complain about accessing the
	 * corresponding list without locking it first.
	 */
	dm_exception_table_lock_init(s, old, &lock);

	dm_exception_table_lock(&lock);
	dm_insert_exception(&s->complete, e);
	dm_exception_table_unlock(&lock);

	return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			/* min_not_zero() skips the initial 0 seed */
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;

	mem /= sizeof(struct hlist_bl_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	/* Clamp to at least 64 buckets, then to a power of two so the
	 * hash mask in dm_exception_table_init() is valid. */
	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		/* Roll back the completed table on failure */
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mark the merge thread as no longer running and wake anyone waiting
 * on RUNNING_MERGE (see stop_merge()).  clear_bit_unlock() provides
 * release semantics; the barrier orders it before the wake-up.
 */
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

/*
 * Reset the merge window and hand back the bios that were queued while
 * it was active.  Both callers in view invoke this with s->lock held
 * for write.
 */
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}

static void flush_bios(struct bio *bio);

/*
 * Remove the whole just-merged window [first_merging_chunk,
 * first_merging_chunk + num_merging_chunks) from the completed table,
 * then release (and flush) the bios that queued up during the merge.
 */
static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		/* Resubmit outside the lock */
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

/* Snapshot of the global pending-exception completion counter. */
static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

/*
 * Bump the global completion counter and wake everyone waiting in
 * snapshot_merge_next_chunks() for pending exceptions to drain.
 */
static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

/*
 * One step of the merge state machine: pick the next run of linear
 * chunks from the exception store, wait for conflicting pending
 * exceptions and in-flight I/O, then kick off a kcopyd copy back to
 * the origin.  merge_callback() continues the cycle when the copy
 * finishes; any failure path shuts the merge down via merge_shutdown().
 */
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	/* prepare_merge() reports how many chunks form a linear run */
	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			down_write(&s->lock);
			s->merge_failed = true;
			up_write(&s->lock);
		}
		/* 0 linear chunks means the merge is complete */
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	/* Clip so we never copy past the end of the origin device */
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void error_bios(struct bio *bio);

/*
 * kcopyd completion for one merge copy.  On success, commit the merge
 * in the exception store, drop the merged chunks from the in-memory
 * table, and schedule the next run.  On any error, mark the merge
 * failed, error the bios queued during the merge, and shut down.
 */
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = true;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

/*
 * Start a merge pass unless one is already running (RUNNING_MERGE acts
 * as the mutual exclusion flag; test_and_set_bit makes this idempotent).
 */
static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	/* Ask the merge loop to stop, then wait for merge_shutdown() */
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Parse the optional feature arguments of the snapshot target line:
 * [<# feature args> [discard_zeroes_cow] [discard_passdown_origin]].
 * Sets the corresponding flags in *s.  Returns 0 on success or
 * -EINVAL (with ti->error set) on an unknown or inconsistent feature.
 */
static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
				   struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature arguments"},
	};

	/*
	 * No feature arguments supplied.
	 */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	while (argc && !r) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "discard_zeroes_cow"))
			s->discard_zeroes_cow = true;

		else if (!strcasecmp(arg_name, "discard_passdown_origin"))
			s->discard_passdown_origin = true;

		else {
			ti->error = "Unrecognised feature requested";
			r = -EINVAL;
			break;
		}
	}

	if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
		/*
		 * TODO: really these are disjoint.. but ti->num_discard_bios
		 * and dm_bio_get_target_bio_nr() require rigid constraints.
		 */
		ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
		r = -EINVAL;
	}

	return r;
}

/*
 * Construct a snapshot mapping:
 * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	struct dm_arg_set as;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	dev_t origin_dev, cow_dev;
	unsigned args_used, num_flush_bios = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc < 4) {
		ti->error = "requires 4 or more arguments";
		r = -EINVAL;
		goto bad;
	}

	/* A merge target also writes to the origin and needs extra flushes */
	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	as.argc = argc;
	as.argv = argv;
12612e602385SMike Snitzer dm_consume_args(&as, 4); 12622e602385SMike Snitzer r = parse_snapshot_features(&as, s, ti); 12632e602385SMike Snitzer if (r) 12642e602385SMike Snitzer goto bad_features; 12652e602385SMike Snitzer 1266c2411045SMikulas Patocka origin_path = argv[0]; 1267c2411045SMikulas Patocka argv++; 1268c2411045SMikulas Patocka argc--; 1269c2411045SMikulas Patocka 1270c2411045SMikulas Patocka r = dm_get_device(ti, origin_path, origin_mode, &s->origin); 1271c2411045SMikulas Patocka if (r) { 1272c2411045SMikulas Patocka ti->error = "Cannot get origin device"; 1273c2411045SMikulas Patocka goto bad_origin; 1274c2411045SMikulas Patocka } 12754df2bf46SDingXiang origin_dev = s->origin->bdev->bd_dev; 1276c2411045SMikulas Patocka 1277fc56f6fbSMike Snitzer cow_path = argv[0]; 1278fc56f6fbSMike Snitzer argv++; 1279fc56f6fbSMike Snitzer argc--; 1280fc56f6fbSMike Snitzer 12814df2bf46SDingXiang cow_dev = dm_get_dev_t(cow_path); 12824df2bf46SDingXiang if (cow_dev && cow_dev == origin_dev) { 12834df2bf46SDingXiang ti->error = "COW device cannot be the same as origin device"; 12844df2bf46SDingXiang r = -EINVAL; 12854df2bf46SDingXiang goto bad_cow; 12864df2bf46SDingXiang } 12874df2bf46SDingXiang 1288024d37e9SMilan Broz r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow); 1289fc56f6fbSMike Snitzer if (r) { 1290fc56f6fbSMike Snitzer ti->error = "Cannot get COW device"; 1291fc56f6fbSMike Snitzer goto bad_cow; 1292fc56f6fbSMike Snitzer } 1293fc56f6fbSMike Snitzer 1294fc56f6fbSMike Snitzer r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); 1295fc56f6fbSMike Snitzer if (r) { 1296fc56f6fbSMike Snitzer ti->error = "Couldn't create exception store"; 1297fc56f6fbSMike Snitzer r = -EINVAL; 1298fc56f6fbSMike Snitzer goto bad_store; 1299fc56f6fbSMike Snitzer } 1300fc56f6fbSMike Snitzer 1301fc56f6fbSMike Snitzer argv += args_used; 1302fc56f6fbSMike Snitzer argc -= args_used; 1303fc56f6fbSMike Snitzer 1304fc56f6fbSMike Snitzer s->ti = ti; 
13051da177e4SLinus Torvalds s->valid = 1; 130676c44f6dSMikulas Patocka s->snapshot_overflowed = 0; 1307aa14edebSAlasdair G Kergon s->active = 0; 1308879129d2SMikulas Patocka atomic_set(&s->pending_exceptions_count, 0); 13093f1637f2SNikos Tsironis spin_lock_init(&s->pe_allocation_lock); 1310230c83afSMikulas Patocka s->exception_start_sequence = 0; 1311230c83afSMikulas Patocka s->exception_complete_sequence = 0; 13123db2776dSDavid Jeffery s->out_of_order_tree = RB_ROOT; 13134ad8d880SNikos Tsironis init_rwsem(&s->lock); 1314c1f0c183SMike Snitzer INIT_LIST_HEAD(&s->list); 1315ca3a931fSAlasdair G Kergon spin_lock_init(&s->pe_lock); 13161e03f97eSMikulas Patocka s->state_bits = 0; 13171d1dda8cSzhengbin s->merge_failed = false; 13189fe86254SMikulas Patocka s->first_merging_chunk = 0; 13199fe86254SMikulas Patocka s->num_merging_chunks = 0; 13209fe86254SMikulas Patocka bio_list_init(&s->bios_queued_during_merge); 13211da177e4SLinus Torvalds 13221da177e4SLinus Torvalds /* Allocate hash table for COW data */ 1323fee1998eSJonathan Brassow if (init_hash_tables(s)) { 13241da177e4SLinus Torvalds ti->error = "Unable to allocate hash table space"; 13251da177e4SLinus Torvalds r = -ENOMEM; 1326fee1998eSJonathan Brassow goto bad_hash_tables; 13271da177e4SLinus Torvalds } 13281da177e4SLinus Torvalds 1329b2155578SMikulas Patocka init_waitqueue_head(&s->in_progress_wait); 1330721b1d98SNikos Tsironis 1331df5d2e90SMikulas Patocka s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); 1332fa34ce73SMikulas Patocka if (IS_ERR(s->kcopyd_client)) { 1333fa34ce73SMikulas Patocka r = PTR_ERR(s->kcopyd_client); 13341da177e4SLinus Torvalds ti->error = "Could not create kcopyd client"; 1335fee1998eSJonathan Brassow goto bad_kcopyd; 13361da177e4SLinus Torvalds } 13371da177e4SLinus Torvalds 13386f1c819cSKent Overstreet r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache); 13396f1c819cSKent Overstreet if (r) { 134092e86812SMikulas Patocka ti->error = "Could not allocate mempool 
for pending exceptions"; 1341fee1998eSJonathan Brassow goto bad_pending_pool; 134292e86812SMikulas Patocka } 134392e86812SMikulas Patocka 1344cd45daffSMikulas Patocka for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 1345cd45daffSMikulas Patocka INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); 1346cd45daffSMikulas Patocka 1347cd45daffSMikulas Patocka spin_lock_init(&s->tracked_chunk_lock); 1348cd45daffSMikulas Patocka 1349c1f0c183SMike Snitzer ti->private = s; 135055a62eefSAlasdair G Kergon ti->num_flush_bios = num_flush_bios; 13512e602385SMike Snitzer if (s->discard_zeroes_cow) 13522e602385SMike Snitzer ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1); 135330187e1dSMike Snitzer ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk); 1354c1f0c183SMike Snitzer 1355c1f0c183SMike Snitzer /* Add snapshot to the list of snapshots for this origin */ 1356c1f0c183SMike Snitzer /* Exceptions aren't triggered till snapshot_resume() is called */ 1357c1f0c183SMike Snitzer r = register_snapshot(s); 1358c1f0c183SMike Snitzer if (r == -ENOMEM) { 1359c1f0c183SMike Snitzer ti->error = "Snapshot origin struct allocation failed"; 1360c1f0c183SMike Snitzer goto bad_load_and_register; 1361c1f0c183SMike Snitzer } else if (r < 0) { 1362c1f0c183SMike Snitzer /* invalid handover, register_snapshot has set ti->error */ 1363c1f0c183SMike Snitzer goto bad_load_and_register; 1364c1f0c183SMike Snitzer } 1365c1f0c183SMike Snitzer 1366c1f0c183SMike Snitzer /* 1367c1f0c183SMike Snitzer * Metadata must only be loaded into one table at once, so skip this 1368c1f0c183SMike Snitzer * if metadata will be handed over during resume. 1369c1f0c183SMike Snitzer * Chunk size will be set during the handover - set it to zero to 1370c1f0c183SMike Snitzer * ensure it's ignored. 
1371c1f0c183SMike Snitzer */ 1372c1f0c183SMike Snitzer if (r > 0) { 1373c1f0c183SMike Snitzer s->store->chunk_size = 0; 1374c1f0c183SMike Snitzer return 0; 1375c1f0c183SMike Snitzer } 1376c1f0c183SMike Snitzer 1377493df71cSJonathan Brassow r = s->store->type->read_metadata(s->store, dm_add_exception, 1378493df71cSJonathan Brassow (void *)s); 13790764147bSMilan Broz if (r < 0) { 1380f9cea4f7SMark McLoughlin ti->error = "Failed to read snapshot metadata"; 1381c1f0c183SMike Snitzer goto bad_read_metadata; 13820764147bSMilan Broz } else if (r > 0) { 13830764147bSMilan Broz s->valid = 0; 13840764147bSMilan Broz DMWARN("Snapshot is marked invalid."); 1385f9cea4f7SMark McLoughlin } 1386aa14edebSAlasdair G Kergon 13873f2412dcSMikulas Patocka if (!s->store->chunk_size) { 13883f2412dcSMikulas Patocka ti->error = "Chunk size not set"; 1389c1f0c183SMike Snitzer goto bad_read_metadata; 13903f2412dcSMikulas Patocka } 1391542f9038SMike Snitzer 1392542f9038SMike Snitzer r = dm_set_target_max_io_len(ti, s->store->chunk_size); 1393542f9038SMike Snitzer if (r) 1394542f9038SMike Snitzer goto bad_read_metadata; 13951da177e4SLinus Torvalds 13961da177e4SLinus Torvalds return 0; 13971da177e4SLinus Torvalds 1398c1f0c183SMike Snitzer bad_read_metadata: 1399c1f0c183SMike Snitzer unregister_snapshot(s); 1400cd45daffSMikulas Patocka bad_load_and_register: 14016f1c819cSKent Overstreet mempool_exit(&s->pending_pool); 1402fee1998eSJonathan Brassow bad_pending_pool: 1403eb69aca5SHeinz Mauelshagen dm_kcopyd_client_destroy(s->kcopyd_client); 1404fee1998eSJonathan Brassow bad_kcopyd: 14053510cb94SJon Brassow dm_exception_table_exit(&s->pending, pending_cache); 14063510cb94SJon Brassow dm_exception_table_exit(&s->complete, exception_cache); 1407fee1998eSJonathan Brassow bad_hash_tables: 1408fc56f6fbSMike Snitzer dm_exception_store_destroy(s->store); 1409fc56f6fbSMike Snitzer bad_store: 1410fc56f6fbSMike Snitzer dm_put_device(ti, s->cow); 1411fc56f6fbSMike Snitzer bad_cow: 1412c2411045SMikulas Patocka 
dm_put_device(ti, s->origin); 1413c2411045SMikulas Patocka bad_origin: 14142e602385SMike Snitzer bad_features: 14151da177e4SLinus Torvalds kfree(s); 1416fc56f6fbSMike Snitzer bad: 14171da177e4SLinus Torvalds return r; 14181da177e4SLinus Torvalds } 14191da177e4SLinus Torvalds 142031c93a0cSMilan Broz static void __free_exceptions(struct dm_snapshot *s) 142131c93a0cSMilan Broz { 1422eb69aca5SHeinz Mauelshagen dm_kcopyd_client_destroy(s->kcopyd_client); 142331c93a0cSMilan Broz s->kcopyd_client = NULL; 142431c93a0cSMilan Broz 14253510cb94SJon Brassow dm_exception_table_exit(&s->pending, pending_cache); 14263510cb94SJon Brassow dm_exception_table_exit(&s->complete, exception_cache); 142731c93a0cSMilan Broz } 142831c93a0cSMilan Broz 1429c1f0c183SMike Snitzer static void __handover_exceptions(struct dm_snapshot *snap_src, 1430c1f0c183SMike Snitzer struct dm_snapshot *snap_dest) 1431c1f0c183SMike Snitzer { 1432c1f0c183SMike Snitzer union { 1433c1f0c183SMike Snitzer struct dm_exception_table table_swap; 1434c1f0c183SMike Snitzer struct dm_exception_store *store_swap; 1435c1f0c183SMike Snitzer } u; 1436c1f0c183SMike Snitzer 1437c1f0c183SMike Snitzer /* 1438c1f0c183SMike Snitzer * Swap all snapshot context information between the two instances. 
1439c1f0c183SMike Snitzer */ 1440c1f0c183SMike Snitzer u.table_swap = snap_dest->complete; 1441c1f0c183SMike Snitzer snap_dest->complete = snap_src->complete; 1442c1f0c183SMike Snitzer snap_src->complete = u.table_swap; 1443c1f0c183SMike Snitzer 1444c1f0c183SMike Snitzer u.store_swap = snap_dest->store; 1445c1f0c183SMike Snitzer snap_dest->store = snap_src->store; 1446b0d3cc01SMike Snitzer snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow; 1447c1f0c183SMike Snitzer snap_src->store = u.store_swap; 1448c1f0c183SMike Snitzer 1449c1f0c183SMike Snitzer snap_dest->store->snap = snap_dest; 1450c1f0c183SMike Snitzer snap_src->store->snap = snap_src; 1451c1f0c183SMike Snitzer 1452542f9038SMike Snitzer snap_dest->ti->max_io_len = snap_dest->store->chunk_size; 1453c1f0c183SMike Snitzer snap_dest->valid = snap_src->valid; 145476c44f6dSMikulas Patocka snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed; 1455c1f0c183SMike Snitzer 1456c1f0c183SMike Snitzer /* 1457c1f0c183SMike Snitzer * Set source invalid to ensure it receives no further I/O. 
1458c1f0c183SMike Snitzer */ 1459c1f0c183SMike Snitzer snap_src->valid = 0; 1460c1f0c183SMike Snitzer } 1461c1f0c183SMike Snitzer 14621da177e4SLinus Torvalds static void snapshot_dtr(struct dm_target *ti) 14631da177e4SLinus Torvalds { 1464cd45daffSMikulas Patocka #ifdef CONFIG_DM_DEBUG 1465cd45daffSMikulas Patocka int i; 1466cd45daffSMikulas Patocka #endif 1467028867acSAlasdair G Kergon struct dm_snapshot *s = ti->private; 1468c1f0c183SMike Snitzer struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 14691da177e4SLinus Torvalds 1470c1f0c183SMike Snitzer down_read(&_origins_lock); 1471c1f0c183SMike Snitzer /* Check whether exception handover must be cancelled */ 14729d3b15c4SMikulas Patocka (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); 1473c1f0c183SMike Snitzer if (snap_src && snap_dest && (s == snap_src)) { 14744ad8d880SNikos Tsironis down_write(&snap_dest->lock); 1475c1f0c183SMike Snitzer snap_dest->valid = 0; 14764ad8d880SNikos Tsironis up_write(&snap_dest->lock); 1477c1f0c183SMike Snitzer DMERR("Cancelling snapshot handover."); 1478c1f0c183SMike Snitzer } 1479c1f0c183SMike Snitzer up_read(&_origins_lock); 1480c1f0c183SMike Snitzer 14811e03f97eSMikulas Patocka if (dm_target_is_snapshot_merge(ti)) 14821e03f97eSMikulas Patocka stop_merge(s); 14831e03f97eSMikulas Patocka 1484138728dcSAlasdair G Kergon /* Prevent further origin writes from using this snapshot. */ 1485138728dcSAlasdair G Kergon /* After this returns there can be no new kcopyd jobs. */ 14861da177e4SLinus Torvalds unregister_snapshot(s); 14871da177e4SLinus Torvalds 1488879129d2SMikulas Patocka while (atomic_read(&s->pending_exceptions_count)) 148990fa1527SMikulas Patocka msleep(1); 1490879129d2SMikulas Patocka /* 14916f1c819cSKent Overstreet * Ensure instructions in mempool_exit aren't reordered 1492879129d2SMikulas Patocka * before atomic_read. 
1493879129d2SMikulas Patocka */ 1494879129d2SMikulas Patocka smp_mb(); 1495879129d2SMikulas Patocka 1496cd45daffSMikulas Patocka #ifdef CONFIG_DM_DEBUG 1497cd45daffSMikulas Patocka for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 1498cd45daffSMikulas Patocka BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); 1499cd45daffSMikulas Patocka #endif 1500cd45daffSMikulas Patocka 150131c93a0cSMilan Broz __free_exceptions(s); 15021da177e4SLinus Torvalds 15036f1c819cSKent Overstreet mempool_exit(&s->pending_pool); 150492e86812SMikulas Patocka 1505fee1998eSJonathan Brassow dm_exception_store_destroy(s->store); 1506138728dcSAlasdair G Kergon 1507fc56f6fbSMike Snitzer dm_put_device(ti, s->cow); 1508fc56f6fbSMike Snitzer 1509c2411045SMikulas Patocka dm_put_device(ti, s->origin); 1510c2411045SMikulas Patocka 1511b2155578SMikulas Patocka WARN_ON(s->in_progress); 1512b2155578SMikulas Patocka 15131da177e4SLinus Torvalds kfree(s); 15141da177e4SLinus Torvalds } 15151da177e4SLinus Torvalds 1516a2f83e8bSMikulas Patocka static void account_start_copy(struct dm_snapshot *s) 1517a2f83e8bSMikulas Patocka { 1518b2155578SMikulas Patocka spin_lock(&s->in_progress_wait.lock); 1519b2155578SMikulas Patocka s->in_progress++; 1520b2155578SMikulas Patocka spin_unlock(&s->in_progress_wait.lock); 1521a2f83e8bSMikulas Patocka } 1522a2f83e8bSMikulas Patocka 1523a2f83e8bSMikulas Patocka static void account_end_copy(struct dm_snapshot *s) 1524a2f83e8bSMikulas Patocka { 1525b2155578SMikulas Patocka spin_lock(&s->in_progress_wait.lock); 1526b2155578SMikulas Patocka BUG_ON(!s->in_progress); 1527b2155578SMikulas Patocka s->in_progress--; 1528b2155578SMikulas Patocka if (likely(s->in_progress <= cow_threshold) && 1529b2155578SMikulas Patocka unlikely(waitqueue_active(&s->in_progress_wait))) 1530b2155578SMikulas Patocka wake_up_locked(&s->in_progress_wait); 1531b2155578SMikulas Patocka spin_unlock(&s->in_progress_wait.lock); 1532b2155578SMikulas Patocka } 1533b2155578SMikulas Patocka 1534b2155578SMikulas Patocka 
/*
 * Throttle writers while too much chunk copying is in flight.
 *
 * Returns true if the caller may proceed immediately.  Returns false if the
 * caller was put to sleep until the number of in-flight COW chunks dropped
 * (after releasing _origins_lock when @unlock_origins is set); the caller
 * must then retake its locks and retry.
 */
static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
	if (unlikely(s->in_progress > cow_threshold)) {
		spin_lock(&s->in_progress_wait.lock);
		/* Re-check under the waitqueue lock to avoid a lost wakeup */
		if (likely(s->in_progress > cow_threshold)) {
			/*
			 * NOTE: this throttle doesn't account for whether
			 * the caller is servicing an IO that will trigger a COW
			 * so excess throttling may result for chunks not required
			 * to be COW'd. But if cow_threshold was reached, extra
			 * throttling is unlikely to negatively impact performance.
			 */
			DECLARE_WAITQUEUE(wait, current);
			__add_wait_queue(&s->in_progress_wait, &wait);
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&s->in_progress_wait.lock);
			if (unlock_origins)
				up_read(&_origins_lock);
			io_schedule();
			remove_wait_queue(&s->in_progress_wait, &wait);
			return false;
		}
		spin_unlock(&s->in_progress_wait.lock);
	}
	return true;
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);

/*
 * Re-submit a list of origin bios: each is remapped through do_origin()
 * (with the throttle disabled) before being handed to the block layer.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio, false);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

/*
 * Mark the snapshot invalid and report it via a table event.
 * Caller must hold s->lock for write (see invalidate_snapshot()).
 */
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void invalidate_snapshot(struct dm_snapshot *s, int err)
{
	down_write(&s->lock);
	__invalidate_snapshot(s, err);
	up_write(&s->lock);
}

/*
 * Final stage of exception completion, invoked once the exception has been
 * committed to the store (see complete_exception()).
 *
 * On success the completed exception is inserted into the "complete" table
 * so subsequent snapshot reads of this chunk hit the COW device; on any
 * failure the whole snapshot is invalidated.  Either way the pending
 * exception is removed and the bios queued against it are released.
 */
static void pending_complete(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	struct dm_exception_table_lock lock;
	int error = 0;

	dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);

	if (!success) {
		/* Read/write error - snapshot is unusable */
		invalidate_snapshot(s, -EIO);
		error = 1;

		dm_exception_table_lock(&lock);
		goto out;
	}

	e = alloc_completed_exception(GFP_NOIO);
	if (!e) {
		invalidate_snapshot(s, -ENOMEM);
		error = 1;

		dm_exception_table_lock(&lock);
		goto out;
	}
	*e = pe->e;

	down_read(&s->lock);
	dm_exception_table_lock(&lock);
	if (!s->valid) {
		/* Snapshot was invalidated while this copy was in flight */
		up_read(&s->lock);
		free_completed_exception(e);
		error = 1;

		goto out;
	}

	/*
	 * Add a proper exception. After inserting the completed exception all
	 * subsequent snapshot reads to this chunk will be redirected to the
	 * COW device.  This ensures that we do not starve. Moreover, as long
	 * as the pending exception exists, neither origin writes nor snapshot
	 * merging can overwrite the chunk in origin.
	 */
	dm_insert_exception(&s->complete, e);
	up_read(&s->lock);

	/* Wait for conflicting reads to drain */
	if (__chunk_is_tracked(s, pe->e.old_chunk)) {
		dm_exception_table_unlock(&lock);
		__check_for_conflicting_io(s, pe->e.old_chunk);
		dm_exception_table_lock(&lock);
	}

out:
	/* Remove the in-flight exception from the list */
	dm_remove_exception(&pe->e);

	dm_exception_table_unlock(&lock);

	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio)
		full_bio->bi_end_io = pe->full_bio_end_io;
	increment_pending_exceptions_done_count();

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);

	free_pending_exception(pe);
}

/* Commit the exception to the store; pending_complete() runs afterwards. */
static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	/* Update the metadata if we are persistent */
	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
					 pending_complete, pe);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 *
 * Exceptions must be committed in allocation order (exception_sequence).
 * A completion that arrives early is parked in s->out_of_order_tree, keyed
 * by its sequence number, and drained once its predecessors complete.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	pe->copy_error = read_err || write_err;

	if (pe->exception_sequence == s->exception_complete_sequence) {
		struct rb_node *next;

		s->exception_complete_sequence++;
		complete_exception(pe);

		/* Drain any parked completions that are now in sequence */
		next = rb_first(&s->out_of_order_tree);
		while (next) {
			pe = rb_entry(next, struct dm_snap_pending_exception,
				      out_of_order_node);
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
			next = rb_next(next);
			s->exception_complete_sequence++;
			rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
			complete_exception(pe);
			cond_resched();
		}
	} else {
		/* Out of order: park this completion until its turn comes */
		struct rb_node *parent = NULL;
		struct rb_node **p = &s->out_of_order_tree.rb_node;
		struct dm_snap_pending_exception *pe2;

		while (*p) {
			pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
			parent = *p;

			/* Sequence numbers are unique; a duplicate is a bug */
			BUG_ON(pe->exception_sequence ==
			       pe2->exception_sequence);
			if (pe->exception_sequence < pe2->exception_sequence)
				p = &((*p)->rb_left);
			else
				p = &((*p)->rb_right);
		}

		rb_link_node(&pe->out_of_order_node, parent, p);
		rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
	}
	account_end_copy(s);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	/* Clamp the copy so it does not run off the end of the origin */
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	account_start_copy(s);
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
/*
 * Completion handler for a bio that is acting as the COW copy itself:
 * forward the result into kcopyd's callback machinery (which ends up in
 * copy_callback()).
 */
static void full_bio_end_io(struct bio *bio)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
}

/*
 * Let a write bio supply the chunk's data itself instead of copying the
 * chunk with kcopyd (used when the bio spans the whole chunk - see
 * io_overlaps_chunk() in the callers).  The bio's original end_io is saved
 * in the pending exception (pending_complete() restores it) and replaced
 * with full_bio_end_io() so completion is routed through kcopyd.
 */
static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;

	account_start_copy(s);
	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}

/*
 * Return the pending exception covering @chunk, or NULL if there is none.
 */
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Inserts a pending exception into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__insert_pending_exception(struct dm_snapshot *s,
			   struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	/*
	 * pe_allocation_lock serialises exception store allocation and the
	 * assignment of exception_sequence, so sequence order matches
	 * allocation order (copy_callback() completes in this order).
	 */
	spin_lock(&s->pe_allocation_lock);
	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		spin_unlock(&s->pe_allocation_lock);
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;
	spin_unlock(&s->pe_allocation_lock);

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	/* Reuse an existing pending exception if another caller got in first */
	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	return __insert_pending_exception(s, pe, chunk);
}

/*
 * Redirect @bio at the copy of @chunk held on the COW device.
 *
 * @e may cover a run of consecutive chunks, so the target chunk is
 * new_chunk + (chunk - old_chunk); the offset within the chunk (the
 * chunk_mask bits of the sector) is preserved.
 */
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio_set_dev(bio, s->cow->bdev);
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}

/* kcopyd callback for zero_exception(): completes the original discard bio */
static void zero_callback(int read_err, unsigned long write_err, void *context)
{
	struct bio *bio = context;
	struct dm_snapshot *s = bio->bi_private;

	account_end_copy(s);
	bio->bi_status = write_err ?
		BLK_STS_IOERR : 0;
	bio_endio(bio);
}

/*
 * Zero one whole chunk of the COW device asynchronously via kcopyd.
 * The destination starts at @bio's sector and spans exactly one chunk;
 * zero_callback() runs on completion with @bio as its context, and the
 * snapshot is stashed in bio->bi_private for the callback's accounting.
 */
static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
			   struct bio *bio, chunk_t chunk)
{
	struct dm_io_region dest;

	dest.bdev = s->cow->bdev;
	dest.sector = bio->bi_iter.bi_sector;
	dest.count = s->store->chunk_size;

	/* Count this zeroing as an in-flight copy for COW accounting. */
	account_start_copy(s);
	WARN_ON_ONCE(bio->bi_private);
	bio->bi_private = s;
	dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
}

/*
 * Does @bio cover exactly one whole chunk?  bi_size is in bytes,
 * chunk_size in sectors, hence the SECTOR_SHIFT conversion.
 */
static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(s->store->chunk_size << SECTOR_SHIFT);
}

/*
 * Map a bio submitted to the snapshot device.
 *
 * Already-remapped chunks and reads of unremapped chunks return
 * DM_MAPIO_REMAPPED; writes needing a new COW exception are queued on
 * the pending exception and return DM_MAPIO_SUBMITTED; an invalid or
 * (for writes) overflowed snapshot returns DM_MAPIO_KILL.
 *
 * Lock ordering: s->lock is taken for read first, then the per-bucket
 * exception table lock.  Both are dropped before anything that can
 * sleep or submit I/O (alloc_pending_exception, start_copy,
 * start_full_bio, zero_exception).
 */
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;
	struct dm_exception_table_lock lock;

	init_tracked_chunk(bio);

	/* Flushes are passed straight through to the COW device. */
	if (bio->bi_opf & REQ_PREFLUSH) {
		bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
	dm_exception_table_lock_init(s, chunk, &lock);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return DM_MAPIO_KILL;

	/* Throttle writes against the number of in-progress COW copies. */
	if (bio_data_dir(bio) == WRITE) {
		while (unlikely(!wait_for_in_progress(s, false)))
			; /* wait_for_in_progress() has slept */
	}

	/* Lock order: snapshot lock (read), then exception-table bucket. */
	down_read(&s->lock);
	dm_exception_table_lock(&lock);

	/* Re-check validity now that we hold the lock. */
	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
	    bio_data_dir(bio) == WRITE)) {
		r = DM_MAPIO_KILL;
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
			/*
			 * passdown discard to origin (without triggering
			 * snapshot exceptions via do_origin; doing so would
			 * defeat the goal of freeing space in origin that is
			 * implied by the "discard_passdown_origin" feature)
			 */
			bio_set_dev(bio, s->origin->bdev);
			track_chunk(s, bio, chunk);
			goto out_unlock;
		}
		/* discard to snapshot (target_bio_nr == 0) zeroes exceptions */
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
		    io_overlaps_chunk(s, bio)) {
			/* Whole-chunk discard: zero the COW chunk instead. */
			dm_exception_table_unlock(&lock);
			up_read(&s->lock);
			zero_exception(s, e, bio, chunk);
			r = DM_MAPIO_SUBMITTED; /* discard is not issued */
			goto out;
		}
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/*
		 * If no exception exists, complete discard immediately
		 * otherwise it'll trigger copy-out.
		 */
		bio_endio(bio);
		r = DM_MAPIO_SUBMITTED;
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_data_dir(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			/*
			 * Allocation may sleep: drop the bucket lock, then
			 * re-check for a completed exception that may have
			 * raced in while it was released.
			 */
			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(s);
			dm_exception_table_lock(&lock);

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				/*
				 * Could not claim the chunk: mark the
				 * snapshot overflowed or invalid under the
				 * write lock.
				 */
				dm_exception_table_unlock(&lock);
				up_read(&s->lock);

				down_write(&s->lock);

				if (s->store->userspace_supports_overflow) {
					if (s->valid && !s->snapshot_overflowed) {
						s->snapshot_overflowed = 1;
						DMERR("Snapshot overflowed: Unable to allocate exception.");
					}
				} else
					__invalidate_snapshot(s, -ENOMEM);
				up_write(&s->lock);

				r = DM_MAPIO_KILL;
				goto out;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		/*
		 * A write covering the whole chunk can be used directly as
		 * the COW data instead of reading from the origin first.
		 */
		if (!pe->started && io_overlaps_chunk(s, bio)) {
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by the exception table lock */
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_copy(pe);
			goto out;
		}
	} else {
		/* Read of an unremapped chunk: serve it from the origin. */
		bio_set_dev(bio, s->origin->bdev);
		track_chunk(s, bio, chunk);
	}

out_unlock:
	dm_exception_table_unlock(&lock);
	up_read(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.
 * Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	/* Flushes: bio 0 goes to the origin, bio 1 to the COW device. */
	if (bio->bi_opf & REQ_PREFLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio_set_dev(bio, s->origin->bdev);
		else
			bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/* Once merging, discards no longer effect change */
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	/*
	 * The merge path takes the whole snapshot lock as a writer; no
	 * per-bucket exception-table locking is used here.
	 */
	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_data_dir(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio_set_dev(bio, s->origin->bdev);
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_data_dir(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio_set_dev(bio, s->origin->bdev);

	/* Writes to the origin may trigger exceptions in other snapshots. */
	if (bio_data_dir(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio, false);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}

/*
 * End-of-I/O hook: drop the tracked-chunk reference taken by
 * track_chunk() in snapshot_map()/snapshot_merge_map().
 */
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return DM_ENDIO_DONE;
}

/* Stop the merge process before the merging snapshot is suspended. */
static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

/*
 * Refuse to resume until an exception-store handover between two
 * snapshots sharing this COW device can complete: the handover source
 * must not be resumed, and it must already be suspended.
 */
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

/*
 * Resume the snapshot, performing an exception-store handover if
 * another snapshot shares this COW device.  The origin device (or the
 * merging snapshot that acts as one) is suspended around the handover,
 * and a running merge is stopped and restarted across it.
 */
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		/* No origin target: a merging snapshot may play that role. */
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	/* Do not suspend ourselves. */
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		/* Pin the origin device so it cannot vanish under us. */
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		/*
		 * Both snapshot locks are taken; the nested annotation
		 * tells lockdep this double-acquire is intentional.
		 */
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

/* Smallest chunk size of all snapshots attached to @bdev's origin. */
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
22991e03f97eSMikulas Patocka */ 23001e03f97eSMikulas Patocka snapshot_resume(ti); 23011e03f97eSMikulas Patocka 23021e03f97eSMikulas Patocka /* 2303542f9038SMike Snitzer * snapshot-merge acts as an origin, so set ti->max_io_len 23041e03f97eSMikulas Patocka */ 2305542f9038SMike Snitzer ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev); 23061e03f97eSMikulas Patocka 23071e03f97eSMikulas Patocka start_merge(s); 23081e03f97eSMikulas Patocka } 23091e03f97eSMikulas Patocka 2310fd7c092eSMikulas Patocka static void snapshot_status(struct dm_target *ti, status_type_t type, 23111f4e0ff0SAlasdair G Kergon unsigned status_flags, char *result, unsigned maxlen) 23121da177e4SLinus Torvalds { 23132e4a31dfSJonathan Brassow unsigned sz = 0; 2314028867acSAlasdair G Kergon struct dm_snapshot *snap = ti->private; 23152e602385SMike Snitzer unsigned num_features; 23161da177e4SLinus Torvalds 23171da177e4SLinus Torvalds switch (type) { 23181da177e4SLinus Torvalds case STATUSTYPE_INFO: 231994e76572SMikulas Patocka 23204ad8d880SNikos Tsironis down_write(&snap->lock); 232194e76572SMikulas Patocka 23221da177e4SLinus Torvalds if (!snap->valid) 23232e4a31dfSJonathan Brassow DMEMIT("Invalid"); 2324d8ddb1cfSMike Snitzer else if (snap->merge_failed) 2325d8ddb1cfSMike Snitzer DMEMIT("Merge failed"); 232676c44f6dSMikulas Patocka else if (snap->snapshot_overflowed) 232776c44f6dSMikulas Patocka DMEMIT("Overflow"); 23281da177e4SLinus Torvalds else { 2329985903bbSMike Snitzer if (snap->store->type->usage) { 2330985903bbSMike Snitzer sector_t total_sectors, sectors_allocated, 2331985903bbSMike Snitzer metadata_sectors; 2332985903bbSMike Snitzer snap->store->type->usage(snap->store, 2333985903bbSMike Snitzer &total_sectors, 2334985903bbSMike Snitzer §ors_allocated, 2335985903bbSMike Snitzer &metadata_sectors); 2336985903bbSMike Snitzer DMEMIT("%llu/%llu %llu", 2337985903bbSMike Snitzer (unsigned long long)sectors_allocated, 2338985903bbSMike Snitzer (unsigned long long)total_sectors, 
2339985903bbSMike Snitzer (unsigned long long)metadata_sectors); 23401da177e4SLinus Torvalds } 23411da177e4SLinus Torvalds else 23422e4a31dfSJonathan Brassow DMEMIT("Unknown"); 23431da177e4SLinus Torvalds } 234494e76572SMikulas Patocka 23454ad8d880SNikos Tsironis up_write(&snap->lock); 234694e76572SMikulas Patocka 23471da177e4SLinus Torvalds break; 23481da177e4SLinus Torvalds 23491da177e4SLinus Torvalds case STATUSTYPE_TABLE: 23501da177e4SLinus Torvalds /* 23511da177e4SLinus Torvalds * kdevname returns a static pointer so we need 23521da177e4SLinus Torvalds * to make private copies if the output is to 23531da177e4SLinus Torvalds * make sense. 23541da177e4SLinus Torvalds */ 2355fc56f6fbSMike Snitzer DMEMIT("%s %s", snap->origin->name, snap->cow->name); 23562e602385SMike Snitzer sz += snap->store->type->status(snap->store, type, result + sz, 23571e302a92SJonathan Brassow maxlen - sz); 23582e602385SMike Snitzer num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin; 23592e602385SMike Snitzer if (num_features) { 23602e602385SMike Snitzer DMEMIT(" %u", num_features); 23612e602385SMike Snitzer if (snap->discard_zeroes_cow) 23622e602385SMike Snitzer DMEMIT(" discard_zeroes_cow"); 23632e602385SMike Snitzer if (snap->discard_passdown_origin) 23642e602385SMike Snitzer DMEMIT(" discard_passdown_origin"); 23652e602385SMike Snitzer } 23661da177e4SLinus Torvalds break; 23671da177e4SLinus Torvalds } 23681da177e4SLinus Torvalds } 23691da177e4SLinus Torvalds 23708811f46cSMike Snitzer static int snapshot_iterate_devices(struct dm_target *ti, 23718811f46cSMike Snitzer iterate_devices_callout_fn fn, void *data) 23728811f46cSMike Snitzer { 23738811f46cSMike Snitzer struct dm_snapshot *snap = ti->private; 23741e5554c8SMikulas Patocka int r; 23758811f46cSMike Snitzer 23761e5554c8SMikulas Patocka r = fn(ti, snap->origin, 0, ti->len, data); 23771e5554c8SMikulas Patocka 23781e5554c8SMikulas Patocka if (!r) 23791e5554c8SMikulas Patocka r = fn(ti, snap->cow, 0, 
get_dev_size(snap->cow->bdev), data); 23801e5554c8SMikulas Patocka 23811e5554c8SMikulas Patocka return r; 23828811f46cSMike Snitzer } 23838811f46cSMike Snitzer 23842e602385SMike Snitzer static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits) 23852e602385SMike Snitzer { 23862e602385SMike Snitzer struct dm_snapshot *snap = ti->private; 23872e602385SMike Snitzer 23882e602385SMike Snitzer if (snap->discard_zeroes_cow) { 23892e602385SMike Snitzer struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 23902e602385SMike Snitzer 23913ee25485SMike Snitzer down_read(&_origins_lock); 23923ee25485SMike Snitzer 23932e602385SMike Snitzer (void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL); 23942e602385SMike Snitzer if (snap_src && snap_dest) 23952e602385SMike Snitzer snap = snap_src; 23962e602385SMike Snitzer 23972e602385SMike Snitzer /* All discards are split on chunk_size boundary */ 23982e602385SMike Snitzer limits->discard_granularity = snap->store->chunk_size; 23992e602385SMike Snitzer limits->max_discard_sectors = snap->store->chunk_size; 24003ee25485SMike Snitzer 24013ee25485SMike Snitzer up_read(&_origins_lock); 24022e602385SMike Snitzer } 24032e602385SMike Snitzer } 24048811f46cSMike Snitzer 24051da177e4SLinus Torvalds /*----------------------------------------------------------------- 24061da177e4SLinus Torvalds * Origin methods 24071da177e4SLinus Torvalds *---------------------------------------------------------------*/ 24089eaae8ffSMikulas Patocka 24099eaae8ffSMikulas Patocka /* 24109eaae8ffSMikulas Patocka * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any 24119eaae8ffSMikulas Patocka * supplied bio was ignored. The caller may submit it immediately. 24129eaae8ffSMikulas Patocka * (No remapping actually occurs as the origin is always a direct linear 24139eaae8ffSMikulas Patocka * map.) 
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe, *pe2;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	struct dm_exception_table_lock lock;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			continue;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);
		dm_exception_table_lock_init(snap, chunk, &lock);

		/* Lock order: snapshot lock (read), then bucket lock. */
		down_read(&snap->lock);
		dm_exception_table_lock(&lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			/*
			 * Check exception table to see if block is already
			 * remapped in this snapshot and trigger an exception
			 * if not.
			 */
			e = dm_lookup_exception(&snap->complete, chunk);
			if (e)
				goto next_snapshot;

			/*
			 * Allocation may sleep: drop the bucket lock and
			 * re-check for races once it is re-taken.
			 */
			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(snap);
			dm_exception_table_lock(&lock);

			pe2 = __lookup_pending_exception(snap, chunk);

			if (!pe2) {
				e = dm_lookup_exception(&snap->complete, chunk);
				if (e) {
					free_pending_exception(pe);
					goto next_snapshot;
				}

				pe = __insert_pending_exception(snap, pe, chunk);
				if (!pe) {
					dm_exception_table_unlock(&lock);
					up_read(&snap->lock);

					invalidate_snapshot(snap, -ENOMEM);
					continue;
				}
			} else {
				/* Somebody beat us to it: use their pe. */
				free_pending_exception(pe);
				pe = pe2;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		dm_exception_table_unlock(&lock);
		up_read(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 *
 * With @limit set, throttle against in-progress COW copies first.
 * NOTE(review): the retry via "again" presumes wait_for_in_progress()
 * releases _origins_lock before sleeping when it returns false —
 * otherwise the down_read() after the jump would nest; confirm against
 * its definition earlier in this file.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

again:
	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o) {
		if (limit) {
			struct dm_snapshot *s;
			list_for_each_entry(s, &o->snapshots, list)
				if (unlikely(!wait_for_in_progress(s, true)))
					goto again;
		}

		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	}
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
257073dfd078SMikulas Patocka * 257173dfd078SMikulas Patocka * We scan all the overlapping exceptions in the other snapshots. 257273dfd078SMikulas Patocka * Returns 1 if anything was reallocated and must be waited for, 257373dfd078SMikulas Patocka * otherwise returns 0. 257473dfd078SMikulas Patocka * 257573dfd078SMikulas Patocka * size must be a multiple of merging_snap's chunk_size. 257673dfd078SMikulas Patocka */ 257773dfd078SMikulas Patocka static int origin_write_extent(struct dm_snapshot *merging_snap, 257873dfd078SMikulas Patocka sector_t sector, unsigned size) 257973dfd078SMikulas Patocka { 258073dfd078SMikulas Patocka int must_wait = 0; 258173dfd078SMikulas Patocka sector_t n; 258273dfd078SMikulas Patocka struct origin *o; 258373dfd078SMikulas Patocka 258473dfd078SMikulas Patocka /* 2585542f9038SMike Snitzer * The origin's __minimum_chunk_size() got stored in max_io_len 258673dfd078SMikulas Patocka * by snapshot_merge_resume(). 258773dfd078SMikulas Patocka */ 258873dfd078SMikulas Patocka down_read(&_origins_lock); 258973dfd078SMikulas Patocka o = __lookup_origin(merging_snap->origin->bdev); 2590542f9038SMike Snitzer for (n = 0; n < size; n += merging_snap->ti->max_io_len) 259173dfd078SMikulas Patocka if (__origin_write(&o->snapshots, sector + n, NULL) == 259273dfd078SMikulas Patocka DM_MAPIO_SUBMITTED) 259373dfd078SMikulas Patocka must_wait = 1; 259473dfd078SMikulas Patocka up_read(&_origins_lock); 259573dfd078SMikulas Patocka 259673dfd078SMikulas Patocka return must_wait; 259773dfd078SMikulas Patocka } 259873dfd078SMikulas Patocka 259973dfd078SMikulas Patocka /* 26001da177e4SLinus Torvalds * Origin: maps a linear range of a device, with hooks for snapshotting. 
26011da177e4SLinus Torvalds */ 26021da177e4SLinus Torvalds 26031da177e4SLinus Torvalds /* 26041da177e4SLinus Torvalds * Construct an origin mapping: <dev_path> 26051da177e4SLinus Torvalds * The context for an origin is merely a 'struct dm_dev *' 26061da177e4SLinus Torvalds * pointing to the real device. 26071da177e4SLinus Torvalds */ 26081da177e4SLinus Torvalds static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) 26091da177e4SLinus Torvalds { 26101da177e4SLinus Torvalds int r; 2611599cdf3bSMikulas Patocka struct dm_origin *o; 26121da177e4SLinus Torvalds 26131da177e4SLinus Torvalds if (argc != 1) { 261472d94861SAlasdair G Kergon ti->error = "origin: incorrect number of arguments"; 26151da177e4SLinus Torvalds return -EINVAL; 26161da177e4SLinus Torvalds } 26171da177e4SLinus Torvalds 2618599cdf3bSMikulas Patocka o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL); 2619599cdf3bSMikulas Patocka if (!o) { 2620599cdf3bSMikulas Patocka ti->error = "Cannot allocate private origin structure"; 2621599cdf3bSMikulas Patocka r = -ENOMEM; 2622599cdf3bSMikulas Patocka goto bad_alloc; 26231da177e4SLinus Torvalds } 26241da177e4SLinus Torvalds 2625599cdf3bSMikulas Patocka r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev); 2626599cdf3bSMikulas Patocka if (r) { 2627599cdf3bSMikulas Patocka ti->error = "Cannot get target device"; 2628599cdf3bSMikulas Patocka goto bad_open; 2629599cdf3bSMikulas Patocka } 2630599cdf3bSMikulas Patocka 2631b735fedeSMikulas Patocka o->ti = ti; 2632599cdf3bSMikulas Patocka ti->private = o; 263355a62eefSAlasdair G Kergon ti->num_flush_bios = 1; 2634494b3ee7SMikulas Patocka 26351da177e4SLinus Torvalds return 0; 2636599cdf3bSMikulas Patocka 2637599cdf3bSMikulas Patocka bad_open: 2638599cdf3bSMikulas Patocka kfree(o); 2639599cdf3bSMikulas Patocka bad_alloc: 2640599cdf3bSMikulas Patocka return r; 26411da177e4SLinus Torvalds } 26421da177e4SLinus Torvalds 26431da177e4SLinus Torvalds static void origin_dtr(struct dm_target *ti) 
26441da177e4SLinus Torvalds { 2645599cdf3bSMikulas Patocka struct dm_origin *o = ti->private; 2646b735fedeSMikulas Patocka 2647599cdf3bSMikulas Patocka dm_put_device(ti, o->dev); 2648599cdf3bSMikulas Patocka kfree(o); 26491da177e4SLinus Torvalds } 26501da177e4SLinus Torvalds 26517de3ee57SMikulas Patocka static int origin_map(struct dm_target *ti, struct bio *bio) 26521da177e4SLinus Torvalds { 2653599cdf3bSMikulas Patocka struct dm_origin *o = ti->private; 2654298eaa89SMikulas Patocka unsigned available_sectors; 26551da177e4SLinus Torvalds 265674d46992SChristoph Hellwig bio_set_dev(bio, o->dev->bdev); 26571da177e4SLinus Torvalds 26581eff9d32SJens Axboe if (unlikely(bio->bi_opf & REQ_PREFLUSH)) 2659494b3ee7SMikulas Patocka return DM_MAPIO_REMAPPED; 2660494b3ee7SMikulas Patocka 266170246286SChristoph Hellwig if (bio_data_dir(bio) != WRITE) 2662298eaa89SMikulas Patocka return DM_MAPIO_REMAPPED; 2663298eaa89SMikulas Patocka 2664298eaa89SMikulas Patocka available_sectors = o->split_boundary - 2665298eaa89SMikulas Patocka ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1)); 2666298eaa89SMikulas Patocka 2667298eaa89SMikulas Patocka if (bio_sectors(bio) > available_sectors) 2668298eaa89SMikulas Patocka dm_accept_partial_bio(bio, available_sectors); 2669298eaa89SMikulas Patocka 26701da177e4SLinus Torvalds /* Only tell snapshots if this is a write */ 2671b2155578SMikulas Patocka return do_origin(o->dev, bio, true); 26721da177e4SLinus Torvalds } 26731da177e4SLinus Torvalds 26741da177e4SLinus Torvalds /* 2675542f9038SMike Snitzer * Set the target "max_io_len" field to the minimum of all the snapshots' 26761da177e4SLinus Torvalds * chunk sizes. 
26771da177e4SLinus Torvalds */ 26781da177e4SLinus Torvalds static void origin_resume(struct dm_target *ti) 26791da177e4SLinus Torvalds { 2680599cdf3bSMikulas Patocka struct dm_origin *o = ti->private; 26811da177e4SLinus Torvalds 2682298eaa89SMikulas Patocka o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); 2683b735fedeSMikulas Patocka 2684b735fedeSMikulas Patocka down_write(&_origins_lock); 2685b735fedeSMikulas Patocka __insert_dm_origin(o); 2686b735fedeSMikulas Patocka up_write(&_origins_lock); 2687b735fedeSMikulas Patocka } 2688b735fedeSMikulas Patocka 2689b735fedeSMikulas Patocka static void origin_postsuspend(struct dm_target *ti) 2690b735fedeSMikulas Patocka { 2691b735fedeSMikulas Patocka struct dm_origin *o = ti->private; 2692b735fedeSMikulas Patocka 2693b735fedeSMikulas Patocka down_write(&_origins_lock); 2694b735fedeSMikulas Patocka __remove_dm_origin(o); 2695b735fedeSMikulas Patocka up_write(&_origins_lock); 26961da177e4SLinus Torvalds } 26971da177e4SLinus Torvalds 2698fd7c092eSMikulas Patocka static void origin_status(struct dm_target *ti, status_type_t type, 26991f4e0ff0SAlasdair G Kergon unsigned status_flags, char *result, unsigned maxlen) 27001da177e4SLinus Torvalds { 2701599cdf3bSMikulas Patocka struct dm_origin *o = ti->private; 27021da177e4SLinus Torvalds 27031da177e4SLinus Torvalds switch (type) { 27041da177e4SLinus Torvalds case STATUSTYPE_INFO: 27051da177e4SLinus Torvalds result[0] = '\0'; 27061da177e4SLinus Torvalds break; 27071da177e4SLinus Torvalds 27081da177e4SLinus Torvalds case STATUSTYPE_TABLE: 2709599cdf3bSMikulas Patocka snprintf(result, maxlen, "%s", o->dev->name); 27101da177e4SLinus Torvalds break; 27111da177e4SLinus Torvalds } 27121da177e4SLinus Torvalds } 27131da177e4SLinus Torvalds 27148811f46cSMike Snitzer static int origin_iterate_devices(struct dm_target *ti, 27158811f46cSMike Snitzer iterate_devices_callout_fn fn, void *data) 27168811f46cSMike Snitzer { 2717599cdf3bSMikulas Patocka struct dm_origin *o = 
ti->private; 27188811f46cSMike Snitzer 2719599cdf3bSMikulas Patocka return fn(ti, o->dev, 0, ti->len, data); 27208811f46cSMike Snitzer } 27218811f46cSMike Snitzer 27221da177e4SLinus Torvalds static struct target_type origin_target = { 27231da177e4SLinus Torvalds .name = "snapshot-origin", 2724b735fedeSMikulas Patocka .version = {1, 9, 0}, 27251da177e4SLinus Torvalds .module = THIS_MODULE, 27261da177e4SLinus Torvalds .ctr = origin_ctr, 27271da177e4SLinus Torvalds .dtr = origin_dtr, 27281da177e4SLinus Torvalds .map = origin_map, 27291da177e4SLinus Torvalds .resume = origin_resume, 2730b735fedeSMikulas Patocka .postsuspend = origin_postsuspend, 27311da177e4SLinus Torvalds .status = origin_status, 27328811f46cSMike Snitzer .iterate_devices = origin_iterate_devices, 27331da177e4SLinus Torvalds }; 27341da177e4SLinus Torvalds 27351da177e4SLinus Torvalds static struct target_type snapshot_target = { 27361da177e4SLinus Torvalds .name = "snapshot", 27372e602385SMike Snitzer .version = {1, 16, 0}, 27381da177e4SLinus Torvalds .module = THIS_MODULE, 27391da177e4SLinus Torvalds .ctr = snapshot_ctr, 27401da177e4SLinus Torvalds .dtr = snapshot_dtr, 27411da177e4SLinus Torvalds .map = snapshot_map, 2742cd45daffSMikulas Patocka .end_io = snapshot_end_io, 2743c1f0c183SMike Snitzer .preresume = snapshot_preresume, 27441da177e4SLinus Torvalds .resume = snapshot_resume, 27451da177e4SLinus Torvalds .status = snapshot_status, 27468811f46cSMike Snitzer .iterate_devices = snapshot_iterate_devices, 27472e602385SMike Snitzer .io_hints = snapshot_io_hints, 27481da177e4SLinus Torvalds }; 27491da177e4SLinus Torvalds 2750d698aa45SMikulas Patocka static struct target_type merge_target = { 2751d698aa45SMikulas Patocka .name = dm_snapshot_merge_target_name, 27522e602385SMike Snitzer .version = {1, 5, 0}, 2753d698aa45SMikulas Patocka .module = THIS_MODULE, 2754d698aa45SMikulas Patocka .ctr = snapshot_ctr, 2755d698aa45SMikulas Patocka .dtr = snapshot_dtr, 27563452c2a1SMikulas Patocka .map = 
snapshot_merge_map, 2757d698aa45SMikulas Patocka .end_io = snapshot_end_io, 27581e03f97eSMikulas Patocka .presuspend = snapshot_merge_presuspend, 2759d698aa45SMikulas Patocka .preresume = snapshot_preresume, 27601e03f97eSMikulas Patocka .resume = snapshot_merge_resume, 2761d698aa45SMikulas Patocka .status = snapshot_status, 2762d698aa45SMikulas Patocka .iterate_devices = snapshot_iterate_devices, 27632e602385SMike Snitzer .io_hints = snapshot_io_hints, 2764d698aa45SMikulas Patocka }; 2765d698aa45SMikulas Patocka 27661da177e4SLinus Torvalds static int __init dm_snapshot_init(void) 27671da177e4SLinus Torvalds { 27681da177e4SLinus Torvalds int r; 27691da177e4SLinus Torvalds 27704db6bfe0SAlasdair G Kergon r = dm_exception_store_init(); 27714db6bfe0SAlasdair G Kergon if (r) { 27724db6bfe0SAlasdair G Kergon DMERR("Failed to initialize exception stores"); 27734db6bfe0SAlasdair G Kergon return r; 27744db6bfe0SAlasdair G Kergon } 27754db6bfe0SAlasdair G Kergon 27761da177e4SLinus Torvalds r = init_origin_hash(); 27771da177e4SLinus Torvalds if (r) { 27781da177e4SLinus Torvalds DMERR("init_origin_hash failed."); 2779d698aa45SMikulas Patocka goto bad_origin_hash; 27801da177e4SLinus Torvalds } 27811da177e4SLinus Torvalds 27821d4989c8SJon Brassow exception_cache = KMEM_CACHE(dm_exception, 0); 27831da177e4SLinus Torvalds if (!exception_cache) { 27841da177e4SLinus Torvalds DMERR("Couldn't create exception cache."); 27851da177e4SLinus Torvalds r = -ENOMEM; 2786d698aa45SMikulas Patocka goto bad_exception_cache; 27871da177e4SLinus Torvalds } 27881da177e4SLinus Torvalds 2789028867acSAlasdair G Kergon pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); 27901da177e4SLinus Torvalds if (!pending_cache) { 27911da177e4SLinus Torvalds DMERR("Couldn't create pending cache."); 27921da177e4SLinus Torvalds r = -ENOMEM; 2793d698aa45SMikulas Patocka goto bad_pending_cache; 27941da177e4SLinus Torvalds } 27951da177e4SLinus Torvalds 27967e6358d2Smonty_pavel@sina.com r = 
dm_register_target(&snapshot_target); 27977e6358d2Smonty_pavel@sina.com if (r < 0) { 27987e6358d2Smonty_pavel@sina.com DMERR("snapshot target register failed %d", r); 27997e6358d2Smonty_pavel@sina.com goto bad_register_snapshot_target; 28007e6358d2Smonty_pavel@sina.com } 28017e6358d2Smonty_pavel@sina.com 28027e6358d2Smonty_pavel@sina.com r = dm_register_target(&origin_target); 28037e6358d2Smonty_pavel@sina.com if (r < 0) { 28047e6358d2Smonty_pavel@sina.com DMERR("Origin target register failed %d", r); 28057e6358d2Smonty_pavel@sina.com goto bad_register_origin_target; 28067e6358d2Smonty_pavel@sina.com } 28077e6358d2Smonty_pavel@sina.com 28087e6358d2Smonty_pavel@sina.com r = dm_register_target(&merge_target); 28097e6358d2Smonty_pavel@sina.com if (r < 0) { 28107e6358d2Smonty_pavel@sina.com DMERR("Merge target register failed %d", r); 28117e6358d2Smonty_pavel@sina.com goto bad_register_merge_target; 28127e6358d2Smonty_pavel@sina.com } 28137e6358d2Smonty_pavel@sina.com 28141da177e4SLinus Torvalds return 0; 28151da177e4SLinus Torvalds 2816d698aa45SMikulas Patocka bad_register_merge_target: 28171da177e4SLinus Torvalds dm_unregister_target(&origin_target); 2818d698aa45SMikulas Patocka bad_register_origin_target: 28191da177e4SLinus Torvalds dm_unregister_target(&snapshot_target); 2820034a186dSJonathan Brassow bad_register_snapshot_target: 28217e6358d2Smonty_pavel@sina.com kmem_cache_destroy(pending_cache); 28227e6358d2Smonty_pavel@sina.com bad_pending_cache: 28237e6358d2Smonty_pavel@sina.com kmem_cache_destroy(exception_cache); 28247e6358d2Smonty_pavel@sina.com bad_exception_cache: 28257e6358d2Smonty_pavel@sina.com exit_origin_hash(); 28267e6358d2Smonty_pavel@sina.com bad_origin_hash: 2827034a186dSJonathan Brassow dm_exception_store_exit(); 2828d698aa45SMikulas Patocka 28291da177e4SLinus Torvalds return r; 28301da177e4SLinus Torvalds } 28311da177e4SLinus Torvalds 28321da177e4SLinus Torvalds static void __exit dm_snapshot_exit(void) 28331da177e4SLinus Torvalds { 
283410d3bd09SMikulas Patocka dm_unregister_target(&snapshot_target); 283510d3bd09SMikulas Patocka dm_unregister_target(&origin_target); 2836d698aa45SMikulas Patocka dm_unregister_target(&merge_target); 28371da177e4SLinus Torvalds 28381da177e4SLinus Torvalds exit_origin_hash(); 28391da177e4SLinus Torvalds kmem_cache_destroy(pending_cache); 28401da177e4SLinus Torvalds kmem_cache_destroy(exception_cache); 28414db6bfe0SAlasdair G Kergon 28424db6bfe0SAlasdair G Kergon dm_exception_store_exit(); 28431da177e4SLinus Torvalds } 28441da177e4SLinus Torvalds 28451da177e4SLinus Torvalds /* Module hooks */ 28461da177e4SLinus Torvalds module_init(dm_snapshot_init); 28471da177e4SLinus Torvalds module_exit(dm_snapshot_exit); 28481da177e4SLinus Torvalds 28491da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " snapshot target"); 28501da177e4SLinus Torvalds MODULE_AUTHOR("Joe Thornber"); 28511da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 285223cb2109SMikulas Patocka MODULE_ALIAS("dm-snapshot-origin"); 285323cb2109SMikulas Patocka MODULE_ALIAS("dm-snapshot-merge"); 2854