/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-metadata.h"

#include "persistent-data/dm-array.h"
#include "persistent-data/dm-bitset.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX   "cache metadata"

#define CACHE_SUPERBLOCK_MAGIC 06142003
#define CACHE_SUPERBLOCK_LOCATION 0

/*
 * defines a range of metadata versions that this module can handle.
 */
#define MIN_CACHE_VERSION 1
#define MAX_CACHE_VERSION 1

#define CACHE_METADATA_CACHE_SIZE 64

/*
 *  3 for btree insert +
 *  2 for btree lookup used within space map
 */
#define CACHE_MAX_CONCURRENT_LOCKS 5
#define SPACE_MAP_ROOT_SIZE 128

enum superblock_flag_bits {
	/* for spotting crashes that would invalidate the dirty bitset */
	CLEAN_SHUTDOWN,
	/* metadata must be checked using the tools */
	NEEDS_CHECK,
};

/*
 * Each mapping from cache block -> origin block carries a set of flags.
 */
enum mapping_bits {
	/*
	 * A valid mapping.  Because we're using an array we clear this
	 * flag for a non-existent mapping.
	 */
	M_VALID = 1,

	/*
	 * The data on the cache is different from that on the origin.
	 */
	M_DIRTY = 2
};

struct cache_disk_superblock {
	__le32 csum;
	__le32 flags;
	__le64 blocknr;

	__u8 uuid[16];
	__le64 magic;
	__le32 version;

	__u8 policy_name[CACHE_POLICY_NAME_SIZE];
	__le32 policy_hint_size;

	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
	__le64 mapping_root;
	__le64 hint_root;

	__le64 discard_root;
	__le64 discard_block_size;
	__le64 discard_nr_blocks;

	__le32 data_block_size;
	__le32 metadata_block_size;
	__le32 cache_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;

	__le32 read_hits;
	__le32 read_misses;
	__le32 write_hits;
	__le32 write_misses;

	__le32 policy_version[CACHE_POLICY_VERSION_SIZE];
} __packed;

struct dm_cache_metadata {
	atomic_t ref_count;
	struct list_head list;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_transaction_manager *tm;

	struct dm_array_info info;
	struct dm_array_info hint_info;
	struct dm_disk_bitset discard_info;

	struct rw_semaphore root_lock;
	unsigned long flags;
	dm_block_t root;
	dm_block_t hint_root;
	dm_block_t discard_root;

	sector_t discard_block_size;
	dm_dblock_t discard_nr_blocks;

	sector_t data_block_size;
	dm_cblock_t cache_blocks;
	bool changed:1;
	bool clean_when_opened:1;

	char policy_name[CACHE_POLICY_NAME_SIZE];
	unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
	size_t policy_hint_size;
	struct dm_cache_statistics stats;

	/*
	 * Reading the space map root can fail, so we read it into this
	 * buffer before the superblock is locked and updated.
	 */
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * Set if a transaction has to be aborted but the attempt to roll
	 * back to the previous (good) transaction failed.  The only
	 * metadata operation permissible in this state is the closing of
	 * the device.
	 */
	bool fail_io:1;
};

/*-------------------------------------------------------------------
 * superblock validator
 *-----------------------------------------------------------------*/

#define SUPERBLOCK_CSUM_XOR 9031977

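/*
 * The checksum covers everything from the 'flags' field to the end of the
 * metadata block, i.e. the whole superblock apart from the csum field
 * itself.
 */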
static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t sb_block_size)
{
	struct cache_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      sb_block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}

static int check_metadata_version(struct cache_disk_superblock *disk_super)
{
	uint32_t metadata_version = le32_to_cpu(disk_super->version);
	if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
		DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
		      metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
		return -EINVAL;
	}

	return 0;
}

static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t sb_block_size)
{
	struct cache_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: wanted %llu",
		      le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: wanted %llu",
		      le64_to_cpu(disk_super->magic),
		      (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     sb_block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return check_metadata_version(disk_super);
}

static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};

/*----------------------------------------------------------------*/

static int superblock_read_lock(struct dm_cache_metadata *cmd,
				struct dm_block **sblock)
{
	return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
			       &sb_validator, sblock);
}

static int superblock_lock_zero(struct dm_cache_metadata *cmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}

static int superblock_lock(struct dm_cache_metadata *cmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}

/*----------------------------------------------------------------*/

static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = true;
	for (i = 0; i < sb_block_size; i++) {
		if (data_le[i] != zero) {
			*result = false;
			break;
		}
	}

	return dm_bm_unlock(b);
}

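/*
 * The mappings live in a dm-array of packed __le64 values; a second array
 * of __le32 per-cblock hints is only set up if the policy supplies hints.
 */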
static void __setup_mapping_info(struct dm_cache_metadata *cmd)
{
	struct dm_btree_value_type vt;

	vt.context = NULL;
	vt.size = sizeof(__le64);
	vt.inc = NULL;
	vt.dec = NULL;
	vt.equal = NULL;
	dm_array_info_init(&cmd->info, cmd->tm, &vt);

	if (cmd->policy_hint_size) {
		vt.size = sizeof(__le32);
		dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
	}
}

static int __save_sm_root(struct dm_cache_metadata *cmd)
{
	int r;
	size_t metadata_len;

	r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
			       metadata_len);
}

static void __copy_sm_root(struct dm_cache_metadata *cmd,
			   struct cache_disk_superblock *disk_super)
{
	memcpy(&disk_super->metadata_space_map_root,
	       &cmd->metadata_space_map_root,
	       sizeof(cmd->metadata_space_map_root));
}

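/*
 * Zeroes the superblock location, fills in the initial on-disk fields and
 * commits the first (empty) transaction.
 */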
static int __write_initial_superblock(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;
	sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;

	/* FIXME: see if we can lose the max sectors limit */
	if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
		bdev_size = DM_CACHE_METADATA_MAX_SECTORS;

	r = dm_tm_pre_commit(cmd->tm);
	if (r < 0)
		return r;

	/*
	 * dm_sm_copy_root() can fail.  So we need to do it before we start
	 * updating the superblock.
	 */
	r = __save_sm_root(cmd);
	if (r)
		return r;

	r = superblock_lock_zero(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
	memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
	disk_super->policy_hint_size = 0;

	__copy_sm_root(cmd, disk_super);

	disk_super->mapping_root = cpu_to_le64(cmd->root);
	disk_super->hint_root = cpu_to_le64(cmd->hint_root);
	disk_super->discard_root = cpu_to_le64(cmd->discard_root);
	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
	disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
	disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
	disk_super->cache_blocks = cpu_to_le32(0);

	disk_super->read_hits = cpu_to_le32(0);
	disk_super->read_misses = cpu_to_le32(0);
	disk_super->write_hits = cpu_to_le32(0);
	disk_super->write_misses = cpu_to_le32(0);

	return dm_tm_commit(cmd->tm, sblock);
}

static int __format_metadata(struct dm_cache_metadata *cmd)
{
	int r;

	r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				 &cmd->tm, &cmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_create_with_sm failed");
		return r;
	}

	__setup_mapping_info(cmd);

	r = dm_array_empty(&cmd->info, &cmd->root);
	if (r < 0)
		goto bad;

	dm_disk_bitset_init(cmd->tm, &cmd->discard_info);

	r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
	if (r < 0)
		goto bad;

	cmd->discard_block_size = 0;
	cmd->discard_nr_blocks = 0;

	r = __write_initial_superblock(cmd);
	if (r)
		goto bad;

	cmd->clean_when_opened = true;
	return 0;

bad:
	dm_tm_destroy(cmd->tm);
	dm_sm_destroy(cmd->metadata_sm);

	return r;
}

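/*
 * Incompat features must all be supported before the metadata can be used
 * at all; compat_ro features only matter if we're opening it read/write.
 */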
static int __check_incompat_features(struct cache_disk_superblock *disk_super,
				     struct dm_cache_metadata *cmd)
{
	uint32_t features;

	features = le32_to_cpu(disk_super->incompat_flags) & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
	if (features) {
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (get_disk_ro(cmd->bdev->bd_disk))
		return 0;

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
	if (features) {
		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	return 0;
}

static int __open_metadata(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;
	unsigned long sb_flags;

	r = superblock_read_lock(cmd, &sblock);
	if (r < 0) {
		DMERR("couldn't read lock superblock");
		return r;
	}

	disk_super = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)cmd->data_block_size);
		r = -EINVAL;
		goto bad;
	}

	r = __check_incompat_features(disk_super, cmd);
	if (r < 0)
		goto bad;

	r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
			       disk_super->metadata_space_map_root,
			       sizeof(disk_super->metadata_space_map_root),
			       &cmd->tm, &cmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_open_with_sm failed");
		goto bad;
	}

	__setup_mapping_info(cmd);
	dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
	sb_flags = le32_to_cpu(disk_super->flags);
	cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
	return dm_bm_unlock(sblock);

bad:
	dm_bm_unlock(sblock);
	return r;
}

static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
				     bool format_device)
{
	int r;
	bool unformatted = false;

	r = __superblock_all_zeroes(cmd->bm, &unformatted);
	if (r)
		return r;

	if (unformatted)
		return format_device ? __format_metadata(cmd) : -EPERM;

	return __open_metadata(cmd);
}

static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
					    bool may_format_device)
{
	int r;
	cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  CACHE_METADATA_CACHE_SIZE,
					  CACHE_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(cmd->bm)) {
		DMERR("could not create block manager");
		return PTR_ERR(cmd->bm);
	}

	r = __open_or_format_metadata(cmd, may_format_device);
	if (r)
		dm_block_manager_destroy(cmd->bm);

	return r;
}

static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
{
	dm_sm_destroy(cmd->metadata_sm);
	dm_tm_destroy(cmd->tm);
	dm_block_manager_destroy(cmd->bm);
}

typedef unsigned long (*flags_mutator)(unsigned long);

static void update_flags(struct cache_disk_superblock *disk_super,
			 flags_mutator mutator)
{
	uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
	disk_super->flags = cpu_to_le32(sb_flags);
}

static unsigned long set_clean_shutdown(unsigned long flags)
{
	set_bit(CLEAN_SHUTDOWN, &flags);
	return flags;
}

static unsigned long clear_clean_shutdown(unsigned long flags)
{
	clear_bit(CLEAN_SHUTDOWN, &flags);
	return flags;
}

static void read_superblock_fields(struct dm_cache_metadata *cmd,
				   struct cache_disk_superblock *disk_super)
{
	cmd->flags = le32_to_cpu(disk_super->flags);
	cmd->root = le64_to_cpu(disk_super->mapping_root);
	cmd->hint_root = le64_to_cpu(disk_super->hint_root);
	cmd->discard_root = le64_to_cpu(disk_super->discard_root);
	cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
	cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
	cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
	cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
	strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
	cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
	cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
	cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
	cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);

	cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
	cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
	cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
	cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);

	cmd->changed = false;
}

/*
 * The mutator updates the superblock flags.
 */
static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
				     flags_mutator mutator)
{
	int r;
	struct cache_disk_superblock *disk_super;
	struct dm_block *sblock;

	r = superblock_lock(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	update_flags(disk_super, mutator);
	read_superblock_fields(cmd, disk_super);
	dm_bm_unlock(sblock);

	return dm_bm_flush(cmd->bm);
}

static int __begin_transaction(struct dm_cache_metadata *cmd)
{
	int r;
	struct cache_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = superblock_read_lock(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	read_superblock_fields(cmd, disk_super);
	dm_bm_unlock(sblock);

	return 0;
}

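/*
 * Flushes the discard bitset, saves the metadata space map root and then
 * copies the in-core fields into the superblock before committing.
 */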
static int __commit_transaction(struct dm_cache_metadata *cmd,
				flags_mutator mutator)
{
	int r;
	struct cache_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the cache_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);

	r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
			    &cmd->discard_root);
	if (r)
		return r;

	r = dm_tm_pre_commit(cmd->tm);
	if (r < 0)
		return r;

	r = __save_sm_root(cmd);
	if (r)
		return r;

	r = superblock_lock(cmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);

	disk_super->flags = cpu_to_le32(cmd->flags);
	if (mutator)
		update_flags(disk_super, mutator);

	disk_super->mapping_root = cpu_to_le64(cmd->root);
	disk_super->hint_root = cpu_to_le64(cmd->hint_root);
	disk_super->discard_root = cpu_to_le64(cmd->discard_root);
	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
	disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
	strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
	disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
	disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
	disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);

	disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
	disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
	disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
	disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
	__copy_sm_root(cmd, disk_super);

	return dm_tm_commit(cmd->tm, sblock);
}

/*----------------------------------------------------------------*/

/*
 * The mappings are held in a dm-array that has 64-bit values stored in
 * little-endian format.  The index is the cblock, the high 48 bits of the
 * value are the oblock and the low 16 bits are the flags.
 */
#define FLAGS_MASK ((1 << 16) - 1)

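/*
 * For example, oblock 3 with flags (M_VALID | M_DIRTY) packs to the 64-bit
 * value 0x30003, stored on disk as little-endian.
 */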
static __le64 pack_value(dm_oblock_t block, unsigned flags)
{
	uint64_t value = from_oblock(block);
	value <<= 16;
	value = value | (flags & FLAGS_MASK);
	return cpu_to_le64(value);
}

static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
{
	uint64_t value = le64_to_cpu(value_le);
	uint64_t b = value >> 16;
	*block = to_oblock(b);
	*flags = value & FLAGS_MASK;
}

/*----------------------------------------------------------------*/

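/*
 * Allocates a new dm_cache_metadata, creates the persistent data objects
 * and begins the first transaction with the CLEAN_SHUTDOWN flag cleared.
 */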
static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool may_format_device,
					       size_t policy_hint_size)
{
	int r;
	struct dm_cache_metadata *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&cmd->ref_count, 1);
	init_rwsem(&cmd->root_lock);
	cmd->bdev = bdev;
	cmd->data_block_size = data_block_size;
	cmd->cache_blocks = 0;
	cmd->policy_hint_size = policy_hint_size;
	cmd->changed = true;
	cmd->fail_io = false;

	r = __create_persistent_data_objects(cmd, may_format_device);
	if (r) {
		kfree(cmd);
		return ERR_PTR(r);
	}

	r = __begin_transaction_flags(cmd, clear_clean_shutdown);
	if (r < 0) {
		dm_cache_metadata_close(cmd);
		return ERR_PTR(r);
	}

	return cmd;
}

/*
 * We keep a little list of ref-counted metadata objects to prevent two
 * different target instances creating separate bufio instances.  This is
 * an issue if a table is reloaded before the suspend.
 */
static DEFINE_MUTEX(table_lock);
static LIST_HEAD(table);

static struct dm_cache_metadata *lookup(struct block_device *bdev)
{
	struct dm_cache_metadata *cmd;

	list_for_each_entry(cmd, &table, list)
		if (cmd->bdev == bdev) {
			atomic_inc(&cmd->ref_count);
			return cmd;
		}

	return NULL;
}

static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
						sector_t data_block_size,
						bool may_format_device,
						size_t policy_hint_size)
{
	struct dm_cache_metadata *cmd, *cmd2;

	mutex_lock(&table_lock);
	cmd = lookup(bdev);
	mutex_unlock(&table_lock);

	if (cmd)
		return cmd;

	cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
	if (!IS_ERR(cmd)) {
		mutex_lock(&table_lock);
		cmd2 = lookup(bdev);
		if (cmd2) {
			mutex_unlock(&table_lock);
			__destroy_persistent_data_objects(cmd);
			kfree(cmd);
			return cmd2;
		}
		list_add(&cmd->list, &table);
		mutex_unlock(&table_lock);
	}

	return cmd;
}

static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
{
	if (cmd->data_block_size != data_block_size) {
		DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
		      (unsigned long long) data_block_size,
		      (unsigned long long) cmd->data_block_size);
		return false;
	}

	return true;
}

struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
						 sector_t data_block_size,
						 bool may_format_device,
						 size_t policy_hint_size)
{
	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
						       may_format_device, policy_hint_size);

	if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
		dm_cache_metadata_close(cmd);
		return ERR_PTR(-EINVAL);
	}

	return cmd;
}

void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
	if (atomic_dec_and_test(&cmd->ref_count)) {
		mutex_lock(&table_lock);
		list_del(&cmd->list);
		mutex_unlock(&table_lock);

		if (!cmd->fail_io)
			__destroy_persistent_data_objects(cmd);
		kfree(cmd);
	}
}

/*
 * Checks that the given cache block is either unmapped or clean.
 */
static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b,
				   bool *result)
{
	int r;
	__le64 value;
	dm_oblock_t ob;
	unsigned flags;

	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
	if (r) {
		DMERR("block_unmapped_or_clean failed");
		return r;
	}

	unpack_value(value, &ob, &flags);
	*result = !((flags & M_VALID) && (flags & M_DIRTY));

	return 0;
}

static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
					dm_cblock_t begin, dm_cblock_t end,
					bool *result)
{
	int r;
	*result = true;

	while (begin != end) {
		r = block_unmapped_or_clean(cmd, begin, result);
		if (r)
			return r;

		if (!*result) {
			DMERR("cache block %llu is dirty",
			      (unsigned long long) from_cblock(begin));
			return 0;
		}

		begin = to_cblock(from_cblock(begin) + 1);
	}

	return 0;
}

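/*
 * These macros bail out immediately if the metadata is in failure mode or
 * the block manager has been switched to read-only; otherwise they take
 * the root lock for writing.
 */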
#define WRITE_LOCK(cmd) \
	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
		return -EINVAL; \
	down_write(&cmd->root_lock)

#define WRITE_LOCK_VOID(cmd) \
	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
		return; \
	down_write(&cmd->root_lock)

#define WRITE_UNLOCK(cmd) \
	up_write(&cmd->root_lock)

int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
	int r;
	bool clean;
	__le64 null_mapping = pack_value(0, 0);

	WRITE_LOCK(cmd);
	__dm_bless_for_disk(&null_mapping);

	if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
		r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
		if (r) {
			__dm_unbless_for_disk(&null_mapping);
			goto out;
		}

		if (!clean) {
			DMERR("unable to shrink cache due to dirty blocks");
			r = -EINVAL;
			__dm_unbless_for_disk(&null_mapping);
			goto out;
		}
	}

	r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
			    from_cblock(new_cache_size),
			    &null_mapping, &cmd->root);
	if (!r)
		cmd->cache_blocks = new_cache_size;
	cmd->changed = true;

out:
	WRITE_UNLOCK(cmd);

	return r;
}

int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
				   sector_t discard_block_size,
				   dm_dblock_t new_nr_entries)
{
	int r;

	WRITE_LOCK(cmd);
	r = dm_bitset_resize(&cmd->discard_info,
			     cmd->discard_root,
			     from_dblock(cmd->discard_nr_blocks),
			     from_dblock(new_nr_entries),
			     false, &cmd->discard_root);
	if (!r) {
		cmd->discard_block_size = discard_block_size;
		cmd->discard_nr_blocks = new_nr_entries;
	}

	cmd->changed = true;
	WRITE_UNLOCK(cmd);

	return r;
}

static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
	return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
				 from_dblock(b), &cmd->discard_root);
}

static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
	return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
				   from_dblock(b), &cmd->discard_root);
}

static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
			  bool *is_discarded)
{
	return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
				  from_dblock(b), &cmd->discard_root,
				  is_discarded);
}

static int __discard(struct dm_cache_metadata *cmd,
		     dm_dblock_t dblock, bool discard)
{
	int r;

	r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
	if (r)
		return r;

	cmd->changed = true;
	return 0;
}

int dm_cache_set_discard(struct dm_cache_metadata *cmd,
			 dm_dblock_t dblock, bool discard)
{
	int r;

	WRITE_LOCK(cmd);
	r = __discard(cmd, dblock, discard);
	WRITE_UNLOCK(cmd);

	return r;
}

static int __load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context)
{
	int r = 0;
	dm_block_t b;
	bool discard;

	for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
		dm_dblock_t dblock = to_dblock(b);

		if (cmd->clean_when_opened) {
			r = __is_discarded(cmd, dblock, &discard);
			if (r)
				return r;
		} else
			discard = false;

		r = fn(context, cmd->discard_block_size, dblock, discard);
		if (r)
			break;
	}

	return r;
}

int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context)
{
	int r;

	down_read(&cmd->root_lock);
	r = __load_discards(cmd, fn, context);
	up_read(&cmd->root_lock);

	return r;
}

dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
{
	dm_cblock_t r;

	down_read(&cmd->root_lock);
	r = cmd->cache_blocks;
	up_read(&cmd->root_lock);

	return r;
}

static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
{
	int r;
	__le64 value = pack_value(0, 0);

	__dm_bless_for_disk(&value);
	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
			       &value, &cmd->root);
	if (r)
		return r;

	cmd->changed = true;
	return 0;
}

int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
{
	int r;

	WRITE_LOCK(cmd);
	r = __remove(cmd, cblock);
	WRITE_UNLOCK(cmd);

	return r;
}

static int __insert(struct dm_cache_metadata *cmd,
		    dm_cblock_t cblock, dm_oblock_t oblock)
{
	int r;
	__le64 value = pack_value(oblock, M_VALID);
	__dm_bless_for_disk(&value);

	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
			       &value, &cmd->root);
	if (r)
		return r;

	cmd->changed = true;
	return 0;
}

int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
			    dm_cblock_t cblock, dm_oblock_t oblock)
{
	int r;

	WRITE_LOCK(cmd);
	r = __insert(cmd, cblock, oblock);
	WRITE_UNLOCK(cmd);

	return r;
}

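/*
 * Context passed to __load_mapping() via dm_array_walk() when replaying
 * the mappings into the policy at load time.
 */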
struct thunk {
	load_mapping_fn fn;
	void *context;

	struct dm_cache_metadata *cmd;
	bool respect_dirty_flags;
	bool hints_valid;
};

static bool policy_unchanged(struct dm_cache_metadata *cmd,
			     struct dm_cache_policy *policy)
{
	const char *policy_name = dm_cache_policy_get_name(policy);
	const unsigned *policy_version = dm_cache_policy_get_version(policy);
	size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);

	/*
	 * Ensure policy names match.
	 */
	if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
		return false;

	/*
	 * Ensure policy major versions match.
	 */
	if (cmd->policy_version[0] != policy_version[0])
		return false;

	/*
	 * Ensure policy hint sizes match.
	 */
	if (cmd->policy_hint_size != policy_hint_size)
		return false;

	return true;
}

static bool hints_array_initialized(struct dm_cache_metadata *cmd)
{
	return cmd->hint_root && cmd->policy_hint_size;
}

static bool hints_array_available(struct dm_cache_metadata *cmd,
				  struct dm_cache_policy *policy)
{
	return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
		hints_array_initialized(cmd);
}

static int __load_mapping(void *context, uint64_t cblock, void *leaf)
{
	int r = 0;
	bool dirty;
	__le64 value;
	__le32 hint_value = 0;
	dm_oblock_t oblock;
	unsigned flags;
	struct thunk *thunk = context;
	struct dm_cache_metadata *cmd = thunk->cmd;

	memcpy(&value, leaf, sizeof(value));
	unpack_value(value, &oblock, &flags);

	if (flags & M_VALID) {
		if (thunk->hints_valid) {
			r = dm_array_get_value(&cmd->hint_info, cmd->hint_root,
					       cblock, &hint_value);
			if (r && r != -ENODATA)
				return r;
		}

		dirty = thunk->respect_dirty_flags ? (flags & M_DIRTY) : true;
		r = thunk->fn(thunk->context, oblock, to_cblock(cblock),
			      dirty, le32_to_cpu(hint_value), thunk->hints_valid);
	}

	return r;
}

static int __load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn, void *context)
{
	struct thunk thunk;

	thunk.fn = fn;
	thunk.context = context;

	thunk.cmd = cmd;
	thunk.respect_dirty_flags = cmd->clean_when_opened;
	thunk.hints_valid = hints_array_available(cmd, policy);

	return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
}

int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn, void *context)
{
	int r;

	down_read(&cmd->root_lock);
	r = __load_mappings(cmd, policy, fn, context);
	up_read(&cmd->root_lock);

	return r;
}

static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
{
	int r = 0;
	__le64 value;
	dm_oblock_t oblock;
	unsigned flags;

	memcpy(&value, leaf, sizeof(value));
	unpack_value(value, &oblock, &flags);

	return r;
}

static int __dump_mappings(struct dm_cache_metadata *cmd)
{
	return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
}

void dm_cache_dump(struct dm_cache_metadata *cmd)
{
	down_read(&cmd->root_lock);
	__dump_mappings(cmd);
	up_read(&cmd->root_lock);
}

int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
	int r;

	down_read(&cmd->root_lock);
	r = cmd->changed;
	up_read(&cmd->root_lock);

	return r;
}

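/*
 * Updates the dirty flag for a single mapping, doing nothing if the flag
 * already has the requested value.
 */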
static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
{
	int r;
	unsigned flags;
	dm_oblock_t oblock;
	__le64 value;

	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
	if (r)
		return r;

	unpack_value(value, &oblock, &flags);

	if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
		/* nothing to be done */
		return 0;

	value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
	__dm_bless_for_disk(&value);

	r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
			       &value, &cmd->root);
	if (r)
		return r;

	cmd->changed = true;
	return 0;

}

int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
		       dm_cblock_t cblock, bool dirty)
{
	int r;

	WRITE_LOCK(cmd);
	r = __dirty(cmd, cblock, dirty);
	WRITE_UNLOCK(cmd);

	return r;
}

void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
{
	down_read(&cmd->root_lock);
	*stats = cmd->stats;
	up_read(&cmd->root_lock);
}

void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
{
	WRITE_LOCK_VOID(cmd);
	cmd->stats = *stats;
	WRITE_UNLOCK(cmd);
}

int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
{
	int r;
	flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
				 clear_clean_shutdown);

	WRITE_LOCK(cmd);
	r = __commit_transaction(cmd, mutator);
	if (r)
		goto out;

	r = __begin_transaction(cmd);

out:
	WRITE_UNLOCK(cmd);
	return r;
}

int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
					   dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&cmd->root_lock);
	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
	up_read(&cmd->root_lock);

	return r;
}

int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
				   dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&cmd->root_lock);
	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
	up_read(&cmd->root_lock);

	return r;
}

/*----------------------------------------------------------------*/

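/*
 * If the policy has changed, the hint array is thrown away and recreated,
 * sized to the current number of cache blocks with every hint zeroed.
 */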
static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
	int r;
	__le32 value;
	size_t hint_size;
	const char *policy_name = dm_cache_policy_get_name(policy);
	const unsigned *policy_version = dm_cache_policy_get_version(policy);

	if (!policy_name[0] ||
	    (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
		return -EINVAL;

	if (!policy_unchanged(cmd, policy)) {
		strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
		memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));

		hint_size = dm_cache_policy_get_hint_size(policy);
		if (!hint_size)
			return 0; /* short-circuit hints initialization */
		cmd->policy_hint_size = hint_size;

		if (cmd->hint_root) {
			r = dm_array_del(&cmd->hint_info, cmd->hint_root);
			if (r)
				return r;
		}

		r = dm_array_empty(&cmd->hint_info, &cmd->hint_root);
		if (r)
			return r;

		value = cpu_to_le32(0);
		__dm_bless_for_disk(&value);
		r = dm_array_resize(&cmd->hint_info, cmd->hint_root, 0,
				    from_cblock(cmd->cache_blocks),
				    &value, &cmd->hint_root);
		if (r)
			return r;
	}

	return 0;
}

static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint)
{
	struct dm_cache_metadata *cmd = context;
	__le32 value = cpu_to_le32(hint);
	int r;

	__dm_bless_for_disk(&value);

	r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
			       from_cblock(cblock), &value, &cmd->hint_root);
	cmd->changed = true;

	return r;
}

static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
	int r;

	r = begin_hints(cmd, policy);
	if (r) {
		DMERR("begin_hints failed");
		return r;
	}

	return policy_walk_mappings(policy, save_hint, cmd);
}

int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
	int r;

	WRITE_LOCK(cmd);
	r = write_hints(cmd, policy);
	WRITE_UNLOCK(cmd);

	return r;
}

int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
	return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
}

void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
{
	WRITE_LOCK_VOID(cmd);
	dm_bm_set_read_only(cmd->bm);
	WRITE_UNLOCK(cmd);
}

void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
{
	WRITE_LOCK_VOID(cmd);
	dm_bm_set_read_write(cmd->bm);
	WRITE_UNLOCK(cmd);
}

int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;

	/*
	 * We ignore fail_io for this function.
	 */
	down_write(&cmd->root_lock);
	set_bit(NEEDS_CHECK, &cmd->flags);

	r = superblock_lock(cmd, &sblock);
	if (r) {
		DMERR("couldn't read superblock");
		goto out;
	}

	disk_super = dm_block_data(sblock);
	disk_super->flags = cpu_to_le32(cmd->flags);

	dm_bm_unlock(sblock);

out:
	up_write(&cmd->root_lock);
	return r;
}

bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
{
	bool needs_check;

	down_read(&cmd->root_lock);
	needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
	up_read(&cmd->root_lock);

	return needs_check;
}

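/*
 * Throws away the current persistent data objects and reopens the metadata
 * from the last committed transaction.  If the reopen fails the device is
 * put into fail_io mode, where only a close is permitted.
 */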
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
{
	int r;

	WRITE_LOCK(cmd);
	__destroy_persistent_data_objects(cmd);
	r = __create_persistent_data_objects(cmd, false);
	if (r)
		cmd->fail_io = true;
	WRITE_UNLOCK(cmd);

	return r;
}