// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#include "dm-audit.h"

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	(IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			(IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
#define DISCARD_FILLER			0xf6
#define SALT_SIZE			16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_VERSION_4			4
#define SB_VERSION_5			5
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__le16 integrity_tag_size;
	__le32 journal_sections;
	__le64 provided_data_sectors;	/* userspace uses this value */
	__le32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__le64 recalc_sector;
	__u8 pad2[8];
	__u8 salt[SALT_SIZE];
};
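
/*
 * The fields above add up to 64 bytes, so the superblock fits comfortably
 * in the first 512-byte sector of the SB_SECTORS reserved for it; with
 * SB_FLAG_FIXED_HMAC, the tail of that sector carries the superblock MAC
 * (see sb_mac() below).
 */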

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define SB_FLAG_FIXED_PADDING		0x8
#define SB_FLAG_FIXED_HMAC		0x10

#define JOURNAL_ENTRY_ROUNDUP		8

typedef __le64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__le32 sector_lo;
			__le32 sector_hi;
		} s;
		__le64 sector;
	} u;
	commit_id_t last_bytes[];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		((je)->u.s.sector_hi = cpu_to_le32(-1))
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	((je)->u.s.sector_hi = cpu_to_le32(-2))
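
/*
 * The upper 32 bits of the sector number double as an entry state marker:
 * real sector numbers never reach 0xfffffffe, so sector_hi values of -1 and
 * -2 can flag an entry as unused or in-progress. journal_entry_set_sector()
 * publishes the entry with smp_wmb(): on 64-bit it orders earlier stores
 * (the journaled data) before the entry becomes visible, and on 32-bit it
 * additionally orders the low word before the high word that carries the
 * state.
 */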

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	struct_group(sectors,
		__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
		__u8 mac[JOURNAL_MAC_PER_SECTOR];
	);
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}
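
/*
 * Each journal sector ends in a commit_id (see struct journal_sector and
 * dm_integrity_commit_id() below). The sequence numbers cycle through
 * N_COMMIT_IDS generations as the circular journal wraps; comparing a
 * sector's commit_id against the expected generation is what lets replay
 * recognize stale or torn sections. The two helpers above step a sequence
 * number backward and forward modulo N_COMMIT_IDS.
 */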

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned int key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned int tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned int journal_pages;
	unsigned int n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned int bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned int journal_sections;
	unsigned int journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned int initial_sectors;
	unsigned int metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned int committed_section;
	unsigned int n_committed_sections;

	unsigned int uncommitted_section;
	unsigned int n_uncommitted_sections;

	unsigned int free_section;
	unsigned char free_section_entry;
	unsigned int free_sectors;

	unsigned int free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned int autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool wrote_to_journal;
	bool journal_uptodate;
	bool just_formatted;
	bool recalculate_flag;
	bool reset_recalculate_flag;
	bool discard;
	bool fix_padding;
	bool fix_hmac;
	bool legacy_recalculate;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	mempool_t recheck_pool;

	struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	enum req_op op;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned int metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned int idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)			printk(KERN_DEBUG x, ##__VA_ARGS__)
#define DEBUG_bytes(bytes, len, msg, ...)	printk(KERN_DEBUG msg "%s%*ph\n", ##__VA_ARGS__, \
						       len ? ": " : "", len, bytes)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile, protection is performed by the layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
	.prepare_fn		= dm_integrity_prepare,
	.complete_fn		= dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
	if (ic->legacy_recalculate)
		return false;
	if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
	    ic->internal_hash_alg.key || ic->journal_mac_alg.key :
	    ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
		return true;
	return false;
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
					  unsigned int j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at a wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1));	\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned int *metadata_offset)
{
	__u64 ms;
	unsigned int mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}
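
/*
 * Putting the three helpers above together, the interleaved layout (no
 * separate meta_dev) looks like this:
 *
 *	| sb + journal | metadata 0 | data 0 | metadata 1 | data 1 | ...
 *
 * where each data area holds 2^log2_interleave_sectors sectors and is
 * preceded by the metadata run carrying its tags. get_area_and_offset()
 * splits a logical sector into (area, offset), get_data_sector() maps that
 * back to a physical sector, and get_metadata_sector_and_offset() finds the
 * tag bytes within the area's metadata run. With a separate metadata
 * device there is a single area and data sectors map 1:1.
 */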

static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
		ic->sb->version = SB_VERSION_5;
	else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned int size = crypto_shash_digestsize(ic->journal_mac);

	if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
		return -EINVAL;
	}

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		return r;
	}

	r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		return r;
	}

	if (likely(wr)) {
		r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			return r;
		}
	} else {
		__u8 result[HASH_MAX_DIGESTSIZE];

		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			return r;
		}
		if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
			dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
			return -EILSEQ;
		}
	}

	return 0;
}

static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	const enum req_op op = opf & REQ_OP_MASK;
	int r;

	io_req.bi_opf = opf;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE) {
		sb_set_version(ic);
		if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, true);
			if (unlikely(r))
				return r;
		}
	}

	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r))
		return r;

	if (op == REQ_OP_READ) {
		if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, false);
			if (unlikely(r))
				return r;
		}
	}

	return 0;
}

#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3
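
/*
 * One bit in the dirty bitmap covers a power-of-two run of data blocks:
 * a sector maps to bit (sector >> (log2_sectors_per_block +
 * log2_blocks_per_bitmap_bit)). block_bitmap_op() below applies one of the
 * four operations above to a bit range, working a whole unsigned long at a
 * time when the range covers it and falling back to single-bit ops at the
 * edges.
 */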

static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
		       sector,
		       n_sectors,
		       ic->sb->log2_sectors_per_block,
		       ic->log2_blocks_per_bitmap_bit,
		       mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page)
		this_end_bit = PAGE_SIZE * 8 - 1;
	else
		this_end_bit = end_bit;

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else {
			while (bit <= this_end_bit) {
				if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
					do {
						data[bit / BITS_PER_LONG] = 0;
						bit += BITS_PER_LONG;
					} while (this_end_bit >= bit + BITS_PER_LONG - 1);
					continue;
				}
				__clear_bit(bit, data);
				bit++;
			}
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned int i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);

		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
			       unsigned int *pl_index, unsigned int *pl_offset)
{
	unsigned int sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}
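
/*
 * The journal is kept in a page_list (an array of individually allocated
 * pages) rather than one contiguous buffer. A (section, offset) pair is
 * flattened to a linear journal sector and then split into a page index
 * plus a byte offset within the page; the accessors below return pointers
 * into those pages.
 */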

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned int section, unsigned int offset, unsigned int *n_sectors)
{
	unsigned int pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
	unsigned int rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}
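
/*
 * Section layout: the first JOURNAL_BLOCK_SECTORS sectors of a section hold
 * the journal entries; entry n lives in sector n % JOURNAL_BLOCK_SECTORS at
 * slot n / JOURNAL_BLOCK_SECTORS, so consecutive entries are strided across
 * the header sectors. The rest of the section holds the journaled data
 * blocks, which access_journal_data() reaches by skipping the header.
 */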

static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned int j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		__le64 section_le;

		r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}

		section_le = cpu_to_le64(section);
		r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof(section_le));
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);

		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof(je->u.sector));
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned int j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
				dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
			}
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;

	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			unsigned int n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned int pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned int dummy;

			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}
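
/*
 * When the journal cipher behaves as a stream cipher, ic->journal_xor holds
 * a precomputed keystream and "encryption" is a page-by-page XOR of the
 * journal with that keystream, offloaded through the async_tx API. Each
 * async_xor() call accounts for one unit in comp->in_flight and reports
 * back through complete_journal_op(); section MACs are written just before
 * the first page of each section is consumed.
 */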

static void complete_journal_encrypt(void *data, int err)
{
	struct journal_completion *comp = data;

	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}
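
/*
 * do_crypt() returns true if the request is still in flight (the callback
 * will complete it later) and false if it finished synchronously or failed.
 * -EBUSY means the request was backlogged; the backlog notification
 * (-EINPROGRESS delivered to the callback) completes crypto_backoff, after
 * which the request counts as in flight.
 */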

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			  unsigned int n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned int ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			    unsigned int n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;

	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
			       unsigned int sector, unsigned int n_sectors,
			       struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned int pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_opf = opf;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
				      "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
		       unsigned int section, unsigned int n_sections,
		       struct journal_completion *comp)
{
	unsigned int sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, opf, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned int i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned int to_end;

		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE | REQ_FUA,
					   commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}
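
/*
 * The commit region is contiguous in the common case, so a single FUA write
 * suffices (io_comp starts at 1). When the region wraps past the end of the
 * circular journal it is written as two FUA writes (io_comp starts at 2),
 * one for the tail sections and one for the sections recycled at the start;
 * the encrypted variant pipelines the second encryption pass with the first
 * write when the first pass has not finished yet.
 */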

static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
			      unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned int sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_opf = REQ_OP_WRITE;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;

		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector)
			n = &range->node.rb_left;
		else if (new_range->logical_sector >= range->logical_sector + range->n_sectors)
			n = &range->node.rb_right;
		else
			return false;
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}
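
/*
 * Range locking: in-flight I/O inserts its sector range into the in_progress
 * rb-tree; an overlapping request fails to insert and parks itself on
 * wait_list instead (see wait_and_add_new_range()). When a range is removed,
 * remove_range_unlocked() retries the waiters in FIFO order, which also
 * explains the check_waiting pass above: a new range must not jump ahead of
 * an already-queued waiter it overlaps.
 */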

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;

		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;

		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned int found = NOT_FOUND;

	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);

		if (sector == j->sector)
			found = j - ic->journal_tree;

		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else
			n = j->node.rb_right;
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned int next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned int *metadata_offset, unsigned int total_size, int op)
{
#define MAY_BE_FILLER		1
#define MAY_BE_HASH		2
	unsigned int hash_offset = 0;
	unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned int to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			if (memcmp(dp, tag, to_copy)) {
				memcpy(dp, tag, to_copy);
				dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
			}
		} else {
			/* e.g.: op == TAG_CMP */

			if (likely(is_power_of_2(ic->tag_size))) {
				if (unlikely(memcmp(dp, tag, to_copy)))
					if (unlikely(!ic->discard) ||
					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
						goto thorough_test;
					}
			} else {
				unsigned int i, ts;
thorough_test:
				ts = total_size;

				for (i = 0; i < to_copy; i++, ts--) {
					if (unlikely(dp[i] != tag[i]))
						may_be &= ~MAY_BE_HASH;
					if (likely(dp[i] != DISCARD_FILLER))
						may_be &= ~MAY_BE_FILLER;
					hash_offset++;
					if (unlikely(hash_offset == ic->tag_size)) {
						if (unlikely(!may_be)) {
							dm_bufio_release(b);
							return ts;
						}
						hash_offset = 0;
						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
					}
				}
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}

		if (unlikely(!is_power_of_2(ic->tag_size)))
			hash_offset = (hash_offset + to_copy) % ic->tag_size;

		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}
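
/*
 * For TAG_CMP the return value is 0 if all tags match and a positive count
 * of the bytes that were still left to check when a definite mismatch was
 * found (negative values are I/O errors). A stored tag is accepted if it
 * equals the expected hash or, on discard-enabled devices, if it is all
 * DISCARD_FILLER bytes; the MAY_BE_HASH / MAY_BE_FILLER bits track which of
 * the two possibilities survive as each tag is scanned.
 */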

struct flush_request {
	struct dm_io_request io_req;
	struct dm_io_region io_reg;
	struct dm_integrity_c *ic;
	struct completion comp;
};

static void flush_notify(unsigned long error, void *fr_)
{
	struct flush_request *fr = fr_;

	if (unlikely(error != 0))
		dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
	complete(&fr->comp);
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
	int r;
	struct flush_request fr;

	if (!ic->meta_dev)
		flush_data = false;
	if (flush_data) {
		fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		fr.io_req.mem.type = DM_IO_KMEM;
		fr.io_req.mem.ptr.addr = NULL;
		fr.io_req.notify.fn = flush_notify;
		fr.io_req.notify.context = &fr;
		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
		fr.io_reg.bdev = ic->dev->bdev;
		fr.io_reg.sector = 0;
		fr.io_reg.count = 0;
		fr.ic = ic;
		init_completion(&fr.comp);
		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
		BUG_ON(r);
	}

	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);

	if (flush_data)
		wait_for_completion(&fr.comp);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r;

	r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;

		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__le64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned int digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto failed;
		}
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof(sector_le));
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
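
/*
 * So the per-block tag is hash(salt || sector || data) with
 * SB_FLAG_FIXED_HMAC (hash(sector || data) on older superblocks),
 * zero-padded when the digest is shorter than tag_size. On failure the
 * result is randomized, which makes the subsequent tag comparison fail
 * rather than accidentally match.
 */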
1703
integrity_recheck(struct dm_integrity_io * dio,char * checksum)1704 static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
1705 {
1706 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1707 struct dm_integrity_c *ic = dio->ic;
1708 struct bvec_iter iter;
1709 struct bio_vec bv;
1710 sector_t sector, logical_sector, area, offset;
1711 struct page *page;
1712
1713 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1714 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
1715 &dio->metadata_offset);
1716 sector = get_data_sector(ic, area, offset);
1717 logical_sector = dio->range.logical_sector;
1718
1719 page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
1720
1721 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1722 unsigned pos = 0;
1723
1724 do {
1725 sector_t alignment;
1726 char *mem;
1727 char *buffer = page_to_virt(page);
1728 int r;
1729 struct dm_io_request io_req;
1730 struct dm_io_region io_loc;
1731 io_req.bi_opf = REQ_OP_READ;
1732 io_req.mem.type = DM_IO_KMEM;
1733 io_req.mem.ptr.addr = buffer;
1734 io_req.notify.fn = NULL;
1735 io_req.client = ic->io;
1736 io_loc.bdev = ic->dev->bdev;
1737 io_loc.sector = sector;
1738 io_loc.count = ic->sectors_per_block;
1739
1740 /* Align the re-read region to the logical block size */
1741 alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
1742 alignment &= -alignment;
1743 io_loc.sector = round_down(io_loc.sector, alignment);
1744 io_loc.count += sector - io_loc.sector;
1745 buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
1746 io_loc.count = round_up(io_loc.count, alignment);
1747
1748 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
1749 if (unlikely(r)) {
1750 dio->bi_status = errno_to_blk_status(r);
1751 goto free_ret;
1752 }
1753
1754 integrity_sector_checksum(ic, logical_sector, buffer, checksum);
1755 r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
1756 &dio->metadata_offset, ic->tag_size, TAG_CMP);
1757 if (r) {
1758 if (r > 0) {
1759 DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
1760 bio->bi_bdev, logical_sector);
1761 atomic64_inc(&ic->number_of_mismatches);
1762 dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
1763 bio, logical_sector, 0);
1764 r = -EILSEQ;
1765 }
1766 dio->bi_status = errno_to_blk_status(r);
1767 goto free_ret;
1768 }
1769
1770 mem = bvec_kmap_local(&bv);
1771 memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
1772 kunmap_local(mem);
1773
1774 pos += ic->sectors_per_block << SECTOR_SHIFT;
1775 sector += ic->sectors_per_block;
1776 logical_sector += ic->sectors_per_block;
1777 } while (pos < bv.bv_len);
1778 }
1779 free_ret:
1780 mempool_free(page, &ic->recheck_pool);
1781 }
1782
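/*
* Per-bio tag work. With an internal hash, compute the checksum of every
* block in the bio and either write it to the tag area (writes) or compare
* it (reads, falling back to integrity_recheck on mismatch); for discards,
* fill the covered tags with DISCARD_FILLER. Without an internal hash, copy
* tags between the bio integrity payload and the tag area.
*/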
1783 static void integrity_metadata(struct work_struct *w)
1784 {
1785 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1786 struct dm_integrity_c *ic = dio->ic;
1787
1788 int r;
1789
1790 if (ic->internal_hash) {
1791 struct bvec_iter iter;
1792 struct bio_vec bv;
1793 unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
1794 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1795 char *checksums;
1796 unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1797 char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1798 sector_t sector;
1799 unsigned int sectors_to_process;
1800
1801 if (unlikely(ic->mode == 'R'))
1802 goto skip_io;
1803
1804 if (likely(dio->op != REQ_OP_DISCARD))
1805 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1806 GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1807 else
1808 checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1809 if (!checksums) {
1810 checksums = checksums_onstack;
1811 if (WARN_ON(extra_space &&
1812 digest_size > sizeof(checksums_onstack))) {
1813 r = -EINVAL;
1814 goto error;
1815 }
1816 }
1817
1818 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1819 unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
1820 unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1821 unsigned int max_blocks = max_size / ic->tag_size;
1822
1823 memset(checksums, DISCARD_FILLER, max_size);
1824
1825 while (bi_size) {
1826 unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1827
1828 this_step_blocks = min(this_step_blocks, max_blocks);
1829 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1830 this_step_blocks * ic->tag_size, TAG_WRITE);
1831 if (unlikely(r)) {
1832 if (likely(checksums != checksums_onstack))
1833 kfree(checksums);
1834 goto error;
1835 }
1836
1837 bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1838 }
1839
1840 if (likely(checksums != checksums_onstack))
1841 kfree(checksums);
1842 goto skip_io;
1843 }
1844
1845 sector = dio->range.logical_sector;
1846 sectors_to_process = dio->range.n_sectors;
1847
1848 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1849 struct bio_vec bv_copy = bv;
1850 unsigned int pos;
1851 char *mem, *checksums_ptr;
1852
1853 again:
1854 mem = bvec_kmap_local(&bv_copy);
1855 pos = 0;
1856 checksums_ptr = checksums;
1857 do {
1858 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1859 checksums_ptr += ic->tag_size;
1860 sectors_to_process -= ic->sectors_per_block;
1861 pos += ic->sectors_per_block << SECTOR_SHIFT;
1862 sector += ic->sectors_per_block;
1863 } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
1864 kunmap_local(mem);
1865
1866 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1867 checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1868 if (unlikely(r)) {
1869 if (likely(checksums != checksums_onstack))
1870 kfree(checksums);
1871 if (r > 0) {
1872 integrity_recheck(dio, checksums_onstack);
1873 goto skip_io;
1874 }
1875 goto error;
1876 }
1877
1878 if (!sectors_to_process)
1879 break;
1880
1881 if (unlikely(pos < bv_copy.bv_len)) {
1882 bv_copy.bv_offset += pos;
1883 bv_copy.bv_len -= pos;
1884 goto again;
1885 }
1886 }
1887
1888 if (likely(checksums != checksums_onstack))
1889 kfree(checksums);
1890 } else {
1891 struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1892
1893 if (bip) {
1894 struct bio_vec biv;
1895 struct bvec_iter iter;
1896 unsigned int data_to_process = dio->range.n_sectors;
1897
1898 sector_to_block(ic, data_to_process);
1899 data_to_process *= ic->tag_size;
1900
1901 bip_for_each_vec(biv, bip, iter) {
1902 unsigned char *tag;
1903 unsigned int this_len;
1904
1905 BUG_ON(PageHighMem(biv.bv_page));
1906 tag = bvec_virt(&biv);
1907 this_len = min(biv.bv_len, data_to_process);
1908 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1909 this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1910 if (unlikely(r))
1911 goto error;
1912 data_to_process -= this_len;
1913 if (!data_to_process)
1914 break;
1915 }
1916 }
1917 }
1918 skip_io:
1919 dec_in_flight(dio);
1920 return;
1921 error:
1922 dio->bi_status = errno_to_blk_status(r);
1923 dec_in_flight(dio);
1924 }
1925
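/*
* The map callback: split discards on max_io_len boundaries, divert flushes
* to the flush path, validate the range, the block alignment and the size
* of an attached integrity payload, then remap the bio to the data area and
* continue in dm_integrity_map_continue.
*/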
1926 static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1927 {
1928 struct dm_integrity_c *ic = ti->private;
1929 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1930 struct bio_integrity_payload *bip;
1931
1932 sector_t area, offset;
1933
1934 dio->ic = ic;
1935 dio->bi_status = 0;
1936 dio->op = bio_op(bio);
1937
1938 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1939 if (ti->max_io_len) {
1940 sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1941 unsigned int log2_max_io_len = __fls(ti->max_io_len);
1942 sector_t start_boundary = sec >> log2_max_io_len;
1943 sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1944
1945 if (start_boundary < end_boundary) {
1946 sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1947
1948 dm_accept_partial_bio(bio, len);
1949 }
1950 }
1951 }
1952
1953 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1954 submit_flush_bio(ic, dio);
1955 return DM_MAPIO_SUBMITTED;
1956 }
1957
1958 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1959 dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1960 if (unlikely(dio->fua)) {
1961 /*
1962 * Don't pass down the FUA flag because we have to flush
1963 * the disk cache anyway.
1964 */
1965 bio->bi_opf &= ~REQ_FUA;
1966 }
1967 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1968 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1969 dio->range.logical_sector, bio_sectors(bio),
1970 ic->provided_data_sectors);
1971 return DM_MAPIO_KILL;
1972 }
1973 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
1974 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1975 ic->sectors_per_block,
1976 dio->range.logical_sector, bio_sectors(bio));
1977 return DM_MAPIO_KILL;
1978 }
1979
1980 if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1981 struct bvec_iter iter;
1982 struct bio_vec bv;
1983
1984 bio_for_each_segment(bv, bio, iter) {
1985 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1986 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1987 bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1988 return DM_MAPIO_KILL;
1989 }
1990 }
1991 }
1992
1993 bip = bio_integrity(bio);
1994 if (!ic->internal_hash) {
1995 if (bip) {
1996 unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1997
1998 if (ic->log2_tag_size >= 0)
1999 wanted_tag_size <<= ic->log2_tag_size;
2000 else
2001 wanted_tag_size *= ic->tag_size;
2002 if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
2003 DMERR("Invalid integrity data size %u, expected %u",
2004 bip->bip_iter.bi_size, wanted_tag_size);
2005 return DM_MAPIO_KILL;
2006 }
2007 }
2008 } else {
2009 if (unlikely(bip != NULL)) {
2010 DMERR("Unexpected integrity data when using internal hash");
2011 return DM_MAPIO_KILL;
2012 }
2013 }
2014
2015 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
2016 return DM_MAPIO_KILL;
2017
2018 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2019 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2020 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
2021
2022 dm_integrity_map_continue(dio, true);
2023 return DM_MAPIO_SUBMITTED;
2024 }
2025
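/*
* Copy data between the bio and the journal. For reads, copy the data out
* of the journal, restoring the last bytes of each sector that are
* displaced by the commit id. For writes, copy the data in, save the
* displaced last bytes, compute and store the tag, and publish the entry by
* writing its sector number last. Returns true if the bio did not fit into
* the allocated journal entries and must be retried.
*/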
2026 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
2027 unsigned int journal_section, unsigned int journal_entry)
2028 {
2029 struct dm_integrity_c *ic = dio->ic;
2030 sector_t logical_sector;
2031 unsigned int n_sectors;
2032
2033 logical_sector = dio->range.logical_sector;
2034 n_sectors = dio->range.n_sectors;
2035 do {
2036 struct bio_vec bv = bio_iovec(bio);
2037 char *mem;
2038
2039 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
2040 bv.bv_len = n_sectors << SECTOR_SHIFT;
2041 n_sectors -= bv.bv_len >> SECTOR_SHIFT;
2042 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
2043 retry_kmap:
2044 mem = kmap_local_page(bv.bv_page);
2045 if (likely(dio->op == REQ_OP_WRITE))
2046 flush_dcache_page(bv.bv_page);
2047
2048 do {
2049 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
2050
2051 if (unlikely(dio->op == REQ_OP_READ)) {
2052 struct journal_sector *js;
2053 char *mem_ptr;
2054 unsigned int s;
2055
2056 if (unlikely(journal_entry_is_inprogress(je))) {
2057 flush_dcache_page(bv.bv_page);
2058 kunmap_local(mem);
2059
2060 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2061 goto retry_kmap;
2062 }
2063 smp_rmb();
2064 BUG_ON(journal_entry_get_sector(je) != logical_sector);
2065 js = access_journal_data(ic, journal_section, journal_entry);
2066 mem_ptr = mem + bv.bv_offset;
2067 s = 0;
2068 do {
2069 memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
2070 *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
2071 js++;
2072 mem_ptr += 1 << SECTOR_SHIFT;
2073 } while (++s < ic->sectors_per_block);
2074 #ifdef INTERNAL_VERIFY
2075 if (ic->internal_hash) {
2076 char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2077
2078 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
2079 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
2080 DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
2081 logical_sector);
2082 dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
2083 bio, logical_sector, 0);
2084 }
2085 }
2086 #endif
2087 }
2088
2089 if (!ic->internal_hash) {
2090 struct bio_integrity_payload *bip = bio_integrity(bio);
2091 unsigned int tag_todo = ic->tag_size;
2092 char *tag_ptr = journal_entry_tag(ic, je);
2093
2094 if (bip) {
2095 do {
2096 struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
2097 unsigned int tag_now = min(biv.bv_len, tag_todo);
2098 char *tag_addr;
2099
2100 BUG_ON(PageHighMem(biv.bv_page));
2101 tag_addr = bvec_virt(&biv);
2102 if (likely(dio->op == REQ_OP_WRITE))
2103 memcpy(tag_ptr, tag_addr, tag_now);
2104 else
2105 memcpy(tag_addr, tag_ptr, tag_now);
2106 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
2107 tag_ptr += tag_now;
2108 tag_todo -= tag_now;
2109 } while (unlikely(tag_todo));
2110 } else if (likely(dio->op == REQ_OP_WRITE))
2111 memset(tag_ptr, 0, tag_todo);
2112 }
2113
2114 if (likely(dio->op == REQ_OP_WRITE)) {
2115 struct journal_sector *js;
2116 unsigned int s;
2117
2118 js = access_journal_data(ic, journal_section, journal_entry);
2119 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
2120
2121 s = 0;
2122 do {
2123 je->last_bytes[s] = js[s].commit_id;
2124 } while (++s < ic->sectors_per_block);
2125
2126 if (ic->internal_hash) {
2127 unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
2128
2129 if (unlikely(digest_size > ic->tag_size)) {
2130 char checksums_onstack[HASH_MAX_DIGESTSIZE];
2131
2132 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
2133 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
2134 } else
2135 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
2136 }
2137
2138 journal_entry_set_sector(je, logical_sector);
2139 }
2140 logical_sector += ic->sectors_per_block;
2141
2142 journal_entry++;
2143 if (unlikely(journal_entry == ic->journal_section_entries)) {
2144 journal_entry = 0;
2145 journal_section++;
2146 wraparound_section(ic, &journal_section);
2147 }
2148
2149 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
2150 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
2151
2152 if (unlikely(dio->op == REQ_OP_READ))
2153 flush_dcache_page(bv.bv_page);
2154 kunmap_local(mem);
2155 } while (n_sectors);
2156
2157 if (likely(dio->op == REQ_OP_WRITE)) {
2158 smp_mb();
2159 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
2160 wake_up(&ic->copy_to_journal_wait);
2161 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2162 queue_work(ic->commit_wq, &ic->commit_work);
2163 else
2164 schedule_autocommit(ic);
2165 } else
2166 remove_range(ic, &dio->range);
2167
2168 if (unlikely(bio->bi_iter.bi_size)) {
2169 sector_t area, offset;
2170
2171 dio->range.logical_sector = logical_sector;
2172 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2173 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2174 return true;
2175 }
2176
2177 return false;
2178 }
2179
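/*
* Main request processing, called either directly from the map callback or
* from a workqueue (from_map distinguishes the two, because only the
* workqueue context may sleep). Journal-mode writes allocate journal
* entries; reads that hit data still sitting in the journal are served from
* it. The sector range is locked for the duration of the I/O, bitmap-mode
* writes may first wait for their bitmap bits to be set, and the remapped
* bio is then submitted with tag processing done synchronously (reads) or
* offloaded to the metadata workqueue (writes).
*/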
2180 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
2181 {
2182 struct dm_integrity_c *ic = dio->ic;
2183 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2184 unsigned int journal_section, journal_entry;
2185 unsigned int journal_read_pos;
2186 struct completion read_comp;
2187 bool discard_retried = false;
2188 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
2189
2190 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2191 need_sync_io = true;
2192
2193 if (need_sync_io && from_map) {
2194 INIT_WORK(&dio->work, integrity_bio_wait);
2195 queue_work(ic->offload_wq, &dio->work);
2196 return;
2197 }
2198
2199 lock_retry:
2200 spin_lock_irq(&ic->endio_wait.lock);
2201 retry:
2202 if (unlikely(dm_integrity_failed(ic))) {
2203 spin_unlock_irq(&ic->endio_wait.lock);
2204 do_endio(ic, bio);
2205 return;
2206 }
2207 dio->range.n_sectors = bio_sectors(bio);
2208 journal_read_pos = NOT_FOUND;
2209 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2210 if (dio->op == REQ_OP_WRITE) {
2211 unsigned int next_entry, i, pos;
2212 unsigned int ws, we, range_sectors;
2213
2214 dio->range.n_sectors = min(dio->range.n_sectors,
2215 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2216 if (unlikely(!dio->range.n_sectors)) {
2217 if (from_map)
2218 goto offload_to_thread;
2219 sleep_on_endio_wait(ic);
2220 goto retry;
2221 }
2222 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2223 ic->free_sectors -= range_sectors;
2224 journal_section = ic->free_section;
2225 journal_entry = ic->free_section_entry;
2226
2227 next_entry = ic->free_section_entry + range_sectors;
2228 ic->free_section_entry = next_entry % ic->journal_section_entries;
2229 ic->free_section += next_entry / ic->journal_section_entries;
2230 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2231 wraparound_section(ic, &ic->free_section);
2232
2233 pos = journal_section * ic->journal_section_entries + journal_entry;
2234 ws = journal_section;
2235 we = journal_entry;
2236 i = 0;
2237 do {
2238 struct journal_entry *je;
2239
2240 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2241 pos++;
2242 if (unlikely(pos >= ic->journal_entries))
2243 pos = 0;
2244
2245 je = access_journal_entry(ic, ws, we);
2246 BUG_ON(!journal_entry_is_unused(je));
2247 journal_entry_set_inprogress(je);
2248 we++;
2249 if (unlikely(we == ic->journal_section_entries)) {
2250 we = 0;
2251 ws++;
2252 wraparound_section(ic, &ws);
2253 }
2254 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2255
2256 spin_unlock_irq(&ic->endio_wait.lock);
2257 goto journal_read_write;
2258 } else {
2259 sector_t next_sector;
2260
2261 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2262 if (likely(journal_read_pos == NOT_FOUND)) {
2263 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2264 dio->range.n_sectors = next_sector - dio->range.logical_sector;
2265 } else {
2266 unsigned int i;
2267 unsigned int jp = journal_read_pos + 1;
2268
2269 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2270 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2271 break;
2272 }
2273 dio->range.n_sectors = i;
2274 }
2275 }
2276 }
2277 if (unlikely(!add_new_range(ic, &dio->range, true))) {
2278 /*
2279 * We must not sleep in the request routine because it could
2280 * stall bios on current->bio_list.
2281 * So, we offload the bio to a workqueue if we have to sleep.
2282 */
2283 if (from_map) {
2284 offload_to_thread:
2285 spin_unlock_irq(&ic->endio_wait.lock);
2286 INIT_WORK(&dio->work, integrity_bio_wait);
2287 queue_work(ic->wait_wq, &dio->work);
2288 return;
2289 }
2290 if (journal_read_pos != NOT_FOUND)
2291 dio->range.n_sectors = ic->sectors_per_block;
2292 wait_and_add_new_range(ic, &dio->range);
2293 /*
2294 * wait_and_add_new_range drops the spinlock, so the journal
2295 * may have been changed arbitrarily. We need to recheck.
2296 * To simplify the code, we restrict I/O size to just one block.
2297 */
2298 if (journal_read_pos != NOT_FOUND) {
2299 sector_t next_sector;
2300 unsigned int new_pos;
2301
2302 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2303 if (unlikely(new_pos != journal_read_pos)) {
2304 remove_range_unlocked(ic, &dio->range);
2305 goto retry;
2306 }
2307 }
2308 }
2309 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2310 sector_t next_sector;
2311 unsigned int new_pos;
2312
2313 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2314 if (unlikely(new_pos != NOT_FOUND) ||
2315 unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2316 remove_range_unlocked(ic, &dio->range);
2317 spin_unlock_irq(&ic->endio_wait.lock);
2318 queue_work(ic->commit_wq, &ic->commit_work);
2319 flush_workqueue(ic->commit_wq);
2320 queue_work(ic->writer_wq, &ic->writer_work);
2321 flush_workqueue(ic->writer_wq);
2322 discard_retried = true;
2323 goto lock_retry;
2324 }
2325 }
2326 spin_unlock_irq(&ic->endio_wait.lock);
2327
2328 if (unlikely(journal_read_pos != NOT_FOUND)) {
2329 journal_section = journal_read_pos / ic->journal_section_entries;
2330 journal_entry = journal_read_pos % ic->journal_section_entries;
2331 goto journal_read_write;
2332 }
2333
2334 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2335 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2336 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2337 struct bitmap_block_status *bbs;
2338
2339 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2340 spin_lock(&bbs->bio_queue_lock);
2341 bio_list_add(&bbs->bio_queue, bio);
2342 spin_unlock(&bbs->bio_queue_lock);
2343 queue_work(ic->writer_wq, &bbs->work);
2344 return;
2345 }
2346 }
2347
2348 dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2349
2350 if (need_sync_io) {
2351 init_completion(&read_comp);
2352 dio->completion = &read_comp;
2353 } else
2354 dio->completion = NULL;
2355
2356 dm_bio_record(&dio->bio_details, bio);
2357 bio_set_dev(bio, ic->dev->bdev);
2358 bio->bi_integrity = NULL;
2359 bio->bi_opf &= ~REQ_INTEGRITY;
2360 bio->bi_end_io = integrity_end_io;
2361 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2362
2363 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2364 integrity_metadata(&dio->work);
2365 dm_integrity_flush_buffers(ic, false);
2366
2367 dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2368 dio->completion = NULL;
2369
2370 submit_bio_noacct(bio);
2371
2372 return;
2373 }
2374
2375 submit_bio_noacct(bio);
2376
2377 if (need_sync_io) {
2378 wait_for_completion_io(&read_comp);
2379 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2380 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2381 goto skip_check;
2382 if (ic->mode == 'B') {
2383 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2384 dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2385 goto skip_check;
2386 }
2387
2388 if (likely(!bio->bi_status))
2389 integrity_metadata(&dio->work);
2390 else
2391 skip_check:
2392 dec_in_flight(dio);
2393 } else {
2394 INIT_WORK(&dio->work, integrity_metadata);
2395 queue_work(ic->metadata_wq, &dio->work);
2396 }
2397
2398 return;
2399
2400 journal_read_write:
2401 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2402 goto lock_retry;
2403
2404 do_endio_flush(ic, dio);
2405 }
2406
2407
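/* Workqueue trampoline: continue request processing in process context. */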
2408 static void integrity_bio_wait(struct work_struct *w)
2409 {
2410 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2411
2412 dm_integrity_map_continue(dio, false);
2413 }
2414
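/*
* If the current journal section is only partially filled, waste its
* remaining entries so that commit always operates on whole sections, and
* sanity-check the free-sector accounting.
*/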
2415 static void pad_uncommitted(struct dm_integrity_c *ic)
2416 {
2417 if (ic->free_section_entry) {
2418 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2419 ic->free_section_entry = 0;
2420 ic->free_section++;
2421 wraparound_section(ic, &ic->free_section);
2422 ic->n_uncommitted_sections++;
2423 }
2424 if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2425 (ic->n_uncommitted_sections + ic->n_committed_sections) *
2426 ic->journal_section_entries + ic->free_sectors)) {
2427 DMCRIT("journal_sections %u, journal_section_entries %u, "
2428 "n_uncommitted_sections %u, n_committed_sections %u, "
2429 "journal_section_entries %u, free_sectors %u",
2430 ic->journal_sections, ic->journal_section_entries,
2431 ic->n_uncommitted_sections, ic->n_committed_sections,
2432 ic->journal_section_entries, ic->free_sectors);
2433 }
2434 }
2435
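/*
* Commit work: wait until no journal entry is in progress, stamp every
* sector of the uncommitted sections with the current commit id, write them
* to the on-disk journal and then complete the queued flush bios. The
* writer is kicked if free journal space runs low.
*/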
2436 static void integrity_commit(struct work_struct *w)
2437 {
2438 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2439 unsigned int commit_start, commit_sections;
2440 unsigned int i, j, n;
2441 struct bio *flushes;
2442
2443 del_timer(&ic->autocommit_timer);
2444
2445 spin_lock_irq(&ic->endio_wait.lock);
2446 flushes = bio_list_get(&ic->flush_bio_list);
2447 if (unlikely(ic->mode != 'J')) {
2448 spin_unlock_irq(&ic->endio_wait.lock);
2449 dm_integrity_flush_buffers(ic, true);
2450 goto release_flush_bios;
2451 }
2452
2453 pad_uncommitted(ic);
2454 commit_start = ic->uncommitted_section;
2455 commit_sections = ic->n_uncommitted_sections;
2456 spin_unlock_irq(&ic->endio_wait.lock);
2457
2458 if (!commit_sections)
2459 goto release_flush_bios;
2460
2461 ic->wrote_to_journal = true;
2462
2463 i = commit_start;
2464 for (n = 0; n < commit_sections; n++) {
2465 for (j = 0; j < ic->journal_section_entries; j++) {
2466 struct journal_entry *je;
2467
2468 je = access_journal_entry(ic, i, j);
2469 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2470 }
2471 for (j = 0; j < ic->journal_section_sectors; j++) {
2472 struct journal_sector *js;
2473
2474 js = access_journal(ic, i, j);
2475 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2476 }
2477 i++;
2478 if (unlikely(i >= ic->journal_sections))
2479 ic->commit_seq = next_commit_seq(ic->commit_seq);
2480 wraparound_section(ic, &i);
2481 }
2482 smp_rmb();
2483
2484 write_journal(ic, commit_start, commit_sections);
2485
2486 spin_lock_irq(&ic->endio_wait.lock);
2487 ic->uncommitted_section += commit_sections;
2488 wraparound_section(ic, &ic->uncommitted_section);
2489 ic->n_uncommitted_sections -= commit_sections;
2490 ic->n_committed_sections += commit_sections;
2491 spin_unlock_irq(&ic->endio_wait.lock);
2492
2493 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2494 queue_work(ic->writer_wq, &ic->writer_work);
2495
2496 release_flush_bios:
2497 while (flushes) {
2498 struct bio *next = flushes->bi_next;
2499
2500 flushes->bi_next = NULL;
2501 do_endio(ic, flushes);
2502 flushes = next;
2503 }
2504 }
2505
2506 static void complete_copy_from_journal(unsigned long error, void *context)
2507 {
2508 struct journal_io *io = context;
2509 struct journal_completion *comp = io->comp;
2510 struct dm_integrity_c *ic = comp->ic;
2511
2512 remove_range(ic, &io->range);
2513 mempool_free(io, &ic->journal_io_mempool);
2514 if (unlikely(error != 0))
2515 dm_integrity_io_error(ic, "copying from journal", -EIO);
2516 complete_journal_op(comp);
2517 }
2518
2519 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2520 struct journal_entry *je)
2521 {
2522 unsigned int s = 0;
2523
2524 do {
2525 js->commit_id = je->last_bytes[s];
2526 js++;
2527 } while (++s < ic->sectors_per_block);
2528 }
2529
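/*
* Write committed journal sections back to the data device. Consecutive
* entries are merged into one copy, entries superseded by a newer committed
* entry are dropped, tags are verified when replaying after a crash, and
* the tags are written before the data is copied from the journal.
*/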
2530 static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
2531 unsigned int write_sections, bool from_replay)
2532 {
2533 unsigned int i, j, n;
2534 struct journal_completion comp;
2535 struct blk_plug plug;
2536
2537 blk_start_plug(&plug);
2538
2539 comp.ic = ic;
2540 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2541 init_completion(&comp.comp);
2542
2543 i = write_start;
2544 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2545 #ifndef INTERNAL_VERIFY
2546 if (unlikely(from_replay))
2547 #endif
2548 rw_section_mac(ic, i, false);
2549 for (j = 0; j < ic->journal_section_entries; j++) {
2550 struct journal_entry *je = access_journal_entry(ic, i, j);
2551 sector_t sec, area, offset;
2552 unsigned int k, l, next_loop;
2553 sector_t metadata_block;
2554 unsigned int metadata_offset;
2555 struct journal_io *io;
2556
2557 if (journal_entry_is_unused(je))
2558 continue;
2559 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2560 sec = journal_entry_get_sector(je);
2561 if (unlikely(from_replay)) {
2562 if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
2563 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2564 sec &= ~(sector_t)(ic->sectors_per_block - 1);
2565 }
2566 if (unlikely(sec >= ic->provided_data_sectors)) {
2567 journal_entry_set_unused(je);
2568 continue;
2569 }
2570 }
2571 get_area_and_offset(ic, sec, &area, &offset);
2572 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2573 for (k = j + 1; k < ic->journal_section_entries; k++) {
2574 struct journal_entry *je2 = access_journal_entry(ic, i, k);
2575 sector_t sec2, area2, offset2;
2576
2577 if (journal_entry_is_unused(je2))
2578 break;
2579 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2580 sec2 = journal_entry_get_sector(je2);
2581 if (unlikely(sec2 >= ic->provided_data_sectors))
2582 break;
2583 get_area_and_offset(ic, sec2, &area2, &offset2);
2584 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2585 break;
2586 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2587 }
2588 next_loop = k - 1;
2589
2590 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2591 io->comp = &comp;
2592 io->range.logical_sector = sec;
2593 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2594
2595 spin_lock_irq(&ic->endio_wait.lock);
2596 add_new_range_and_wait(ic, &io->range);
2597
2598 if (likely(!from_replay)) {
2599 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2600
2601 /* don't write if there is a newer committed sector */
2602 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2603 struct journal_entry *je2 = access_journal_entry(ic, i, j);
2604
2605 journal_entry_set_unused(je2);
2606 remove_journal_node(ic, &section_node[j]);
2607 j++;
2608 sec += ic->sectors_per_block;
2609 offset += ic->sectors_per_block;
2610 }
2611 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2612 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2613
2614 journal_entry_set_unused(je2);
2615 remove_journal_node(ic, &section_node[k - 1]);
2616 k--;
2617 }
2618 if (j == k) {
2619 remove_range_unlocked(ic, &io->range);
2620 spin_unlock_irq(&ic->endio_wait.lock);
2621 mempool_free(io, &ic->journal_io_mempool);
2622 goto skip_io;
2623 }
2624 for (l = j; l < k; l++)
2625 remove_journal_node(ic, &section_node[l]);
2626 }
2627 spin_unlock_irq(&ic->endio_wait.lock);
2628
2629 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2630 for (l = j; l < k; l++) {
2631 int r;
2632 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2633
2634 if (
2635 #ifndef INTERNAL_VERIFY
2636 unlikely(from_replay) &&
2637 #endif
2638 ic->internal_hash) {
2639 char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2640
2641 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2642 (char *)access_journal_data(ic, i, l), test_tag);
2643 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
2644 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2645 dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
2646 }
2647 }
2648
2649 journal_entry_set_unused(je2);
2650 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2651 ic->tag_size, TAG_WRITE);
2652 if (unlikely(r))
2653 dm_integrity_io_error(ic, "writing tags", r);
2654 }
2655
2656 atomic_inc(&comp.in_flight);
2657 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2658 (k - j) << ic->sb->log2_sectors_per_block,
2659 get_data_sector(ic, area, offset),
2660 complete_copy_from_journal, io);
2661 skip_io:
2662 j = next_loop;
2663 }
2664 }
2665
2666 dm_bufio_write_dirty_buffers_async(ic->bufio);
2667
2668 blk_finish_plug(&plug);
2669
2670 complete_journal_op(&comp);
2671 wait_for_completion_io(&comp.comp);
2672
2673 dm_integrity_flush_buffers(ic, true);
2674 }
2675
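/*
* Writer work: snapshot the committed window, write it back via
* do_journal_write, then advance the committed pointers and return the
* space to the free pool, waking writers that ran out of journal space.
*/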
2676 static void integrity_writer(struct work_struct *w)
2677 {
2678 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2679 unsigned int write_start, write_sections;
2680 unsigned int prev_free_sectors;
2681
2682 spin_lock_irq(&ic->endio_wait.lock);
2683 write_start = ic->committed_section;
2684 write_sections = ic->n_committed_sections;
2685 spin_unlock_irq(&ic->endio_wait.lock);
2686
2687 if (!write_sections)
2688 return;
2689
2690 do_journal_write(ic, write_start, write_sections, false);
2691
2692 spin_lock_irq(&ic->endio_wait.lock);
2693
2694 ic->committed_section += write_sections;
2695 wraparound_section(ic, &ic->committed_section);
2696 ic->n_committed_sections -= write_sections;
2697
2698 prev_free_sectors = ic->free_sectors;
2699 ic->free_sectors += write_sections * ic->journal_section_entries;
2700 if (unlikely(!prev_free_sectors))
2701 wake_up_locked(&ic->endio_wait);
2702
2703 spin_unlock_irq(&ic->endio_wait.lock);
2704 }
2705
2706 static void recalc_write_super(struct dm_integrity_c *ic)
2707 {
2708 int r;
2709
2710 dm_integrity_flush_buffers(ic, false);
2711 if (dm_integrity_failed(ic))
2712 return;
2713
2714 r = sync_rw_sb(ic, REQ_OP_WRITE);
2715 if (unlikely(r))
2716 dm_integrity_io_error(ic, "writing superblock", r);
2717 }
2718
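/*
* Background recalculation of integrity tags, started on resume when
* SB_FLAG_RECALCULATING is set. Walk the device from sb->recalc_sector in
* chunks (shrinking the buffers if allocation fails), lock each range, read
* the data, compute and write the tags, and periodically persist the
* superblock so progress survives a crash.
*/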
2719 static void integrity_recalc(struct work_struct *w)
2720 {
2721 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2722 size_t recalc_tags_size;
2723 u8 *recalc_buffer = NULL;
2724 u8 *recalc_tags = NULL;
2725 struct dm_integrity_range range;
2726 struct dm_io_request io_req;
2727 struct dm_io_region io_loc;
2728 sector_t area, offset;
2729 sector_t metadata_block;
2730 unsigned int metadata_offset;
2731 sector_t logical_sector, n_sectors;
2732 __u8 *t;
2733 unsigned int i;
2734 int r;
2735 unsigned int super_counter = 0;
2736 unsigned recalc_sectors = RECALC_SECTORS;
2737
2738 retry:
2739 recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO);
2740 if (!recalc_buffer) {
2741 oom:
2742 recalc_sectors >>= 1;
2743 if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
2744 goto retry;
2745 DMCRIT("out of memory for recalculate buffer - recalculation disabled");
2746 goto free_ret;
2747 }
2748 recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
2749 if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
2750 recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
2751 recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
2752 if (!recalc_tags) {
2753 vfree(recalc_buffer);
2754 recalc_buffer = NULL;
2755 goto oom;
2756 }
2757
2758 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2759
2760 spin_lock_irq(&ic->endio_wait.lock);
2761
2762 next_chunk:
2763
2764 if (unlikely(dm_post_suspending(ic->ti)))
2765 goto unlock_ret;
2766
2767 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2768 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2769 if (ic->mode == 'B') {
2770 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2771 DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2772 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2773 }
2774 goto unlock_ret;
2775 }
2776
2777 get_area_and_offset(ic, range.logical_sector, &area, &offset);
2778 range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
2779 if (!ic->meta_dev)
2780 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
2781
2782 add_new_range_and_wait(ic, &range);
2783 spin_unlock_irq(&ic->endio_wait.lock);
2784 logical_sector = range.logical_sector;
2785 n_sectors = range.n_sectors;
2786
2787 if (ic->mode == 'B') {
2788 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2789 goto advance_and_next;
2790
2791 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2792 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2793 logical_sector += ic->sectors_per_block;
2794 n_sectors -= ic->sectors_per_block;
2795 cond_resched();
2796 }
2797 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2798 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2799 n_sectors -= ic->sectors_per_block;
2800 cond_resched();
2801 }
2802 get_area_and_offset(ic, logical_sector, &area, &offset);
2803 }
2804
2805 DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2806
2807 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2808 recalc_write_super(ic);
2809 if (ic->mode == 'B')
2810 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2811
2812 super_counter = 0;
2813 }
2814
2815 if (unlikely(dm_integrity_failed(ic)))
2816 goto err;
2817
2818 io_req.bi_opf = REQ_OP_READ;
2819 io_req.mem.type = DM_IO_VMA;
2820 io_req.mem.ptr.addr = recalc_buffer;
2821 io_req.notify.fn = NULL;
2822 io_req.client = ic->io;
2823 io_loc.bdev = ic->dev->bdev;
2824 io_loc.sector = get_data_sector(ic, area, offset);
2825 io_loc.count = n_sectors;
2826
2827 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
2828 if (unlikely(r)) {
2829 dm_integrity_io_error(ic, "reading data", r);
2830 goto err;
2831 }
2832
2833 t = recalc_tags;
2834 for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2835 integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
2836 t += ic->tag_size;
2837 }
2838
2839 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2840
2841 r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_WRITE);
2842 if (unlikely(r)) {
2843 dm_integrity_io_error(ic, "writing tags", r);
2844 goto err;
2845 }
2846
2847 if (ic->mode == 'B') {
2848 sector_t start, end;
2849
2850 start = (range.logical_sector >>
2851 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2852 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2853 end = ((range.logical_sector + range.n_sectors) >>
2854 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2855 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2856 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2857 }
2858
2859 advance_and_next:
2860 cond_resched();
2861
2862 spin_lock_irq(&ic->endio_wait.lock);
2863 remove_range_unlocked(ic, &range);
2864 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2865 goto next_chunk;
2866
2867 err:
2868 remove_range(ic, &range);
2869 goto free_ret;
2870
2871 unlock_ret:
2872 spin_unlock_irq(&ic->endio_wait.lock);
2873
2874 recalc_write_super(ic);
2875
2876 free_ret:
2877 vfree(recalc_buffer);
2878 kvfree(recalc_tags);
2879 }
2880
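/*
* Work item for one bitmap block in bitmap mode: bios whose bits are
* already set may proceed immediately; for the others, set the bits in the
* in-core journal bitmap, write that bitmap block out with FUA and only
* then release the bios.
*/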
2881 static void bitmap_block_work(struct work_struct *w)
2882 {
2883 struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2884 struct dm_integrity_c *ic = bbs->ic;
2885 struct bio *bio;
2886 struct bio_list bio_queue;
2887 struct bio_list waiting;
2888
2889 bio_list_init(&waiting);
2890
2891 spin_lock(&bbs->bio_queue_lock);
2892 bio_queue = bbs->bio_queue;
2893 bio_list_init(&bbs->bio_queue);
2894 spin_unlock(&bbs->bio_queue_lock);
2895
2896 while ((bio = bio_list_pop(&bio_queue))) {
2897 struct dm_integrity_io *dio;
2898
2899 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2900
2901 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2902 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2903 remove_range(ic, &dio->range);
2904 INIT_WORK(&dio->work, integrity_bio_wait);
2905 queue_work(ic->offload_wq, &dio->work);
2906 } else {
2907 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2908 dio->range.n_sectors, BITMAP_OP_SET);
2909 bio_list_add(&waiting, bio);
2910 }
2911 }
2912
2913 if (bio_list_empty(&waiting))
2914 return;
2915
2916 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
2917 bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2918 BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2919
2920 while ((bio = bio_list_pop(&waiting))) {
2921 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2922
2923 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2924 dio->range.n_sectors, BITMAP_OP_SET);
2925
2926 remove_range(ic, &dio->range);
2927 INIT_WORK(&dio->work, integrity_bio_wait);
2928 queue_work(ic->offload_wq, &dio->work);
2929 }
2930
2931 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2932 }
2933
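/*
* Delayed work that flushes all outstanding data and metadata, then clears
* the bitmaps (only up to the recalculation boundary while recalculation is
* still running) and writes them back, completing any bios that wait for
* synchronous mode.
*/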
2934 static void bitmap_flush_work(struct work_struct *work)
2935 {
2936 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2937 struct dm_integrity_range range;
2938 unsigned long limit;
2939 struct bio *bio;
2940
2941 dm_integrity_flush_buffers(ic, false);
2942
2943 range.logical_sector = 0;
2944 range.n_sectors = ic->provided_data_sectors;
2945
2946 spin_lock_irq(&ic->endio_wait.lock);
2947 add_new_range_and_wait(ic, &range);
2948 spin_unlock_irq(&ic->endio_wait.lock);
2949
2950 dm_integrity_flush_buffers(ic, true);
2951
2952 limit = ic->provided_data_sectors;
2953 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2954 limit = le64_to_cpu(ic->sb->recalc_sector)
2955 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2956 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2957 }
2958 /*DEBUG_print("zeroing journal\n");*/
2959 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2960 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2961
2962 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
2963 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2964
2965 spin_lock_irq(&ic->endio_wait.lock);
2966 remove_range_unlocked(ic, &range);
2967 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2968 bio_endio(bio);
2969 spin_unlock_irq(&ic->endio_wait.lock);
2970 spin_lock_irq(&ic->endio_wait.lock);
2971 }
2972 spin_unlock_irq(&ic->endio_wait.lock);
2973 }
2974
2975
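/*
* Fill a range of journal sections with empty, unused entries stamped with
* the given commit sequence and write them out.
*/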
2976 static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
2977 unsigned int n_sections, unsigned char commit_seq)
2978 {
2979 unsigned int i, j, n;
2980
2981 if (!n_sections)
2982 return;
2983
2984 for (n = 0; n < n_sections; n++) {
2985 i = start_section + n;
2986 wraparound_section(ic, &i);
2987 for (j = 0; j < ic->journal_section_sectors; j++) {
2988 struct journal_sector *js = access_journal(ic, i, j);
2989
2990 BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
2991 memset(&js->sectors, 0, sizeof(js->sectors));
2992 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2993 }
2994 for (j = 0; j < ic->journal_section_entries; j++) {
2995 struct journal_entry *je = access_journal_entry(ic, i, j);
2996
2997 journal_entry_set_unused(je);
2998 }
2999 }
3000
3001 write_journal(ic, start_section, n_sections);
3002 }
3003
3004 static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
3005 {
3006 unsigned char k;
3007
3008 for (k = 0; k < N_COMMIT_IDS; k++) {
3009 if (dm_integrity_commit_id(ic, i, j, k) == id)
3010 return k;
3011 }
3012 dm_integrity_io_error(ic, "journal commit id", -EIO);
3013 return -EIO;
3014 }
3015
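/*
* Journal replay on resume: read (and, if configured, decrypt) the journal,
* use the per-sector commit ids to find the newest consistently committed
* sequence, replay the sections that were committed but not yet written
* back, and reinitialize the journal if it is inconsistent or empty.
*/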
3016 static void replay_journal(struct dm_integrity_c *ic)
3017 {
3018 unsigned int i, j;
3019 bool used_commit_ids[N_COMMIT_IDS];
3020 unsigned int max_commit_id_sections[N_COMMIT_IDS];
3021 unsigned int write_start, write_sections;
3022 unsigned int continue_section;
3023 bool journal_empty;
3024 unsigned char unused, last_used, want_commit_seq;
3025
3026 if (ic->mode == 'R')
3027 return;
3028
3029 if (ic->journal_uptodate)
3030 return;
3031
3032 last_used = 0;
3033 write_start = 0;
3034
3035 if (!ic->just_formatted) {
3036 DEBUG_print("reading journal\n");
3037 rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
3038 if (ic->journal_io)
3039 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
3040 if (ic->journal_io) {
3041 struct journal_completion crypt_comp;
3042
3043 crypt_comp.ic = ic;
3044 init_completion(&crypt_comp.comp);
3045 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
3046 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
3047 wait_for_completion(&crypt_comp.comp);
3048 }
3049 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
3050 }
3051
3052 if (dm_integrity_failed(ic))
3053 goto clear_journal;
3054
3055 journal_empty = true;
3056 memset(used_commit_ids, 0, sizeof(used_commit_ids));
3057 memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections));
3058 for (i = 0; i < ic->journal_sections; i++) {
3059 for (j = 0; j < ic->journal_section_sectors; j++) {
3060 int k;
3061 struct journal_sector *js = access_journal(ic, i, j);
3062
3063 k = find_commit_seq(ic, i, j, js->commit_id);
3064 if (k < 0)
3065 goto clear_journal;
3066 used_commit_ids[k] = true;
3067 max_commit_id_sections[k] = i;
3068 }
3069 if (journal_empty) {
3070 for (j = 0; j < ic->journal_section_entries; j++) {
3071 struct journal_entry *je = access_journal_entry(ic, i, j);
3072
3073 if (!journal_entry_is_unused(je)) {
3074 journal_empty = false;
3075 break;
3076 }
3077 }
3078 }
3079 }
3080
3081 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
3082 unused = N_COMMIT_IDS - 1;
3083 while (unused && !used_commit_ids[unused - 1])
3084 unused--;
3085 } else {
3086 for (unused = 0; unused < N_COMMIT_IDS; unused++)
3087 if (!used_commit_ids[unused])
3088 break;
3089 if (unused == N_COMMIT_IDS) {
3090 dm_integrity_io_error(ic, "journal commit ids", -EIO);
3091 goto clear_journal;
3092 }
3093 }
3094 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
3095 unused, used_commit_ids[0], used_commit_ids[1],
3096 used_commit_ids[2], used_commit_ids[3]);
3097
3098 last_used = prev_commit_seq(unused);
3099 want_commit_seq = prev_commit_seq(last_used);
3100
3101 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
3102 journal_empty = true;
3103
3104 write_start = max_commit_id_sections[last_used] + 1;
3105 if (unlikely(write_start >= ic->journal_sections))
3106 want_commit_seq = next_commit_seq(want_commit_seq);
3107 wraparound_section(ic, &write_start);
3108
3109 i = write_start;
3110 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
3111 for (j = 0; j < ic->journal_section_sectors; j++) {
3112 struct journal_sector *js = access_journal(ic, i, j);
3113
3114 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
3115 /*
3116 * This could be caused by a crash during writing.
3117 * We won't replay the inconsistent part of the
3118 * journal.
3119 */
3120 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
3121 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
3122 goto brk;
3123 }
3124 }
3125 i++;
3126 if (unlikely(i >= ic->journal_sections))
3127 want_commit_seq = next_commit_seq(want_commit_seq);
3128 wraparound_section(ic, &i);
3129 }
3130 brk:
3131
3132 if (!journal_empty) {
3133 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
3134 write_sections, write_start, want_commit_seq);
3135 do_journal_write(ic, write_start, write_sections, true);
3136 }
3137
3138 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
3139 continue_section = write_start;
3140 ic->commit_seq = want_commit_seq;
3141 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
3142 } else {
3143 unsigned int s;
3144 unsigned char erase_seq;
3145
3146 clear_journal:
3147 DEBUG_print("clearing journal\n");
3148
3149 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
3150 s = write_start;
3151 init_journal(ic, s, 1, erase_seq);
3152 s++;
3153 wraparound_section(ic, &s);
3154 if (ic->journal_sections >= 2) {
3155 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
3156 s += ic->journal_sections - 2;
3157 wraparound_section(ic, &s);
3158 init_journal(ic, s, 1, erase_seq);
3159 }
3160
3161 continue_section = 0;
3162 ic->commit_seq = next_commit_seq(erase_seq);
3163 }
3164
3165 ic->committed_section = continue_section;
3166 ic->n_committed_sections = 0;
3167
3168 ic->uncommitted_section = continue_section;
3169 ic->n_uncommitted_sections = 0;
3170
3171 ic->free_section = continue_section;
3172 ic->free_section_entry = 0;
3173 ic->free_sectors = ic->journal_entries;
3174
3175 ic->journal_tree_root = RB_ROOT;
3176 for (i = 0; i < ic->journal_entries; i++)
3177 init_journal_node(&ic->journal_tree[i]);
3178 }
3179
3180 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
3181 {
3182 DEBUG_print("%s\n", __func__);
3183
3184 if (ic->mode == 'B') {
3185 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
3186 ic->synchronous_mode = 1;
3187
3188 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3189 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
3190 flush_workqueue(ic->commit_wq);
3191 }
3192 }
3193
3194 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
3195 {
3196 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
3197
3198 DEBUG_print("%s\n", __func__);
3199
3200 dm_integrity_enter_synchronous_mode(ic);
3201
3202 return NOTIFY_DONE;
3203 }
3204
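/*
* Postsuspend: drain all work, flush data and metadata, and leave the
* on-disk state clean - in journal mode by reinitializing the journal if it
* was written to, in bitmap mode by clearing SB_FLAG_DIRTY_BITMAP.
*/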
3205 static void dm_integrity_postsuspend(struct dm_target *ti)
3206 {
3207 struct dm_integrity_c *ic = ti->private;
3208 int r;
3209
3210 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
3211
3212 del_timer_sync(&ic->autocommit_timer);
3213
3214 if (ic->recalc_wq)
3215 drain_workqueue(ic->recalc_wq);
3216
3217 if (ic->mode == 'B')
3218 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3219
3220 queue_work(ic->commit_wq, &ic->commit_work);
3221 drain_workqueue(ic->commit_wq);
3222
3223 if (ic->mode == 'J') {
3224 queue_work(ic->writer_wq, &ic->writer_work);
3225 drain_workqueue(ic->writer_wq);
3226 dm_integrity_flush_buffers(ic, true);
3227 if (ic->wrote_to_journal) {
3228 init_journal(ic, ic->free_section,
3229 ic->journal_sections - ic->free_section, ic->commit_seq);
3230 if (ic->free_section) {
3231 init_journal(ic, 0, ic->free_section,
3232 next_commit_seq(ic->commit_seq));
3233 }
3234 }
3235 }
3236
3237 if (ic->mode == 'B') {
3238 dm_integrity_flush_buffers(ic, true);
3239 #if 1
3240 /* set to 0 to test bitmap replay code */
3241 init_journal(ic, 0, ic->journal_sections, 0);
3242 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3243 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3244 if (unlikely(r))
3245 dm_integrity_io_error(ic, "writing superblock", r);
3246 #endif
3247 }
3248
3249 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3250
3251 ic->journal_uptodate = true;
3252 }
3253
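/*
* Resume: grow the bitmap if the device was extended, recover from a dirty
* bitmap (possibly scheduling recalculation), or replay the journal;
* restart recalculation where it left off and register the reboot notifier
* that switches bitmap mode to synchronous operation.
*/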
3254 static void dm_integrity_resume(struct dm_target *ti)
3255 {
3256 struct dm_integrity_c *ic = ti->private;
3257 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3258 int r;
3259
3260 DEBUG_print("resume\n");
3261
3262 ic->wrote_to_journal = false;
3263
3264 if (ic->provided_data_sectors != old_provided_data_sectors) {
3265 if (ic->provided_data_sectors > old_provided_data_sectors &&
3266 ic->mode == 'B' &&
3267 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3268 rw_journal_sectors(ic, REQ_OP_READ, 0,
3269 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3270 block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3271 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3272 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3273 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3274 }
3275
3276 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3277 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3278 if (unlikely(r))
3279 dm_integrity_io_error(ic, "writing superblock", r);
3280 }
3281
3282 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3283 DEBUG_print("resume dirty_bitmap\n");
3284 rw_journal_sectors(ic, REQ_OP_READ, 0,
3285 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3286 if (ic->mode == 'B') {
3287 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3288 !ic->reset_recalculate_flag) {
3289 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3290 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3291 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3292 BITMAP_OP_TEST_ALL_CLEAR)) {
3293 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3294 ic->sb->recalc_sector = cpu_to_le64(0);
3295 }
3296 } else {
3297 DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3298 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3299 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3300 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3301 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3302 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3303 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3304 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3305 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3306 ic->sb->recalc_sector = cpu_to_le64(0);
3307 }
3308 } else {
3309 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3310 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
3311 ic->reset_recalculate_flag) {
3312 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3313 ic->sb->recalc_sector = cpu_to_le64(0);
3314 }
3315 init_journal(ic, 0, ic->journal_sections, 0);
3316 replay_journal(ic);
3317 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3318 }
3319 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3320 if (unlikely(r))
3321 dm_integrity_io_error(ic, "writing superblock", r);
3322 } else {
3323 replay_journal(ic);
3324 if (ic->reset_recalculate_flag) {
3325 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3326 ic->sb->recalc_sector = cpu_to_le64(0);
3327 }
3328 if (ic->mode == 'B') {
3329 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3330 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3331 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3332 if (unlikely(r))
3333 dm_integrity_io_error(ic, "writing superblock", r);
3334
3335 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3336 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3337 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3338 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3339 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3340 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3341 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3342 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3343 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3344 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3345 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3346 }
3347 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3348 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3349 }
3350 }
3351
3352 DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3353 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3354 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3355
3356 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3357 if (recalc_pos < ic->provided_data_sectors) {
3358 queue_work(ic->recalc_wq, &ic->recalc_work);
3359 } else if (recalc_pos > ic->provided_data_sectors) {
3360 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3361 recalc_write_super(ic);
3362 }
3363 }
3364
3365 ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3366 ic->reboot_notifier.next = NULL;
3367 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
3368 WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3369
3370 #if 0
3371 /* set to 1 to stress test synchronous mode */
3372 dm_integrity_enter_synchronous_mode(ic);
3373 #endif
3374 }
3375
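/*
 * Report target status. STATUSTYPE_INFO emits the mismatch count, usable
 * sectors and recalculation progress; STATUSTYPE_TABLE reconstructs the
 * constructor arguments (recomputing journal_watermark from the free
 * sectors threshold); STATUSTYPE_IMA emits a key=value audit record.
 */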
3376 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3377 unsigned int status_flags, char *result, unsigned int maxlen)
3378 {
3379 struct dm_integrity_c *ic = ti->private;
3380 unsigned int arg_count;
3381 size_t sz = 0;
3382
3383 switch (type) {
3384 case STATUSTYPE_INFO:
3385 DMEMIT("%llu %llu",
3386 (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3387 ic->provided_data_sectors);
3388 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3389 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3390 else
3391 DMEMIT(" -");
3392 break;
3393
3394 case STATUSTYPE_TABLE: {
3395 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3396
3397 watermark_percentage += ic->journal_entries / 2;
3398 do_div(watermark_percentage, ic->journal_entries);
3399 arg_count = 3;
3400 arg_count += !!ic->meta_dev;
3401 arg_count += ic->sectors_per_block != 1;
3402 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3403 arg_count += ic->reset_recalculate_flag;
3404 arg_count += ic->discard;
3405 arg_count += ic->mode == 'J';	/* journal_watermark */
3406 arg_count += ic->mode == 'J';	/* commit_time */
3407 arg_count += ic->mode == 'B';	/* sectors_per_bit */
3408 arg_count += ic->mode == 'B';	/* bitmap_flush_interval */
3409 arg_count += !!ic->internal_hash_alg.alg_string;
3410 arg_count += !!ic->journal_crypt_alg.alg_string;
3411 arg_count += !!ic->journal_mac_alg.alg_string;
3412 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3413 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
3414 arg_count += ic->legacy_recalculate;
3415 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3416 ic->tag_size, ic->mode, arg_count);
3417 if (ic->meta_dev)
3418 DMEMIT(" meta_device:%s", ic->meta_dev->name);
3419 if (ic->sectors_per_block != 1)
3420 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3421 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3422 DMEMIT(" recalculate");
3423 if (ic->reset_recalculate_flag)
3424 DMEMIT(" reset_recalculate");
3425 if (ic->discard)
3426 DMEMIT(" allow_discards");
3427 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3428 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3429 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3430 if (ic->mode == 'J') {
3431 DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
3432 DMEMIT(" commit_time:%u", ic->autocommit_msec);
3433 }
3434 if (ic->mode == 'B') {
3435 DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3436 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3437 }
3438 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3439 DMEMIT(" fix_padding");
3440 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
3441 DMEMIT(" fix_hmac");
3442 if (ic->legacy_recalculate)
3443 DMEMIT(" legacy_recalculate");
3444
3445 #define EMIT_ALG(a, n) \
3446 do { \
3447 if (ic->a.alg_string) { \
3448 DMEMIT(" %s:%s", n, ic->a.alg_string); \
3449 if (ic->a.key_string) \
3450 DMEMIT(":%s", ic->a.key_string);\
3451 } \
3452 } while (0)
3453 EMIT_ALG(internal_hash_alg, "internal_hash");
3454 EMIT_ALG(journal_crypt_alg, "journal_crypt");
3455 EMIT_ALG(journal_mac_alg, "journal_mac");
3456 break;
3457 }
3458 case STATUSTYPE_IMA:
3459 DMEMIT_TARGET_NAME_VERSION(ti->type);
3460 DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c",
3461 ic->dev->name, ic->start, ic->tag_size, ic->mode);
3462
3463 if (ic->meta_dev)
3464 DMEMIT(",meta_device=%s", ic->meta_dev->name);
3465 if (ic->sectors_per_block != 1)
3466 DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT);
3467
3468 DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ?
3469 'y' : 'n');
3470 DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n');
3471 DMEMIT(",fix_padding=%c",
3472 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n');
3473 DMEMIT(",fix_hmac=%c",
3474 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n');
3475 DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n');
3476
3477 DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS);
3478 DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors);
3479 DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors);
3480 DMEMIT(";");
3481 break;
3482 }
3483 }
3484
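/*
 * When data and metadata share one device, expose only the data area
 * (past the superblock, journal and first metadata run) to the callout;
 * with a separate meta_device the whole data device is reported.
 */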
3485 static int dm_integrity_iterate_devices(struct dm_target *ti,
3486 iterate_devices_callout_fn fn, void *data)
3487 {
3488 struct dm_integrity_c *ic = ti->private;
3489
3490 if (!ic->meta_dev)
3491 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3492 else
3493 return fn(ti, ic->dev, 0, ti->len, data);
3494 }
3495
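/*
 * If the target was created with block_size > 512, propagate it as the
 * logical/physical block size and minimum I/O size, and constrain DMA
 * alignment so sub-block buffers cannot reach the device.
 */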
3496 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3497 {
3498 struct dm_integrity_c *ic = ti->private;
3499
3500 if (ic->sectors_per_block > 1) {
3501 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3502 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3503 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3504 limits->dma_alignment = limits->logical_block_size - 1;
3505 }
3506 }
3507
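/*
 * Derive the journal geometry from the superblock. A rough sketch of the
 * arithmetic, assuming 512-byte sectors, tag_size 4, one sector per block
 * and JOURNAL_SECTOR_DATA of 504 bytes (a sector minus the 8-byte commit
 * id):
 *
 *   journal_entry_size = roundup(offsetof(journal_entry, last_bytes[1]) + 4, 8)
 *                      = roundup(8 + 8 + 4, 8) = 24 bytes
 *   journal_entries_per_sector = 504 / 24 = 21
 *
 * Each section then holds journal_entries_per_sector * JOURNAL_BLOCK_SECTORS
 * entries plus the data sectors they describe.
 */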
3508 static void calculate_journal_section_size(struct dm_integrity_c *ic)
3509 {
3510 unsigned int sector_space = JOURNAL_SECTOR_DATA;
3511
3512 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3513 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3514 JOURNAL_ENTRY_ROUNDUP);
3515
3516 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3517 sector_space -= JOURNAL_MAC_PER_SECTOR;
3518 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3519 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3520 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3521 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3522 }
3523
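/*
 * Validate the on-disk layout: initial_sectors covers the superblock and
 * journal. For interleaved layouts, metadata_run is the (possibly padded)
 * number of tag sectors per interleave area, and the last data sector must
 * still fit on the device; with a separate metadata device, the tag area
 * is checked against the metadata device size instead.
 */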
3524 static int calculate_device_limits(struct dm_integrity_c *ic)
3525 {
3526 __u64 initial_sectors;
3527
3528 calculate_journal_section_size(ic);
3529 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3530 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3531 return -EINVAL;
3532 ic->initial_sectors = initial_sectors;
3533
3534 if (!ic->meta_dev) {
3535 sector_t last_sector, last_area, last_offset;
3536
3537 /* we have to maintain excessive padding for compatibility with existing volumes */
3538 __u64 metadata_run_padding =
3539 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3540 (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3541 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3542
3543 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3544 metadata_run_padding) >> SECTOR_SHIFT;
3545 if (!(ic->metadata_run & (ic->metadata_run - 1)))
3546 ic->log2_metadata_run = __ffs(ic->metadata_run);
3547 else
3548 ic->log2_metadata_run = -1;
3549
3550 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3551 last_sector = get_data_sector(ic, last_area, last_offset);
3552 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3553 return -EINVAL;
3554 } else {
3555 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3556
3557 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3558 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3559 meta_size <<= ic->log2_buffer_sectors;
3560 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3561 ic->initial_sectors + meta_size > ic->meta_device_sectors)
3562 return -EINVAL;
3563 ic->metadata_run = 1;
3564 ic->log2_metadata_run = 0;
3565 }
3566
3567 return 0;
3568 }
3569
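/*
 * Without a separate metadata device, the usable size is found by greedy
 * bit probing: try to set each bit of provided_data_sectors from high to
 * low and keep it only if calculate_device_limits() still succeeds. This
 * converges on the largest size whose data, tags and journal all fit.
 */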
3570 static void get_provided_data_sectors(struct dm_integrity_c *ic)
3571 {
3572 if (!ic->meta_dev) {
3573 int test_bit;
3574
3575 ic->provided_data_sectors = 0;
3576 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3577 __u64 prev_data_sectors = ic->provided_data_sectors;
3578
3579 ic->provided_data_sectors |= (sector_t)1 << test_bit;
3580 if (calculate_device_limits(ic))
3581 ic->provided_data_sectors = prev_data_sectors;
3582 }
3583 } else {
3584 ic->provided_data_sectors = ic->data_device_sectors;
3585 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3586 }
3587 }
3588
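/*
 * Format a fresh superblock: record the tag and block sizes, salt it when
 * fix_hmac is requested, clamp the interleave to the supported log2 range
 * and, for a separate metadata device, grow journal_sections bit by bit
 * (shrinking the buffer size if nothing fits).
 */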
3589 static int initialize_superblock(struct dm_integrity_c *ic,
3590 unsigned int journal_sectors, unsigned int interleave_sectors)
3591 {
3592 unsigned int journal_sections;
3593 int test_bit;
3594
3595 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3596 memcpy(ic->sb->magic, SB_MAGIC, 8);
3597 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3598 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3599 if (ic->journal_mac_alg.alg_string)
3600 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3601
3602 calculate_journal_section_size(ic);
3603 journal_sections = journal_sectors / ic->journal_section_sectors;
3604 if (!journal_sections)
3605 journal_sections = 1;
3606
3607 if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
3608 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
3609 get_random_bytes(ic->sb->salt, SALT_SIZE);
3610 }
3611
3612 if (!ic->meta_dev) {
3613 if (ic->fix_padding)
3614 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3615 ic->sb->journal_sections = cpu_to_le32(journal_sections);
3616 if (!interleave_sectors)
3617 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3618 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3619 ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3620 ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3621
3622 get_provided_data_sectors(ic);
3623 if (!ic->provided_data_sectors)
3624 return -EINVAL;
3625 } else {
3626 ic->sb->log2_interleave_sectors = 0;
3627
3628 get_provided_data_sectors(ic);
3629 if (!ic->provided_data_sectors)
3630 return -EINVAL;
3631
3632 try_smaller_buffer:
3633 ic->sb->journal_sections = cpu_to_le32(0);
3634 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3635 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3636 __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3637
3638 if (test_journal_sections > journal_sections)
3639 continue;
3640 ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3641 if (calculate_device_limits(ic))
3642 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3643
3644 }
3645 if (!le32_to_cpu(ic->sb->journal_sections)) {
3646 if (ic->log2_buffer_sectors > 3) {
3647 ic->log2_buffer_sectors--;
3648 goto try_smaller_buffer;
3649 }
3650 return -EINVAL;
3651 }
3652 }
3653
3654 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3655
3656 sb_set_version(ic);
3657
3658 return 0;
3659 }
3660
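/*
 * Without internal_hash the tags travel with the bios: register a
 * blk_integrity profile so the block layer attaches tag_size bytes of
 * integrity payload per data block.
 */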
3661 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3662 {
3663 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3664 struct blk_integrity bi;
3665
3666 memset(&bi, 0, sizeof(bi));
3667 bi.profile = &dm_integrity_profile;
3668 bi.tuple_size = ic->tag_size;
3669 bi.tag_size = bi.tuple_size;
3670 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3671
3672 blk_integrity_register(disk, &bi);
3673 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3674 }
3675
3676 static void dm_integrity_free_page_list(struct page_list *pl)
3677 {
3678 unsigned int i;
3679
3680 if (!pl)
3681 return;
3682 for (i = 0; pl[i].page; i++)
3683 __free_page(pl[i].page);
3684 kvfree(pl);
3685 }
3686
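/*
 * Allocate n_pages pages chained through page_list.next and terminated by
 * a NULL page, the layout that dm_integrity_free_page_list() expects.
 */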
3687 static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
3688 {
3689 struct page_list *pl;
3690 unsigned int i;
3691
3692 pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3693 if (!pl)
3694 return NULL;
3695
3696 for (i = 0; i < n_pages; i++) {
3697 pl[i].page = alloc_page(GFP_KERNEL);
3698 if (!pl[i].page) {
3699 dm_integrity_free_page_list(pl);
3700 return NULL;
3701 }
3702 if (i)
3703 pl[i - 1].next = &pl[i];
3704 }
3705 pl[i].page = NULL;
3706 pl[i].next = NULL;
3707
3708 return pl;
3709 }
3710
3711 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3712 {
3713 unsigned int i;
3714
3715 for (i = 0; i < ic->journal_sections; i++)
3716 kvfree(sl[i]);
3717 kvfree(sl);
3718 }
3719
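/*
 * Build one scatterlist per journal section, each covering the byte range
 * of the journal pages backing that section's sectors (the first and last
 * page of a section may be covered only partially).
 */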
3720 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3721 struct page_list *pl)
3722 {
3723 struct scatterlist **sl;
3724 unsigned int i;
3725
3726 sl = kvmalloc_array(ic->journal_sections,
3727 sizeof(struct scatterlist *),
3728 GFP_KERNEL | __GFP_ZERO);
3729 if (!sl)
3730 return NULL;
3731
3732 for (i = 0; i < ic->journal_sections; i++) {
3733 struct scatterlist *s;
3734 unsigned int start_index, start_offset;
3735 unsigned int end_index, end_offset;
3736 unsigned int n_pages;
3737 unsigned int idx;
3738
3739 page_list_location(ic, i, 0, &start_index, &start_offset);
3740 page_list_location(ic, i, ic->journal_section_sectors - 1,
3741 &end_index, &end_offset);
3742
3743 n_pages = (end_index - start_index + 1);
3744
3745 s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3746 GFP_KERNEL);
3747 if (!s) {
3748 dm_integrity_free_journal_scatterlist(ic, sl);
3749 return NULL;
3750 }
3751
3752 sg_init_table(s, n_pages);
3753 for (idx = start_index; idx <= end_index; idx++) {
3754 char *va = lowmem_page_address(pl[idx].page);
3755 unsigned int start = 0, end = PAGE_SIZE;
3756
3757 if (idx == start_index)
3758 start = start_offset;
3759 if (idx == end_index)
3760 end = end_offset + (1 << SECTOR_SHIFT);
3761 sg_set_buf(&s[idx - start_index], va + start, end - start);
3762 }
3763
3764 sl[i] = s;
3765 }
3766
3767 return sl;
3768 }
3769
3770 static void free_alg(struct alg_spec *a)
3771 {
3772 kfree_sensitive(a->alg_string);
3773 kfree_sensitive(a->key);
3774 memset(a, 0, sizeof(*a));
3775 }
3776
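/*
 * Parse an "option:algorithm[:hexkey]" argument such as
 * "internal_hash:hmac(sha256):<hex key>": the algorithm name is duplicated
 * into alg_string and the optional key is hex-decoded into a->key.
 */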
3777 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3778 {
3779 char *k;
3780
3781 free_alg(a);
3782
3783 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3784 if (!a->alg_string)
3785 goto nomem;
3786
3787 k = strchr(a->alg_string, ':');
3788 if (k) {
3789 *k = 0;
3790 a->key_string = k + 1;
3791 if (strlen(a->key_string) & 1)
3792 goto inval;
3793
3794 a->key_size = strlen(a->key_string) / 2;
3795 a->key = kmalloc(a->key_size, GFP_KERNEL);
3796 if (!a->key)
3797 goto nomem;
3798 if (hex2bin(a->key, a->key_string, a->key_size))
3799 goto inval;
3800 }
3801
3802 return 0;
3803 inval:
3804 *error = error_inval;
3805 return -EINVAL;
3806 nomem:
3807 *error = "Out of memory for an argument";
3808 return -ENOMEM;
3809 }
3810
3811 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3812 char *error_alg, char *error_key)
3813 {
3814 int r;
3815
3816 if (a->alg_string) {
3817 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3818 if (IS_ERR(*hash)) {
3819 *error = error_alg;
3820 r = PTR_ERR(*hash);
3821 *hash = NULL;
3822 return r;
3823 }
3824
3825 if (a->key) {
3826 r = crypto_shash_setkey(*hash, a->key, a->key_size);
3827 if (r) {
3828 *error = error_key;
3829 return r;
3830 }
3831 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3832 *error = error_key;
3833 return -ENOKEY;
3834 }
3835 }
3836
3837 return 0;
3838 }
3839
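/*
 * Allocate the in-memory journal and, if journal_crypt is set, prepare the
 * encryption state: for a cipher with block size 1 (a stream cipher), the
 * journal is XORed with a precomputed keystream (journal_xor) and the
 * cipher handle is then dropped; otherwise a per-section skcipher request
 * with a derived IV is kept in sk_requests.
 */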
3840 static int create_journal(struct dm_integrity_c *ic, char **error)
3841 {
3842 int r = 0;
3843 unsigned int i;
3844 __u64 journal_pages, journal_desc_size, journal_tree_size;
3845 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3846 struct skcipher_request *req = NULL;
3847
3848 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3849 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3850 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3851 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3852
3853 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3854 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3855 journal_desc_size = journal_pages * sizeof(struct page_list);
3856 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3857 *error = "Journal doesn't fit into memory";
3858 r = -ENOMEM;
3859 goto bad;
3860 }
3861 ic->journal_pages = journal_pages;
3862
3863 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3864 if (!ic->journal) {
3865 *error = "Could not allocate memory for journal";
3866 r = -ENOMEM;
3867 goto bad;
3868 }
3869 if (ic->journal_crypt_alg.alg_string) {
3870 unsigned int ivsize, blocksize;
3871 struct journal_completion comp;
3872
3873 comp.ic = ic;
3874 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3875 if (IS_ERR(ic->journal_crypt)) {
3876 *error = "Invalid journal cipher";
3877 r = PTR_ERR(ic->journal_crypt);
3878 ic->journal_crypt = NULL;
3879 goto bad;
3880 }
3881 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3882 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3883
3884 if (ic->journal_crypt_alg.key) {
3885 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3886 ic->journal_crypt_alg.key_size);
3887 if (r) {
3888 *error = "Error setting encryption key";
3889 goto bad;
3890 }
3891 }
3892 DEBUG_print("cipher %s, block size %u iv size %u\n",
3893 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3894
3895 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3896 if (!ic->journal_io) {
3897 *error = "Could not allocate memory for journal io";
3898 r = -ENOMEM;
3899 goto bad;
3900 }
3901
3902 if (blocksize == 1) {
3903 struct scatterlist *sg;
3904
3905 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3906 if (!req) {
3907 *error = "Could not allocate crypt request";
3908 r = -ENOMEM;
3909 goto bad;
3910 }
3911
3912 crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3913 if (!crypt_iv) {
3914 *error = "Could not allocate iv";
3915 r = -ENOMEM;
3916 goto bad;
3917 }
3918
3919 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3920 if (!ic->journal_xor) {
3921 *error = "Could not allocate memory for journal xor";
3922 r = -ENOMEM;
3923 goto bad;
3924 }
3925
3926 sg = kvmalloc_array(ic->journal_pages + 1,
3927 sizeof(struct scatterlist),
3928 GFP_KERNEL);
3929 if (!sg) {
3930 *error = "Unable to allocate sg list";
3931 r = -ENOMEM;
3932 goto bad;
3933 }
3934 sg_init_table(sg, ic->journal_pages + 1);
3935 for (i = 0; i < ic->journal_pages; i++) {
3936 char *va = lowmem_page_address(ic->journal_xor[i].page);
3937
3938 clear_page(va);
3939 sg_set_buf(&sg[i], va, PAGE_SIZE);
3940 }
3941 sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids));
3942
3943 skcipher_request_set_crypt(req, sg, sg,
3944 PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
3945 init_completion(&comp.comp);
3946 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3947 if (do_crypt(true, req, &comp))
3948 wait_for_completion(&comp.comp);
3949 kvfree(sg);
3950 r = dm_integrity_failed(ic);
3951 if (r) {
3952 *error = "Unable to encrypt journal";
3953 goto bad;
3954 }
3955 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3956
3957 crypto_free_skcipher(ic->journal_crypt);
3958 ic->journal_crypt = NULL;
3959 } else {
3960 unsigned int crypt_len = roundup(ivsize, blocksize);
3961
3962 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3963 if (!req) {
3964 *error = "Could not allocate crypt request";
3965 r = -ENOMEM;
3966 goto bad;
3967 }
3968
3969 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3970 if (!crypt_iv) {
3971 *error = "Could not allocate iv";
3972 r = -ENOMEM;
3973 goto bad;
3974 }
3975
3976 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3977 if (!crypt_data) {
3978 *error = "Unable to allocate crypt data";
3979 r = -ENOMEM;
3980 goto bad;
3981 }
3982
3983 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3984 if (!ic->journal_scatterlist) {
3985 *error = "Unable to allocate sg list";
3986 r = -ENOMEM;
3987 goto bad;
3988 }
3989 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3990 if (!ic->journal_io_scatterlist) {
3991 *error = "Unable to allocate sg list";
3992 r = -ENOMEM;
3993 goto bad;
3994 }
3995 ic->sk_requests = kvmalloc_array(ic->journal_sections,
3996 sizeof(struct skcipher_request *),
3997 GFP_KERNEL | __GFP_ZERO);
3998 if (!ic->sk_requests) {
3999 *error = "Unable to allocate sk requests";
4000 r = -ENOMEM;
4001 goto bad;
4002 }
4003 for (i = 0; i < ic->journal_sections; i++) {
4004 struct scatterlist sg;
4005 struct skcipher_request *section_req;
4006 __le32 section_le = cpu_to_le32(i);
4007
4008 memset(crypt_iv, 0x00, ivsize);
4009 memset(crypt_data, 0x00, crypt_len);
4010 memcpy(crypt_data, &section_le, min_t(size_t, crypt_len, sizeof(section_le)));
4011
4012 sg_init_one(&sg, crypt_data, crypt_len);
4013 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
4014 init_completion(&comp.comp);
4015 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
4016 if (do_crypt(true, req, &comp))
4017 wait_for_completion(&comp.comp);
4018
4019 r = dm_integrity_failed(ic);
4020 if (r) {
4021 *error = "Unable to generate iv";
4022 goto bad;
4023 }
4024
4025 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
4026 if (!section_req) {
4027 *error = "Unable to allocate crypt request";
4028 r = -ENOMEM;
4029 goto bad;
4030 }
4031 section_req->iv = kmalloc_array(ivsize, 2,
4032 GFP_KERNEL);
4033 if (!section_req->iv) {
4034 skcipher_request_free(section_req);
4035 *error = "Unable to allocate iv";
4036 r = -ENOMEM;
4037 goto bad;
4038 }
4039 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
4040 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
4041 ic->sk_requests[i] = section_req;
4042 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
4043 }
4044 }
4045 }
4046
4047 for (i = 0; i < N_COMMIT_IDS; i++) {
4048 unsigned int j;
4049
4050 retest_commit_id:
4051 for (j = 0; j < i; j++) {
4052 if (ic->commit_ids[j] == ic->commit_ids[i]) {
4053 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
4054 goto retest_commit_id;
4055 }
4056 }
4057 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
4058 }
4059
4060 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
4061 if (journal_tree_size > ULONG_MAX) {
4062 *error = "Journal doesn't fit into memory";
4063 r = -ENOMEM;
4064 goto bad;
4065 }
4066 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
4067 if (!ic->journal_tree) {
4068 *error = "Could not allocate memory for journal tree";
4069 r = -ENOMEM;
4070 }
4071 bad:
4072 kfree(crypt_data);
4073 kfree(crypt_iv);
4074 skcipher_request_free(req);
4075
4076 return r;
4077 }
4078
4079 /*
4080 * Construct an integrity mapping
4081 *
4082 * Arguments:
4083 * device
4084 * offset from the start of the device
4085 * tag size
4086 * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
4087 * number of optional arguments
4088 * optional arguments:
4089 * journal_sectors
4090 * interleave_sectors
4091 * buffer_sectors
4092 * journal_watermark
4093 * commit_time
4094 * meta_device
4095 * block_size
4096 * sectors_per_bit
4097 * bitmap_flush_interval
4098 * internal_hash
4099 * journal_crypt
4100 * journal_mac
4101 * recalculate
4102 */
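/*
 * Illustrative table line (device name and sizes are hypothetical;
 * journal mode with 4-byte crc32c tags, one optional argument):
 *
 *   dmsetup create int --table \
 *     "0 1000000 integrity /dev/sdb 0 4 J 1 internal_hash:crc32c"
 */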
4103 static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
4104 {
4105 struct dm_integrity_c *ic;
4106 char dummy;
4107 int r;
4108 unsigned int extra_args;
4109 struct dm_arg_set as;
4110 static const struct dm_arg _args[] = {
4111 {0, 18, "Invalid number of feature args"},
4112 };
4113 unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
4114 bool should_write_sb;
4115 __u64 threshold;
4116 unsigned long long start;
4117 __s8 log2_sectors_per_bitmap_bit = -1;
4118 __s8 log2_blocks_per_bitmap_bit;
4119 __u64 bits_in_journal;
4120 __u64 n_bitmap_bits;
4121
4122 #define DIRECT_ARGUMENTS 4
4123
4124 if (argc <= DIRECT_ARGUMENTS) {
4125 ti->error = "Invalid argument count";
4126 return -EINVAL;
4127 }
4128
4129 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
4130 if (!ic) {
4131 ti->error = "Cannot allocate integrity context";
4132 return -ENOMEM;
4133 }
4134 ti->private = ic;
4135 ti->per_io_data_size = sizeof(struct dm_integrity_io);
4136 ic->ti = ti;
4137
4138 ic->in_progress = RB_ROOT;
4139 INIT_LIST_HEAD(&ic->wait_list);
4140 init_waitqueue_head(&ic->endio_wait);
4141 bio_list_init(&ic->flush_bio_list);
4142 init_waitqueue_head(&ic->copy_to_journal_wait);
4143 init_completion(&ic->crypto_backoff);
4144 atomic64_set(&ic->number_of_mismatches, 0);
4145 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
4146
4147 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
4148 if (r) {
4149 ti->error = "Device lookup failed";
4150 goto bad;
4151 }
4152
4153 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
4154 ti->error = "Invalid starting offset";
4155 r = -EINVAL;
4156 goto bad;
4157 }
4158 ic->start = start;
4159
4160 if (strcmp(argv[2], "-")) {
4161 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
4162 ti->error = "Invalid tag size";
4163 r = -EINVAL;
4164 goto bad;
4165 }
4166 }
4167
4168 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
4169 !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
4170 ic->mode = argv[3][0];
4171 } else {
4172 ti->error = "Invalid mode (expecting J, B, D, R)";
4173 r = -EINVAL;
4174 goto bad;
4175 }
4176
4177 journal_sectors = 0;
4178 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
4179 buffer_sectors = DEFAULT_BUFFER_SECTORS;
4180 journal_watermark = DEFAULT_JOURNAL_WATERMARK;
4181 sync_msec = DEFAULT_SYNC_MSEC;
4182 ic->sectors_per_block = 1;
4183
4184 as.argc = argc - DIRECT_ARGUMENTS;
4185 as.argv = argv + DIRECT_ARGUMENTS;
4186 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
4187 if (r)
4188 goto bad;
4189
4190 while (extra_args--) {
4191 const char *opt_string;
4192 unsigned int val;
4193 unsigned long long llval;
4194
4195 opt_string = dm_shift_arg(&as);
4196 if (!opt_string) {
4197 r = -EINVAL;
4198 ti->error = "Not enough feature arguments";
4199 goto bad;
4200 }
4201 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
4202 journal_sectors = val ? val : 1;
4203 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
4204 interleave_sectors = val;
4205 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
4206 buffer_sectors = val;
4207 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
4208 journal_watermark = val;
4209 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
4210 sync_msec = val;
4211 else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
4212 if (ic->meta_dev) {
4213 dm_put_device(ti, ic->meta_dev);
4214 ic->meta_dev = NULL;
4215 }
4216 r = dm_get_device(ti, strchr(opt_string, ':') + 1,
4217 dm_table_get_mode(ti->table), &ic->meta_dev);
4218 if (r) {
4219 ti->error = "Device lookup failed";
4220 goto bad;
4221 }
4222 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
4223 if (val < 1 << SECTOR_SHIFT ||
4224 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
4225 (val & (val - 1))) {
4226 r = -EINVAL;
4227 ti->error = "Invalid block_size argument";
4228 goto bad;
4229 }
4230 ic->sectors_per_block = val >> SECTOR_SHIFT;
4231 } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
4232 log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
4233 } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
4234 if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
4235 r = -EINVAL;
4236 ti->error = "Invalid bitmap_flush_interval argument";
4237 goto bad;
4238 }
4239 ic->bitmap_flush_interval = msecs_to_jiffies(val);
4240 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
4241 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
4242 "Invalid internal_hash argument");
4243 if (r)
4244 goto bad;
4245 } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
4246 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
4247 "Invalid journal_crypt argument");
4248 if (r)
4249 goto bad;
4250 } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
4251 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
4252 "Invalid journal_mac argument");
4253 if (r)
4254 goto bad;
4255 } else if (!strcmp(opt_string, "recalculate")) {
4256 ic->recalculate_flag = true;
4257 } else if (!strcmp(opt_string, "reset_recalculate")) {
4258 ic->recalculate_flag = true;
4259 ic->reset_recalculate_flag = true;
4260 } else if (!strcmp(opt_string, "allow_discards")) {
4261 ic->discard = true;
4262 } else if (!strcmp(opt_string, "fix_padding")) {
4263 ic->fix_padding = true;
4264 } else if (!strcmp(opt_string, "fix_hmac")) {
4265 ic->fix_hmac = true;
4266 } else if (!strcmp(opt_string, "legacy_recalculate")) {
4267 ic->legacy_recalculate = true;
4268 } else {
4269 r = -EINVAL;
4270 ti->error = "Invalid argument";
4271 goto bad;
4272 }
4273 }
4274
4275 ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
4276 if (!ic->meta_dev)
4277 ic->meta_device_sectors = ic->data_device_sectors;
4278 else
4279 ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
4280
4281 if (!journal_sectors) {
4282 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
4283 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
4284 }
4285
4286 if (!buffer_sectors)
4287 buffer_sectors = 1;
4288 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
4289
4290 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
4291 "Invalid internal hash", "Error setting internal hash key");
4292 if (r)
4293 goto bad;
4294
4295 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
4296 "Invalid journal mac", "Error setting journal mac key");
4297 if (r)
4298 goto bad;
4299
4300 if (!ic->tag_size) {
4301 if (!ic->internal_hash) {
4302 ti->error = "Unknown tag size";
4303 r = -EINVAL;
4304 goto bad;
4305 }
4306 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
4307 }
4308 if (ic->tag_size > MAX_TAG_SIZE) {
4309 ti->error = "Too big tag size";
4310 r = -EINVAL;
4311 goto bad;
4312 }
4313 if (!(ic->tag_size & (ic->tag_size - 1)))
4314 ic->log2_tag_size = __ffs(ic->tag_size);
4315 else
4316 ic->log2_tag_size = -1;
4317
4318 if (ic->mode == 'B' && !ic->internal_hash) {
4319 r = -EINVAL;
4320 ti->error = "Bitmap mode can be only used with internal hash";
4321 goto bad;
4322 }
4323
4324 if (ic->discard && !ic->internal_hash) {
4325 r = -EINVAL;
4326 ti->error = "Discard can be only used with internal hash";
4327 goto bad;
4328 }
4329
4330 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4331 ic->autocommit_msec = sync_msec;
4332 timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4333
4334 ic->io = dm_io_client_create();
4335 if (IS_ERR(ic->io)) {
4336 r = PTR_ERR(ic->io);
4337 ic->io = NULL;
4338 ti->error = "Cannot allocate dm io";
4339 goto bad;
4340 }
4341
4342 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4343 if (r) {
4344 ti->error = "Cannot allocate mempool";
4345 goto bad;
4346 }
4347
4348 r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
4349 if (r) {
4350 ti->error = "Cannot allocate mempool";
4351 goto bad;
4352 }
4353
4354 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4355 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
4356 if (!ic->metadata_wq) {
4357 ti->error = "Cannot allocate workqueue";
4358 r = -ENOMEM;
4359 goto bad;
4360 }
4361
4362 /*
4363 * If this workqueue weren't ordered, it would cause bio reordering
4364 * and reduced performance.
4365 */
4366 ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM);
4367 if (!ic->wait_wq) {
4368 ti->error = "Cannot allocate workqueue";
4369 r = -ENOMEM;
4370 goto bad;
4371 }
4372
4373 ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4374 METADATA_WORKQUEUE_MAX_ACTIVE);
4375 if (!ic->offload_wq) {
4376 ti->error = "Cannot allocate workqueue";
4377 r = -ENOMEM;
4378 goto bad;
4379 }
4380
4381 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4382 if (!ic->commit_wq) {
4383 ti->error = "Cannot allocate workqueue";
4384 r = -ENOMEM;
4385 goto bad;
4386 }
4387 INIT_WORK(&ic->commit_work, integrity_commit);
4388
4389 if (ic->mode == 'J' || ic->mode == 'B') {
4390 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4391 if (!ic->writer_wq) {
4392 ti->error = "Cannot allocate workqueue";
4393 r = -ENOMEM;
4394 goto bad;
4395 }
4396 INIT_WORK(&ic->writer_work, integrity_writer);
4397 }
4398
4399 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4400 if (!ic->sb) {
4401 r = -ENOMEM;
4402 ti->error = "Cannot allocate superblock area";
4403 goto bad;
4404 }
4405
4406 r = sync_rw_sb(ic, REQ_OP_READ);
4407 if (r) {
4408 ti->error = "Error reading superblock";
4409 goto bad;
4410 }
4411 should_write_sb = false;
4412 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4413 if (ic->mode != 'R') {
4414 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4415 r = -EINVAL;
4416 ti->error = "The device is not initialized";
4417 goto bad;
4418 }
4419 }
4420
4421 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4422 if (r) {
4423 ti->error = "Could not initialize superblock";
4424 goto bad;
4425 }
4426 if (ic->mode != 'R')
4427 should_write_sb = true;
4428 }
4429
4430 if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
4431 r = -EINVAL;
4432 ti->error = "Unknown version";
4433 goto bad;
4434 }
4435 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4436 r = -EINVAL;
4437 ti->error = "Tag size doesn't match the information in superblock";
4438 goto bad;
4439 }
4440 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4441 r = -EINVAL;
4442 ti->error = "Block size doesn't match the information in superblock";
4443 goto bad;
4444 }
4445 if (!le32_to_cpu(ic->sb->journal_sections)) {
4446 r = -EINVAL;
4447 ti->error = "Corrupted superblock, journal_sections is 0";
4448 goto bad;
4449 }
4450 /* make sure that ti->max_io_len doesn't overflow */
4451 if (!ic->meta_dev) {
4452 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4453 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4454 r = -EINVAL;
4455 ti->error = "Invalid interleave_sectors in the superblock";
4456 goto bad;
4457 }
4458 } else {
4459 if (ic->sb->log2_interleave_sectors) {
4460 r = -EINVAL;
4461 ti->error = "Invalid interleave_sectors in the superblock";
4462 goto bad;
4463 }
4464 }
4465 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4466 r = -EINVAL;
4467 ti->error = "Journal mac mismatch";
4468 goto bad;
4469 }
4470
4471 get_provided_data_sectors(ic);
4472 if (!ic->provided_data_sectors) {
4473 r = -EINVAL;
4474 ti->error = "The device is too small";
4475 goto bad;
4476 }
4477
4478 try_smaller_buffer:
4479 r = calculate_device_limits(ic);
4480 if (r) {
4481 if (ic->meta_dev) {
4482 if (ic->log2_buffer_sectors > 3) {
4483 ic->log2_buffer_sectors--;
4484 goto try_smaller_buffer;
4485 }
4486 }
4487 ti->error = "The device is too small";
4488 goto bad;
4489 }
4490
4491 if (log2_sectors_per_bitmap_bit < 0)
4492 log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4493 if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4494 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4495
4496 bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4497 if (bits_in_journal > UINT_MAX)
4498 bits_in_journal = UINT_MAX;
4499 while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4500 log2_sectors_per_bitmap_bit++;
4501
4502 log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4503 ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4504 if (should_write_sb)
4505 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4506
4507 n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4508 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4509 ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
4510
4511 if (!ic->meta_dev)
4512 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4513
4514 if (ti->len > ic->provided_data_sectors) {
4515 r = -EINVAL;
4516 ti->error = "Not enough provided sectors for requested mapping size";
4517 goto bad;
4518 }
4519
4520
4521 threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4522 threshold += 50;
4523 do_div(threshold, 100);
4524 ic->free_sectors_threshold = threshold;
4525
4526 DEBUG_print("initialized:\n");
4527 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4528 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
4529 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4530 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
4531 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
4532 DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
4533 DEBUG_print(" journal_entries %u\n", ic->journal_entries);
4534 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4535 DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
4536 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
4537 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
4538 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
4539 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4540 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4541 DEBUG_print(" bits_in_journal %llu\n", bits_in_journal);
4542
4543 if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4544 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4545 ic->sb->recalc_sector = cpu_to_le64(0);
4546 }
4547
4548 if (ic->internal_hash) {
4549 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4550 if (!ic->recalc_wq) {
4551 ti->error = "Cannot allocate workqueue";
4552 r = -ENOMEM;
4553 goto bad;
4554 }
4555 INIT_WORK(&ic->recalc_work, integrity_recalc);
4556 } else {
4557 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
4558 ti->error = "Recalculate can only be specified with internal_hash";
4559 r = -EINVAL;
4560 goto bad;
4561 }
4562 }
4563
4564 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
4565 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
4566 dm_integrity_disable_recalculate(ic)) {
4567 ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
4568 r = -EOPNOTSUPP;
4569 goto bad;
4570 }
4571
4572 ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4573 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
4574 if (IS_ERR(ic->bufio)) {
4575 r = PTR_ERR(ic->bufio);
4576 ti->error = "Cannot initialize dm-bufio";
4577 ic->bufio = NULL;
4578 goto bad;
4579 }
4580 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4581
4582 if (ic->mode != 'R') {
4583 r = create_journal(ic, &ti->error);
4584 if (r)
4585 goto bad;
4586
4587 }
4588
4589 if (ic->mode == 'B') {
4590 unsigned int i;
4591 unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4592
4593 ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4594 if (!ic->recalc_bitmap) {
4595 r = -ENOMEM;
4596 goto bad;
4597 }
4598 ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4599 if (!ic->may_write_bitmap) {
4600 r = -ENOMEM;
4601 goto bad;
4602 }
4603 ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4604 if (!ic->bbs) {
4605 r = -ENOMEM;
4606 goto bad;
4607 }
4608 INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4609 for (i = 0; i < ic->n_bitmap_blocks; i++) {
4610 struct bitmap_block_status *bbs = &ic->bbs[i];
4611 unsigned int sector, pl_index, pl_offset;
4612
4613 INIT_WORK(&bbs->work, bitmap_block_work);
4614 bbs->ic = ic;
4615 bbs->idx = i;
4616 bio_list_init(&bbs->bio_queue);
4617 spin_lock_init(&bbs->bio_queue_lock);
4618
4619 sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4620 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4621 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4622
4623 bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4624 }
4625 }
4626
4627 if (should_write_sb) {
4628 init_journal(ic, 0, ic->journal_sections, 0);
4629 r = dm_integrity_failed(ic);
4630 if (unlikely(r)) {
4631 ti->error = "Error initializing journal";
4632 goto bad;
4633 }
4634 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
4635 if (r) {
4636 ti->error = "Error initializing superblock";
4637 goto bad;
4638 }
4639 ic->just_formatted = true;
4640 }
4641
4642 if (!ic->meta_dev) {
4643 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4644 if (r)
4645 goto bad;
4646 }
4647 if (ic->mode == 'B') {
4648 unsigned int max_io_len;
4649
4650 max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4651 if (!max_io_len)
4652 max_io_len = 1U << 31;
4653 DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4654 if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4655 r = dm_set_target_max_io_len(ti, max_io_len);
4656 if (r)
4657 goto bad;
4658 }
4659 }
4660
4661 if (!ic->internal_hash)
4662 dm_integrity_set(ti, ic);
4663
4664 ti->num_flush_bios = 1;
4665 ti->flush_supported = true;
4666 if (ic->discard)
4667 ti->num_discard_bios = 1;
4668
4669 dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
4670 return 0;
4671
4672 bad:
4673 dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
4674 dm_integrity_dtr(ti);
4675 return r;
4676 }
4677
4678 static void dm_integrity_dtr(struct dm_target *ti)
4679 {
4680 struct dm_integrity_c *ic = ti->private;
4681
4682 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4683 BUG_ON(!list_empty(&ic->wait_list));
4684
4685 if (ic->mode == 'B')
4686 cancel_delayed_work_sync(&ic->bitmap_flush_work);
4687 if (ic->metadata_wq)
4688 destroy_workqueue(ic->metadata_wq);
4689 if (ic->wait_wq)
4690 destroy_workqueue(ic->wait_wq);
4691 if (ic->offload_wq)
4692 destroy_workqueue(ic->offload_wq);
4693 if (ic->commit_wq)
4694 destroy_workqueue(ic->commit_wq);
4695 if (ic->writer_wq)
4696 destroy_workqueue(ic->writer_wq);
4697 if (ic->recalc_wq)
4698 destroy_workqueue(ic->recalc_wq);
4699 kvfree(ic->bbs);
4700 if (ic->bufio)
4701 dm_bufio_client_destroy(ic->bufio);
4702 mempool_exit(&ic->recheck_pool);
4703 mempool_exit(&ic->journal_io_mempool);
4704 if (ic->io)
4705 dm_io_client_destroy(ic->io);
4706 if (ic->dev)
4707 dm_put_device(ti, ic->dev);
4708 if (ic->meta_dev)
4709 dm_put_device(ti, ic->meta_dev);
4710 dm_integrity_free_page_list(ic->journal);
4711 dm_integrity_free_page_list(ic->journal_io);
4712 dm_integrity_free_page_list(ic->journal_xor);
4713 dm_integrity_free_page_list(ic->recalc_bitmap);
4714 dm_integrity_free_page_list(ic->may_write_bitmap);
4715 if (ic->journal_scatterlist)
4716 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4717 if (ic->journal_io_scatterlist)
4718 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4719 if (ic->sk_requests) {
4720 unsigned int i;
4721
4722 for (i = 0; i < ic->journal_sections; i++) {
4723 struct skcipher_request *req;
4724
4725 req = ic->sk_requests[i];
4726 if (req) {
4727 kfree_sensitive(req->iv);
4728 skcipher_request_free(req);
4729 }
4730 }
4731 kvfree(ic->sk_requests);
4732 }
4733 kvfree(ic->journal_tree);
4734 if (ic->sb)
4735 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4736
4737 if (ic->internal_hash)
4738 crypto_free_shash(ic->internal_hash);
4739 free_alg(&ic->internal_hash_alg);
4740
4741 if (ic->journal_crypt)
4742 crypto_free_skcipher(ic->journal_crypt);
4743 free_alg(&ic->journal_crypt_alg);
4744
4745 if (ic->journal_mac)
4746 crypto_free_shash(ic->journal_mac);
4747 free_alg(&ic->journal_mac_alg);
4748
4749 kfree(ic);
4750 dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
4751 }
4752
4753 static struct target_type integrity_target = {
4754 .name = "integrity",
4755 .version = {1, 10, 0},
4756 .module = THIS_MODULE,
4757 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4758 .ctr = dm_integrity_ctr,
4759 .dtr = dm_integrity_dtr,
4760 .map = dm_integrity_map,
4761 .postsuspend = dm_integrity_postsuspend,
4762 .resume = dm_integrity_resume,
4763 .status = dm_integrity_status,
4764 .iterate_devices = dm_integrity_iterate_devices,
4765 .io_hints = dm_integrity_io_hints,
4766 };
4767
4768 static int __init dm_integrity_init(void)
4769 {
4770 int r;
4771
4772 journal_io_cache = kmem_cache_create("integrity_journal_io",
4773 sizeof(struct journal_io), 0, 0, NULL);
4774 if (!journal_io_cache) {
4775 DMERR("can't allocate journal io cache");
4776 return -ENOMEM;
4777 }
4778
4779 r = dm_register_target(&integrity_target);
4780 if (r < 0) {
4781 kmem_cache_destroy(journal_io_cache);
4782 return r;
4783 }
4784
4785 return 0;
4786 }
4787
4788 static void __exit dm_integrity_exit(void)
4789 {
4790 dm_unregister_target(&integrity_target);
4791 kmem_cache_destroy(journal_io_cache);
4792 }
4793
4794 module_init(dm_integrity_init);
4795 module_exit(dm_integrity_exit);
4796
4797 MODULE_AUTHOR("Milan Broz");
4798 MODULE_AUTHOR("Mikulas Patocka");
4799 MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4800 MODULE_LICENSE("GPL");
4801