1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved. 4 * Copyright (C) 2016-2017 Milan Broz 5 * Copyright (C) 2016-2017 Mikulas Patocka 6 * 7 * This file is released under the GPL. 8 */ 9 10 #include "dm-bio-record.h" 11 12 #include <linux/compiler.h> 13 #include <linux/module.h> 14 #include <linux/device-mapper.h> 15 #include <linux/dm-io.h> 16 #include <linux/vmalloc.h> 17 #include <linux/sort.h> 18 #include <linux/rbtree.h> 19 #include <linux/delay.h> 20 #include <linux/random.h> 21 #include <linux/reboot.h> 22 #include <crypto/hash.h> 23 #include <crypto/skcipher.h> 24 #include <linux/async_tx.h> 25 #include <linux/dm-bufio.h> 26 27 #include "dm-audit.h" 28 29 #define DM_MSG_PREFIX "integrity" 30 31 #define DEFAULT_INTERLEAVE_SECTORS 32768 32 #define DEFAULT_JOURNAL_SIZE_FACTOR 7 33 #define DEFAULT_SECTORS_PER_BITMAP_BIT 32768 34 #define DEFAULT_BUFFER_SECTORS 128 35 #define DEFAULT_JOURNAL_WATERMARK 50 36 #define DEFAULT_SYNC_MSEC 10000 37 #define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192) 38 #define MIN_LOG2_INTERLEAVE_SECTORS 3 39 #define MAX_LOG2_INTERLEAVE_SECTORS 31 40 #define METADATA_WORKQUEUE_MAX_ACTIVE 16 41 #define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048) 42 #define RECALC_WRITE_SUPER 16 43 #define BITMAP_BLOCK_SIZE 4096 /* don't change it */ 44 #define BITMAP_FLUSH_INTERVAL (10 * HZ) 45 #define DISCARD_FILLER 0xf6 46 #define SALT_SIZE 16 47 48 /* 49 * Warning - DEBUG_PRINT prints security-sensitive data to the log, 50 * so it should not be enabled in the official kernel 51 */ 52 //#define DEBUG_PRINT 53 //#define INTERNAL_VERIFY 54 55 /* 56 * On disk structures 57 */ 58 59 #define SB_MAGIC "integrt" 60 #define SB_VERSION_1 1 61 #define SB_VERSION_2 2 62 #define SB_VERSION_3 3 63 #define SB_VERSION_4 4 64 #define SB_VERSION_5 5 65 #define SB_SECTORS 8 66 #define MAX_SECTORS_PER_BLOCK 8 67 68 struct superblock { 69 __u8 magic[8]; 70 __u8 version; 71 __u8 log2_interleave_sectors; 72 __le16 integrity_tag_size; 73 __le32 journal_sections; 74 __le64 provided_data_sectors; /* userspace uses this value */ 75 __le32 flags; 76 __u8 log2_sectors_per_block; 77 __u8 log2_blocks_per_bitmap_bit; 78 __u8 pad[2]; 79 __le64 recalc_sector; 80 __u8 pad2[8]; 81 __u8 salt[SALT_SIZE]; 82 }; 83 84 #define SB_FLAG_HAVE_JOURNAL_MAC 0x1 85 #define SB_FLAG_RECALCULATING 0x2 86 #define SB_FLAG_DIRTY_BITMAP 0x4 87 #define SB_FLAG_FIXED_PADDING 0x8 88 #define SB_FLAG_FIXED_HMAC 0x10 89 90 #define JOURNAL_ENTRY_ROUNDUP 8 91 92 typedef __le64 commit_id_t; 93 #define JOURNAL_MAC_PER_SECTOR 8 94 95 struct journal_entry { 96 union { 97 struct { 98 __le32 sector_lo; 99 __le32 sector_hi; 100 } s; 101 __le64 sector; 102 } u; 103 commit_id_t last_bytes[]; 104 /* __u8 tag[0]; */ 105 }; 106 107 #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) 108 109 #if BITS_PER_LONG == 64 110 #define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0) 111 #else 112 #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0) 113 #endif 114 #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) 115 #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) 116 #define journal_entry_set_unused(je) ((je)->u.s.sector_hi = cpu_to_le32(-1)) 117 #define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2)) 118 #define journal_entry_set_inprogress(je) ((je)->u.s.sector_hi = cpu_to_le32(-2)) 119 120 #define JOURNAL_BLOCK_SECTORS 8 121 #define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t)) 122 #define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS) 123 124 struct journal_sector { 125 struct_group(sectors, 126 __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR]; 127 __u8 mac[JOURNAL_MAC_PER_SECTOR]; 128 ); 129 commit_id_t commit_id; 130 }; 131 132 #define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK])) 133 134 #define METADATA_PADDING_SECTORS 8 135 136 #define N_COMMIT_IDS 4 137 138 static unsigned char prev_commit_seq(unsigned char seq) 139 { 140 return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS; 141 } 142 143 static unsigned char next_commit_seq(unsigned char seq) 144 { 145 return (seq + 1) % N_COMMIT_IDS; 146 } 147 148 /* 149 * In-memory structures 150 */ 151 152 struct journal_node { 153 struct rb_node node; 154 sector_t sector; 155 }; 156 157 struct alg_spec { 158 char *alg_string; 159 char *key_string; 160 __u8 *key; 161 unsigned int key_size; 162 }; 163 164 struct dm_integrity_c { 165 struct dm_dev *dev; 166 struct dm_dev *meta_dev; 167 unsigned int tag_size; 168 __s8 log2_tag_size; 169 sector_t start; 170 mempool_t journal_io_mempool; 171 struct dm_io_client *io; 172 struct dm_bufio_client *bufio; 173 struct workqueue_struct *metadata_wq; 174 struct superblock *sb; 175 unsigned int journal_pages; 176 unsigned int n_bitmap_blocks; 177 178 struct page_list *journal; 179 struct page_list *journal_io; 180 struct page_list *journal_xor; 181 struct page_list *recalc_bitmap; 182 struct page_list *may_write_bitmap; 183 struct bitmap_block_status *bbs; 184 unsigned int bitmap_flush_interval; 185 int synchronous_mode; 186 struct bio_list synchronous_bios; 187 struct delayed_work bitmap_flush_work; 188 189 struct crypto_skcipher *journal_crypt; 190 struct scatterlist **journal_scatterlist; 191 struct scatterlist **journal_io_scatterlist; 192 struct skcipher_request **sk_requests; 193 194 struct crypto_shash *journal_mac; 195 196 struct journal_node *journal_tree; 197 struct rb_root journal_tree_root; 198 199 sector_t provided_data_sectors; 200 201 unsigned short journal_entry_size; 202 unsigned char journal_entries_per_sector; 203 unsigned char journal_section_entries; 204 unsigned short journal_section_sectors; 205 unsigned int journal_sections; 206 unsigned int journal_entries; 207 sector_t data_device_sectors; 208 sector_t meta_device_sectors; 209 unsigned int initial_sectors; 210 unsigned int metadata_run; 211 __s8 log2_metadata_run; 212 __u8 log2_buffer_sectors; 213 __u8 sectors_per_block; 214 __u8 log2_blocks_per_bitmap_bit; 215 216 unsigned char mode; 217 218 int failed; 219 220 struct crypto_shash *internal_hash; 221 222 struct dm_target *ti; 223 224 /* these variables are locked with endio_wait.lock */ 225 struct rb_root in_progress; 226 struct list_head wait_list; 227 wait_queue_head_t endio_wait; 228 struct workqueue_struct *wait_wq; 229 struct workqueue_struct *offload_wq; 230 231 unsigned char commit_seq; 232 commit_id_t commit_ids[N_COMMIT_IDS]; 233 234 unsigned int committed_section; 235 unsigned int n_committed_sections; 236 237 unsigned int uncommitted_section; 238 unsigned int n_uncommitted_sections; 239 240 unsigned int free_section; 241 unsigned char free_section_entry; 242 unsigned int free_sectors; 243 244 unsigned int free_sectors_threshold; 245 246 struct workqueue_struct *commit_wq; 247 struct work_struct commit_work; 248 249 struct workqueue_struct *writer_wq; 250 struct work_struct writer_work; 251 252 struct workqueue_struct *recalc_wq; 253 struct work_struct recalc_work; 254 255 struct bio_list flush_bio_list; 256 257 unsigned long autocommit_jiffies; 258 struct timer_list autocommit_timer; 259 unsigned int autocommit_msec; 260 261 wait_queue_head_t copy_to_journal_wait; 262 263 struct completion crypto_backoff; 264 265 bool wrote_to_journal; 266 bool journal_uptodate; 267 bool just_formatted; 268 bool recalculate_flag; 269 bool reset_recalculate_flag; 270 bool discard; 271 bool fix_padding; 272 bool fix_hmac; 273 bool legacy_recalculate; 274 275 struct alg_spec internal_hash_alg; 276 struct alg_spec journal_crypt_alg; 277 struct alg_spec journal_mac_alg; 278 279 atomic64_t number_of_mismatches; 280 281 struct notifier_block reboot_notifier; 282 }; 283 284 struct dm_integrity_range { 285 sector_t logical_sector; 286 sector_t n_sectors; 287 bool waiting; 288 union { 289 struct rb_node node; 290 struct { 291 struct task_struct *task; 292 struct list_head wait_entry; 293 }; 294 }; 295 }; 296 297 struct dm_integrity_io { 298 struct work_struct work; 299 300 struct dm_integrity_c *ic; 301 enum req_op op; 302 bool fua; 303 304 struct dm_integrity_range range; 305 306 sector_t metadata_block; 307 unsigned int metadata_offset; 308 309 atomic_t in_flight; 310 blk_status_t bi_status; 311 312 struct completion *completion; 313 314 struct dm_bio_details bio_details; 315 }; 316 317 struct journal_completion { 318 struct dm_integrity_c *ic; 319 atomic_t in_flight; 320 struct completion comp; 321 }; 322 323 struct journal_io { 324 struct dm_integrity_range range; 325 struct journal_completion *comp; 326 }; 327 328 struct bitmap_block_status { 329 struct work_struct work; 330 struct dm_integrity_c *ic; 331 unsigned int idx; 332 unsigned long *bitmap; 333 struct bio_list bio_queue; 334 spinlock_t bio_queue_lock; 335 336 }; 337 338 static struct kmem_cache *journal_io_cache; 339 340 #define JOURNAL_IO_MEMPOOL 32 341 342 #ifdef DEBUG_PRINT 343 #define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__) 344 #define DEBUG_bytes(bytes, len, msg, ...) printk(KERN_DEBUG msg "%s%*ph\n", ##__VA_ARGS__, \ 345 len ? ": " : "", len, bytes) 346 #else 347 #define DEBUG_print(x, ...) do { } while (0) 348 #define DEBUG_bytes(bytes, len, msg, ...) do { } while (0) 349 #endif 350 351 static void dm_integrity_prepare(struct request *rq) 352 { 353 } 354 355 static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes) 356 { 357 } 358 359 /* 360 * DM Integrity profile, protection is performed layer above (dm-crypt) 361 */ 362 static const struct blk_integrity_profile dm_integrity_profile = { 363 .name = "DM-DIF-EXT-TAG", 364 .generate_fn = NULL, 365 .verify_fn = NULL, 366 .prepare_fn = dm_integrity_prepare, 367 .complete_fn = dm_integrity_complete, 368 }; 369 370 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map); 371 static void integrity_bio_wait(struct work_struct *w); 372 static void dm_integrity_dtr(struct dm_target *ti); 373 374 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) 375 { 376 if (err == -EILSEQ) 377 atomic64_inc(&ic->number_of_mismatches); 378 if (!cmpxchg(&ic->failed, 0, err)) 379 DMERR("Error on %s: %d", msg, err); 380 } 381 382 static int dm_integrity_failed(struct dm_integrity_c *ic) 383 { 384 return READ_ONCE(ic->failed); 385 } 386 387 static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) 388 { 389 if (ic->legacy_recalculate) 390 return false; 391 if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ? 392 ic->internal_hash_alg.key || ic->journal_mac_alg.key : 393 ic->internal_hash_alg.key && !ic->journal_mac_alg.key) 394 return true; 395 return false; 396 } 397 398 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i, 399 unsigned int j, unsigned char seq) 400 { 401 /* 402 * Xor the number with section and sector, so that if a piece of 403 * journal is written at wrong place, it is detected. 404 */ 405 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j); 406 } 407 408 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, 409 sector_t *area, sector_t *offset) 410 { 411 if (!ic->meta_dev) { 412 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; 413 *area = data_sector >> log2_interleave_sectors; 414 *offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1); 415 } else { 416 *area = 0; 417 *offset = data_sector; 418 } 419 } 420 421 #define sector_to_block(ic, n) \ 422 do { \ 423 BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1)); \ 424 (n) >>= (ic)->sb->log2_sectors_per_block; \ 425 } while (0) 426 427 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, 428 sector_t offset, unsigned int *metadata_offset) 429 { 430 __u64 ms; 431 unsigned int mo; 432 433 ms = area << ic->sb->log2_interleave_sectors; 434 if (likely(ic->log2_metadata_run >= 0)) 435 ms += area << ic->log2_metadata_run; 436 else 437 ms += area * ic->metadata_run; 438 ms >>= ic->log2_buffer_sectors; 439 440 sector_to_block(ic, offset); 441 442 if (likely(ic->log2_tag_size >= 0)) { 443 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); 444 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); 445 } else { 446 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); 447 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); 448 } 449 *metadata_offset = mo; 450 return ms; 451 } 452 453 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) 454 { 455 sector_t result; 456 457 if (ic->meta_dev) 458 return offset; 459 460 result = area << ic->sb->log2_interleave_sectors; 461 if (likely(ic->log2_metadata_run >= 0)) 462 result += (area + 1) << ic->log2_metadata_run; 463 else 464 result += (area + 1) * ic->metadata_run; 465 466 result += (sector_t)ic->initial_sectors + offset; 467 result += ic->start; 468 469 return result; 470 } 471 472 static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr) 473 { 474 if (unlikely(*sec_ptr >= ic->journal_sections)) 475 *sec_ptr -= ic->journal_sections; 476 } 477 478 static void sb_set_version(struct dm_integrity_c *ic) 479 { 480 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) 481 ic->sb->version = SB_VERSION_5; 482 else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) 483 ic->sb->version = SB_VERSION_4; 484 else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) 485 ic->sb->version = SB_VERSION_3; 486 else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) 487 ic->sb->version = SB_VERSION_2; 488 else 489 ic->sb->version = SB_VERSION_1; 490 } 491 492 static int sb_mac(struct dm_integrity_c *ic, bool wr) 493 { 494 SHASH_DESC_ON_STACK(desc, ic->journal_mac); 495 int r; 496 unsigned int size = crypto_shash_digestsize(ic->journal_mac); 497 498 if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) { 499 dm_integrity_io_error(ic, "digest is too long", -EINVAL); 500 return -EINVAL; 501 } 502 503 desc->tfm = ic->journal_mac; 504 505 r = crypto_shash_init(desc); 506 if (unlikely(r < 0)) { 507 dm_integrity_io_error(ic, "crypto_shash_init", r); 508 return r; 509 } 510 511 r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size); 512 if (unlikely(r < 0)) { 513 dm_integrity_io_error(ic, "crypto_shash_update", r); 514 return r; 515 } 516 517 if (likely(wr)) { 518 r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size); 519 if (unlikely(r < 0)) { 520 dm_integrity_io_error(ic, "crypto_shash_final", r); 521 return r; 522 } 523 } else { 524 __u8 result[HASH_MAX_DIGESTSIZE]; 525 526 r = crypto_shash_final(desc, result); 527 if (unlikely(r < 0)) { 528 dm_integrity_io_error(ic, "crypto_shash_final", r); 529 return r; 530 } 531 if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) { 532 dm_integrity_io_error(ic, "superblock mac", -EILSEQ); 533 dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0); 534 return -EILSEQ; 535 } 536 } 537 538 return 0; 539 } 540 541 static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf) 542 { 543 struct dm_io_request io_req; 544 struct dm_io_region io_loc; 545 const enum req_op op = opf & REQ_OP_MASK; 546 int r; 547 548 io_req.bi_opf = opf; 549 io_req.mem.type = DM_IO_KMEM; 550 io_req.mem.ptr.addr = ic->sb; 551 io_req.notify.fn = NULL; 552 io_req.client = ic->io; 553 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; 554 io_loc.sector = ic->start; 555 io_loc.count = SB_SECTORS; 556 557 if (op == REQ_OP_WRITE) { 558 sb_set_version(ic); 559 if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { 560 r = sb_mac(ic, true); 561 if (unlikely(r)) 562 return r; 563 } 564 } 565 566 r = dm_io(&io_req, 1, &io_loc, NULL); 567 if (unlikely(r)) 568 return r; 569 570 if (op == REQ_OP_READ) { 571 if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { 572 r = sb_mac(ic, false); 573 if (unlikely(r)) 574 return r; 575 } 576 } 577 578 return 0; 579 } 580 581 #define BITMAP_OP_TEST_ALL_SET 0 582 #define BITMAP_OP_TEST_ALL_CLEAR 1 583 #define BITMAP_OP_SET 2 584 #define BITMAP_OP_CLEAR 3 585 586 static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap, 587 sector_t sector, sector_t n_sectors, int mode) 588 { 589 unsigned long bit, end_bit, this_end_bit, page, end_page; 590 unsigned long *data; 591 592 if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) { 593 DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)", 594 sector, 595 n_sectors, 596 ic->sb->log2_sectors_per_block, 597 ic->log2_blocks_per_bitmap_bit, 598 mode); 599 BUG(); 600 } 601 602 if (unlikely(!n_sectors)) 603 return true; 604 605 bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 606 end_bit = (sector + n_sectors - 1) >> 607 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 608 609 page = bit / (PAGE_SIZE * 8); 610 bit %= PAGE_SIZE * 8; 611 612 end_page = end_bit / (PAGE_SIZE * 8); 613 end_bit %= PAGE_SIZE * 8; 614 615 repeat: 616 if (page < end_page) 617 this_end_bit = PAGE_SIZE * 8 - 1; 618 else 619 this_end_bit = end_bit; 620 621 data = lowmem_page_address(bitmap[page].page); 622 623 if (mode == BITMAP_OP_TEST_ALL_SET) { 624 while (bit <= this_end_bit) { 625 if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 626 do { 627 if (data[bit / BITS_PER_LONG] != -1) 628 return false; 629 bit += BITS_PER_LONG; 630 } while (this_end_bit >= bit + BITS_PER_LONG - 1); 631 continue; 632 } 633 if (!test_bit(bit, data)) 634 return false; 635 bit++; 636 } 637 } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) { 638 while (bit <= this_end_bit) { 639 if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 640 do { 641 if (data[bit / BITS_PER_LONG] != 0) 642 return false; 643 bit += BITS_PER_LONG; 644 } while (this_end_bit >= bit + BITS_PER_LONG - 1); 645 continue; 646 } 647 if (test_bit(bit, data)) 648 return false; 649 bit++; 650 } 651 } else if (mode == BITMAP_OP_SET) { 652 while (bit <= this_end_bit) { 653 if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 654 do { 655 data[bit / BITS_PER_LONG] = -1; 656 bit += BITS_PER_LONG; 657 } while (this_end_bit >= bit + BITS_PER_LONG - 1); 658 continue; 659 } 660 __set_bit(bit, data); 661 bit++; 662 } 663 } else if (mode == BITMAP_OP_CLEAR) { 664 if (!bit && this_end_bit == PAGE_SIZE * 8 - 1) 665 clear_page(data); 666 else { 667 while (bit <= this_end_bit) { 668 if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 669 do { 670 data[bit / BITS_PER_LONG] = 0; 671 bit += BITS_PER_LONG; 672 } while (this_end_bit >= bit + BITS_PER_LONG - 1); 673 continue; 674 } 675 __clear_bit(bit, data); 676 bit++; 677 } 678 } 679 } else { 680 BUG(); 681 } 682 683 if (unlikely(page < end_page)) { 684 bit = 0; 685 page++; 686 goto repeat; 687 } 688 689 return true; 690 } 691 692 static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src) 693 { 694 unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); 695 unsigned int i; 696 697 for (i = 0; i < n_bitmap_pages; i++) { 698 unsigned long *dst_data = lowmem_page_address(dst[i].page); 699 unsigned long *src_data = lowmem_page_address(src[i].page); 700 701 copy_page(dst_data, src_data); 702 } 703 } 704 705 static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector) 706 { 707 unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 708 unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8); 709 710 BUG_ON(bitmap_block >= ic->n_bitmap_blocks); 711 return &ic->bbs[bitmap_block]; 712 } 713 714 static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, 715 bool e, const char *function) 716 { 717 #if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY) 718 unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors; 719 720 if (unlikely(section >= ic->journal_sections) || 721 unlikely(offset >= limit)) { 722 DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)", 723 function, section, offset, ic->journal_sections, limit); 724 BUG(); 725 } 726 #endif 727 } 728 729 static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, 730 unsigned int *pl_index, unsigned int *pl_offset) 731 { 732 unsigned int sector; 733 734 access_journal_check(ic, section, offset, false, "page_list_location"); 735 736 sector = section * ic->journal_section_sectors + offset; 737 738 *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 739 *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 740 } 741 742 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl, 743 unsigned int section, unsigned int offset, unsigned int *n_sectors) 744 { 745 unsigned int pl_index, pl_offset; 746 char *va; 747 748 page_list_location(ic, section, offset, &pl_index, &pl_offset); 749 750 if (n_sectors) 751 *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT; 752 753 va = lowmem_page_address(pl[pl_index].page); 754 755 return (struct journal_sector *)(va + pl_offset); 756 } 757 758 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset) 759 { 760 return access_page_list(ic, ic->journal, section, offset, NULL); 761 } 762 763 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n) 764 { 765 unsigned int rel_sector, offset; 766 struct journal_sector *js; 767 768 access_journal_check(ic, section, n, true, "access_journal_entry"); 769 770 rel_sector = n % JOURNAL_BLOCK_SECTORS; 771 offset = n / JOURNAL_BLOCK_SECTORS; 772 773 js = access_journal(ic, section, rel_sector); 774 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size); 775 } 776 777 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n) 778 { 779 n <<= ic->sb->log2_sectors_per_block; 780 781 n += JOURNAL_BLOCK_SECTORS; 782 783 access_journal_check(ic, section, n, false, "access_journal_data"); 784 785 return access_journal(ic, section, n); 786 } 787 788 static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE]) 789 { 790 SHASH_DESC_ON_STACK(desc, ic->journal_mac); 791 int r; 792 unsigned int j, size; 793 794 desc->tfm = ic->journal_mac; 795 796 r = crypto_shash_init(desc); 797 if (unlikely(r < 0)) { 798 dm_integrity_io_error(ic, "crypto_shash_init", r); 799 goto err; 800 } 801 802 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { 803 __le64 section_le; 804 805 r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE); 806 if (unlikely(r < 0)) { 807 dm_integrity_io_error(ic, "crypto_shash_update", r); 808 goto err; 809 } 810 811 section_le = cpu_to_le64(section); 812 r = crypto_shash_update(desc, (__u8 *)§ion_le, sizeof(section_le)); 813 if (unlikely(r < 0)) { 814 dm_integrity_io_error(ic, "crypto_shash_update", r); 815 goto err; 816 } 817 } 818 819 for (j = 0; j < ic->journal_section_entries; j++) { 820 struct journal_entry *je = access_journal_entry(ic, section, j); 821 822 r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof(je->u.sector)); 823 if (unlikely(r < 0)) { 824 dm_integrity_io_error(ic, "crypto_shash_update", r); 825 goto err; 826 } 827 } 828 829 size = crypto_shash_digestsize(ic->journal_mac); 830 831 if (likely(size <= JOURNAL_MAC_SIZE)) { 832 r = crypto_shash_final(desc, result); 833 if (unlikely(r < 0)) { 834 dm_integrity_io_error(ic, "crypto_shash_final", r); 835 goto err; 836 } 837 memset(result + size, 0, JOURNAL_MAC_SIZE - size); 838 } else { 839 __u8 digest[HASH_MAX_DIGESTSIZE]; 840 841 if (WARN_ON(size > sizeof(digest))) { 842 dm_integrity_io_error(ic, "digest_size", -EINVAL); 843 goto err; 844 } 845 r = crypto_shash_final(desc, digest); 846 if (unlikely(r < 0)) { 847 dm_integrity_io_error(ic, "crypto_shash_final", r); 848 goto err; 849 } 850 memcpy(result, digest, JOURNAL_MAC_SIZE); 851 } 852 853 return; 854 err: 855 memset(result, 0, JOURNAL_MAC_SIZE); 856 } 857 858 static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr) 859 { 860 __u8 result[JOURNAL_MAC_SIZE]; 861 unsigned int j; 862 863 if (!ic->journal_mac) 864 return; 865 866 section_mac(ic, section, result); 867 868 for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) { 869 struct journal_sector *js = access_journal(ic, section, j); 870 871 if (likely(wr)) 872 memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR); 873 else { 874 if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) { 875 dm_integrity_io_error(ic, "journal mac", -EILSEQ); 876 dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0); 877 } 878 } 879 } 880 } 881 882 static void complete_journal_op(void *context) 883 { 884 struct journal_completion *comp = context; 885 886 BUG_ON(!atomic_read(&comp->in_flight)); 887 if (likely(atomic_dec_and_test(&comp->in_flight))) 888 complete(&comp->comp); 889 } 890 891 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, 892 unsigned int n_sections, struct journal_completion *comp) 893 { 894 struct async_submit_ctl submit; 895 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; 896 unsigned int pl_index, pl_offset, section_index; 897 struct page_list *source_pl, *target_pl; 898 899 if (likely(encrypt)) { 900 source_pl = ic->journal; 901 target_pl = ic->journal_io; 902 } else { 903 source_pl = ic->journal_io; 904 target_pl = ic->journal; 905 } 906 907 page_list_location(ic, section, 0, &pl_index, &pl_offset); 908 909 atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight); 910 911 init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL); 912 913 section_index = pl_index; 914 915 do { 916 size_t this_step; 917 struct page *src_pages[2]; 918 struct page *dst_page; 919 920 while (unlikely(pl_index == section_index)) { 921 unsigned int dummy; 922 923 if (likely(encrypt)) 924 rw_section_mac(ic, section, true); 925 section++; 926 n_sections--; 927 if (!n_sections) 928 break; 929 page_list_location(ic, section, 0, §ion_index, &dummy); 930 } 931 932 this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset); 933 dst_page = target_pl[pl_index].page; 934 src_pages[0] = source_pl[pl_index].page; 935 src_pages[1] = ic->journal_xor[pl_index].page; 936 937 async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit); 938 939 pl_index++; 940 pl_offset = 0; 941 n_bytes -= this_step; 942 } while (n_bytes); 943 944 BUG_ON(n_sections); 945 946 async_tx_issue_pending_all(); 947 } 948 949 static void complete_journal_encrypt(void *data, int err) 950 { 951 struct journal_completion *comp = data; 952 953 if (unlikely(err)) { 954 if (likely(err == -EINPROGRESS)) { 955 complete(&comp->ic->crypto_backoff); 956 return; 957 } 958 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err); 959 } 960 complete_journal_op(comp); 961 } 962 963 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) 964 { 965 int r; 966 967 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 968 complete_journal_encrypt, comp); 969 if (likely(encrypt)) 970 r = crypto_skcipher_encrypt(req); 971 else 972 r = crypto_skcipher_decrypt(req); 973 if (likely(!r)) 974 return false; 975 if (likely(r == -EINPROGRESS)) 976 return true; 977 if (likely(r == -EBUSY)) { 978 wait_for_completion(&comp->ic->crypto_backoff); 979 reinit_completion(&comp->ic->crypto_backoff); 980 return true; 981 } 982 dm_integrity_io_error(comp->ic, "encrypt", r); 983 return false; 984 } 985 986 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, 987 unsigned int n_sections, struct journal_completion *comp) 988 { 989 struct scatterlist **source_sg; 990 struct scatterlist **target_sg; 991 992 atomic_add(2, &comp->in_flight); 993 994 if (likely(encrypt)) { 995 source_sg = ic->journal_scatterlist; 996 target_sg = ic->journal_io_scatterlist; 997 } else { 998 source_sg = ic->journal_io_scatterlist; 999 target_sg = ic->journal_scatterlist; 1000 } 1001 1002 do { 1003 struct skcipher_request *req; 1004 unsigned int ivsize; 1005 char *iv; 1006 1007 if (likely(encrypt)) 1008 rw_section_mac(ic, section, true); 1009 1010 req = ic->sk_requests[section]; 1011 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); 1012 iv = req->iv; 1013 1014 memcpy(iv, iv + ivsize, ivsize); 1015 1016 req->src = source_sg[section]; 1017 req->dst = target_sg[section]; 1018 1019 if (unlikely(do_crypt(encrypt, req, comp))) 1020 atomic_inc(&comp->in_flight); 1021 1022 section++; 1023 n_sections--; 1024 } while (n_sections); 1025 1026 atomic_dec(&comp->in_flight); 1027 complete_journal_op(comp); 1028 } 1029 1030 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, 1031 unsigned int n_sections, struct journal_completion *comp) 1032 { 1033 if (ic->journal_xor) 1034 return xor_journal(ic, encrypt, section, n_sections, comp); 1035 else 1036 return crypt_journal(ic, encrypt, section, n_sections, comp); 1037 } 1038 1039 static void complete_journal_io(unsigned long error, void *context) 1040 { 1041 struct journal_completion *comp = context; 1042 1043 if (unlikely(error != 0)) 1044 dm_integrity_io_error(comp->ic, "writing journal", -EIO); 1045 complete_journal_op(comp); 1046 } 1047 1048 static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf, 1049 unsigned int sector, unsigned int n_sectors, 1050 struct journal_completion *comp) 1051 { 1052 struct dm_io_request io_req; 1053 struct dm_io_region io_loc; 1054 unsigned int pl_index, pl_offset; 1055 int r; 1056 1057 if (unlikely(dm_integrity_failed(ic))) { 1058 if (comp) 1059 complete_journal_io(-1UL, comp); 1060 return; 1061 } 1062 1063 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 1064 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 1065 1066 io_req.bi_opf = opf; 1067 io_req.mem.type = DM_IO_PAGE_LIST; 1068 if (ic->journal_io) 1069 io_req.mem.ptr.pl = &ic->journal_io[pl_index]; 1070 else 1071 io_req.mem.ptr.pl = &ic->journal[pl_index]; 1072 io_req.mem.offset = pl_offset; 1073 if (likely(comp != NULL)) { 1074 io_req.notify.fn = complete_journal_io; 1075 io_req.notify.context = comp; 1076 } else { 1077 io_req.notify.fn = NULL; 1078 } 1079 io_req.client = ic->io; 1080 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; 1081 io_loc.sector = ic->start + SB_SECTORS + sector; 1082 io_loc.count = n_sectors; 1083 1084 r = dm_io(&io_req, 1, &io_loc, NULL); 1085 if (unlikely(r)) { 1086 dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ? 1087 "reading journal" : "writing journal", r); 1088 if (comp) { 1089 WARN_ONCE(1, "asynchronous dm_io failed: %d", r); 1090 complete_journal_io(-1UL, comp); 1091 } 1092 } 1093 } 1094 1095 static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf, 1096 unsigned int section, unsigned int n_sections, 1097 struct journal_completion *comp) 1098 { 1099 unsigned int sector, n_sectors; 1100 1101 sector = section * ic->journal_section_sectors; 1102 n_sectors = n_sections * ic->journal_section_sectors; 1103 1104 rw_journal_sectors(ic, opf, sector, n_sectors, comp); 1105 } 1106 1107 static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections) 1108 { 1109 struct journal_completion io_comp; 1110 struct journal_completion crypt_comp_1; 1111 struct journal_completion crypt_comp_2; 1112 unsigned int i; 1113 1114 io_comp.ic = ic; 1115 init_completion(&io_comp.comp); 1116 1117 if (commit_start + commit_sections <= ic->journal_sections) { 1118 io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); 1119 if (ic->journal_io) { 1120 crypt_comp_1.ic = ic; 1121 init_completion(&crypt_comp_1.comp); 1122 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 1123 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); 1124 wait_for_completion_io(&crypt_comp_1.comp); 1125 } else { 1126 for (i = 0; i < commit_sections; i++) 1127 rw_section_mac(ic, commit_start + i, true); 1128 } 1129 rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start, 1130 commit_sections, &io_comp); 1131 } else { 1132 unsigned int to_end; 1133 1134 io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); 1135 to_end = ic->journal_sections - commit_start; 1136 if (ic->journal_io) { 1137 crypt_comp_1.ic = ic; 1138 init_completion(&crypt_comp_1.comp); 1139 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 1140 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); 1141 if (try_wait_for_completion(&crypt_comp_1.comp)) { 1142 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 1143 commit_start, to_end, &io_comp); 1144 reinit_completion(&crypt_comp_1.comp); 1145 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 1146 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); 1147 wait_for_completion_io(&crypt_comp_1.comp); 1148 } else { 1149 crypt_comp_2.ic = ic; 1150 init_completion(&crypt_comp_2.comp); 1151 crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); 1152 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); 1153 wait_for_completion_io(&crypt_comp_1.comp); 1154 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); 1155 wait_for_completion_io(&crypt_comp_2.comp); 1156 } 1157 } else { 1158 for (i = 0; i < to_end; i++) 1159 rw_section_mac(ic, commit_start + i, true); 1160 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp); 1161 for (i = 0; i < commit_sections - to_end; i++) 1162 rw_section_mac(ic, i, true); 1163 } 1164 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp); 1165 } 1166 1167 wait_for_completion_io(&io_comp.comp); 1168 } 1169 1170 static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, 1171 unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data) 1172 { 1173 struct dm_io_request io_req; 1174 struct dm_io_region io_loc; 1175 int r; 1176 unsigned int sector, pl_index, pl_offset; 1177 1178 BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1)); 1179 1180 if (unlikely(dm_integrity_failed(ic))) { 1181 fn(-1UL, data); 1182 return; 1183 } 1184 1185 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; 1186 1187 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 1188 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 1189 1190 io_req.bi_opf = REQ_OP_WRITE; 1191 io_req.mem.type = DM_IO_PAGE_LIST; 1192 io_req.mem.ptr.pl = &ic->journal[pl_index]; 1193 io_req.mem.offset = pl_offset; 1194 io_req.notify.fn = fn; 1195 io_req.notify.context = data; 1196 io_req.client = ic->io; 1197 io_loc.bdev = ic->dev->bdev; 1198 io_loc.sector = target; 1199 io_loc.count = n_sectors; 1200 1201 r = dm_io(&io_req, 1, &io_loc, NULL); 1202 if (unlikely(r)) { 1203 WARN_ONCE(1, "asynchronous dm_io failed: %d", r); 1204 fn(-1UL, data); 1205 } 1206 } 1207 1208 static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) 1209 { 1210 return range1->logical_sector < range2->logical_sector + range2->n_sectors && 1211 range1->logical_sector + range1->n_sectors > range2->logical_sector; 1212 } 1213 1214 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) 1215 { 1216 struct rb_node **n = &ic->in_progress.rb_node; 1217 struct rb_node *parent; 1218 1219 BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1)); 1220 1221 if (likely(check_waiting)) { 1222 struct dm_integrity_range *range; 1223 1224 list_for_each_entry(range, &ic->wait_list, wait_entry) { 1225 if (unlikely(ranges_overlap(range, new_range))) 1226 return false; 1227 } 1228 } 1229 1230 parent = NULL; 1231 1232 while (*n) { 1233 struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node); 1234 1235 parent = *n; 1236 if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) 1237 n = &range->node.rb_left; 1238 else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) 1239 n = &range->node.rb_right; 1240 else 1241 return false; 1242 } 1243 1244 rb_link_node(&new_range->node, parent, n); 1245 rb_insert_color(&new_range->node, &ic->in_progress); 1246 1247 return true; 1248 } 1249 1250 static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) 1251 { 1252 rb_erase(&range->node, &ic->in_progress); 1253 while (unlikely(!list_empty(&ic->wait_list))) { 1254 struct dm_integrity_range *last_range = 1255 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); 1256 struct task_struct *last_range_task; 1257 1258 last_range_task = last_range->task; 1259 list_del(&last_range->wait_entry); 1260 if (!add_new_range(ic, last_range, false)) { 1261 last_range->task = last_range_task; 1262 list_add(&last_range->wait_entry, &ic->wait_list); 1263 break; 1264 } 1265 last_range->waiting = false; 1266 wake_up_process(last_range_task); 1267 } 1268 } 1269 1270 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range) 1271 { 1272 unsigned long flags; 1273 1274 spin_lock_irqsave(&ic->endio_wait.lock, flags); 1275 remove_range_unlocked(ic, range); 1276 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); 1277 } 1278 1279 static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) 1280 { 1281 new_range->waiting = true; 1282 list_add_tail(&new_range->wait_entry, &ic->wait_list); 1283 new_range->task = current; 1284 do { 1285 __set_current_state(TASK_UNINTERRUPTIBLE); 1286 spin_unlock_irq(&ic->endio_wait.lock); 1287 io_schedule(); 1288 spin_lock_irq(&ic->endio_wait.lock); 1289 } while (unlikely(new_range->waiting)); 1290 } 1291 1292 static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) 1293 { 1294 if (unlikely(!add_new_range(ic, new_range, true))) 1295 wait_and_add_new_range(ic, new_range); 1296 } 1297 1298 static void init_journal_node(struct journal_node *node) 1299 { 1300 RB_CLEAR_NODE(&node->node); 1301 node->sector = (sector_t)-1; 1302 } 1303 1304 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) 1305 { 1306 struct rb_node **link; 1307 struct rb_node *parent; 1308 1309 node->sector = sector; 1310 BUG_ON(!RB_EMPTY_NODE(&node->node)); 1311 1312 link = &ic->journal_tree_root.rb_node; 1313 parent = NULL; 1314 1315 while (*link) { 1316 struct journal_node *j; 1317 1318 parent = *link; 1319 j = container_of(parent, struct journal_node, node); 1320 if (sector < j->sector) 1321 link = &j->node.rb_left; 1322 else 1323 link = &j->node.rb_right; 1324 } 1325 1326 rb_link_node(&node->node, parent, link); 1327 rb_insert_color(&node->node, &ic->journal_tree_root); 1328 } 1329 1330 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node) 1331 { 1332 BUG_ON(RB_EMPTY_NODE(&node->node)); 1333 rb_erase(&node->node, &ic->journal_tree_root); 1334 init_journal_node(node); 1335 } 1336 1337 #define NOT_FOUND (-1U) 1338 1339 static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) 1340 { 1341 struct rb_node *n = ic->journal_tree_root.rb_node; 1342 unsigned int found = NOT_FOUND; 1343 1344 *next_sector = (sector_t)-1; 1345 while (n) { 1346 struct journal_node *j = container_of(n, struct journal_node, node); 1347 1348 if (sector == j->sector) 1349 found = j - ic->journal_tree; 1350 1351 if (sector < j->sector) { 1352 *next_sector = j->sector; 1353 n = j->node.rb_left; 1354 } else 1355 n = j->node.rb_right; 1356 } 1357 1358 return found; 1359 } 1360 1361 static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector) 1362 { 1363 struct journal_node *node, *next_node; 1364 struct rb_node *next; 1365 1366 if (unlikely(pos >= ic->journal_entries)) 1367 return false; 1368 node = &ic->journal_tree[pos]; 1369 if (unlikely(RB_EMPTY_NODE(&node->node))) 1370 return false; 1371 if (unlikely(node->sector != sector)) 1372 return false; 1373 1374 next = rb_next(&node->node); 1375 if (unlikely(!next)) 1376 return true; 1377 1378 next_node = container_of(next, struct journal_node, node); 1379 return next_node->sector != sector; 1380 } 1381 1382 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node) 1383 { 1384 struct rb_node *next; 1385 struct journal_node *next_node; 1386 unsigned int next_section; 1387 1388 BUG_ON(RB_EMPTY_NODE(&node->node)); 1389 1390 next = rb_next(&node->node); 1391 if (unlikely(!next)) 1392 return false; 1393 1394 next_node = container_of(next, struct journal_node, node); 1395 1396 if (next_node->sector != node->sector) 1397 return false; 1398 1399 next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries; 1400 if (next_section >= ic->committed_section && 1401 next_section < ic->committed_section + ic->n_committed_sections) 1402 return true; 1403 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections) 1404 return true; 1405 1406 return false; 1407 } 1408 1409 #define TAG_READ 0 1410 #define TAG_WRITE 1 1411 #define TAG_CMP 2 1412 1413 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block, 1414 unsigned int *metadata_offset, unsigned int total_size, int op) 1415 { 1416 #define MAY_BE_FILLER 1 1417 #define MAY_BE_HASH 2 1418 unsigned int hash_offset = 0; 1419 unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0); 1420 1421 do { 1422 unsigned char *data, *dp; 1423 struct dm_buffer *b; 1424 unsigned int to_copy; 1425 int r; 1426 1427 r = dm_integrity_failed(ic); 1428 if (unlikely(r)) 1429 return r; 1430 1431 data = dm_bufio_read(ic->bufio, *metadata_block, &b); 1432 if (IS_ERR(data)) 1433 return PTR_ERR(data); 1434 1435 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); 1436 dp = data + *metadata_offset; 1437 if (op == TAG_READ) { 1438 memcpy(tag, dp, to_copy); 1439 } else if (op == TAG_WRITE) { 1440 if (memcmp(dp, tag, to_copy)) { 1441 memcpy(dp, tag, to_copy); 1442 dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy); 1443 } 1444 } else { 1445 /* e.g.: op == TAG_CMP */ 1446 1447 if (likely(is_power_of_2(ic->tag_size))) { 1448 if (unlikely(memcmp(dp, tag, to_copy))) 1449 if (unlikely(!ic->discard) || 1450 unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) { 1451 goto thorough_test; 1452 } 1453 } else { 1454 unsigned int i, ts; 1455 thorough_test: 1456 ts = total_size; 1457 1458 for (i = 0; i < to_copy; i++, ts--) { 1459 if (unlikely(dp[i] != tag[i])) 1460 may_be &= ~MAY_BE_HASH; 1461 if (likely(dp[i] != DISCARD_FILLER)) 1462 may_be &= ~MAY_BE_FILLER; 1463 hash_offset++; 1464 if (unlikely(hash_offset == ic->tag_size)) { 1465 if (unlikely(!may_be)) { 1466 dm_bufio_release(b); 1467 return ts; 1468 } 1469 hash_offset = 0; 1470 may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0); 1471 } 1472 } 1473 } 1474 } 1475 dm_bufio_release(b); 1476 1477 tag += to_copy; 1478 *metadata_offset += to_copy; 1479 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { 1480 (*metadata_block)++; 1481 *metadata_offset = 0; 1482 } 1483 1484 if (unlikely(!is_power_of_2(ic->tag_size))) 1485 hash_offset = (hash_offset + to_copy) % ic->tag_size; 1486 1487 total_size -= to_copy; 1488 } while (unlikely(total_size)); 1489 1490 return 0; 1491 #undef MAY_BE_FILLER 1492 #undef MAY_BE_HASH 1493 } 1494 1495 struct flush_request { 1496 struct dm_io_request io_req; 1497 struct dm_io_region io_reg; 1498 struct dm_integrity_c *ic; 1499 struct completion comp; 1500 }; 1501 1502 static void flush_notify(unsigned long error, void *fr_) 1503 { 1504 struct flush_request *fr = fr_; 1505 1506 if (unlikely(error != 0)) 1507 dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO); 1508 complete(&fr->comp); 1509 } 1510 1511 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data) 1512 { 1513 int r; 1514 struct flush_request fr; 1515 1516 if (!ic->meta_dev) 1517 flush_data = false; 1518 if (flush_data) { 1519 fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, 1520 fr.io_req.mem.type = DM_IO_KMEM, 1521 fr.io_req.mem.ptr.addr = NULL, 1522 fr.io_req.notify.fn = flush_notify, 1523 fr.io_req.notify.context = &fr; 1524 fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio), 1525 fr.io_reg.bdev = ic->dev->bdev, 1526 fr.io_reg.sector = 0, 1527 fr.io_reg.count = 0, 1528 fr.ic = ic; 1529 init_completion(&fr.comp); 1530 r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL); 1531 BUG_ON(r); 1532 } 1533 1534 r = dm_bufio_write_dirty_buffers(ic->bufio); 1535 if (unlikely(r)) 1536 dm_integrity_io_error(ic, "writing tags", r); 1537 1538 if (flush_data) 1539 wait_for_completion(&fr.comp); 1540 } 1541 1542 static void sleep_on_endio_wait(struct dm_integrity_c *ic) 1543 { 1544 DECLARE_WAITQUEUE(wait, current); 1545 1546 __add_wait_queue(&ic->endio_wait, &wait); 1547 __set_current_state(TASK_UNINTERRUPTIBLE); 1548 spin_unlock_irq(&ic->endio_wait.lock); 1549 io_schedule(); 1550 spin_lock_irq(&ic->endio_wait.lock); 1551 __remove_wait_queue(&ic->endio_wait, &wait); 1552 } 1553 1554 static void autocommit_fn(struct timer_list *t) 1555 { 1556 struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer); 1557 1558 if (likely(!dm_integrity_failed(ic))) 1559 queue_work(ic->commit_wq, &ic->commit_work); 1560 } 1561 1562 static void schedule_autocommit(struct dm_integrity_c *ic) 1563 { 1564 if (!timer_pending(&ic->autocommit_timer)) 1565 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies); 1566 } 1567 1568 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1569 { 1570 struct bio *bio; 1571 unsigned long flags; 1572 1573 spin_lock_irqsave(&ic->endio_wait.lock, flags); 1574 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1575 bio_list_add(&ic->flush_bio_list, bio); 1576 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); 1577 1578 queue_work(ic->commit_wq, &ic->commit_work); 1579 } 1580 1581 static void do_endio(struct dm_integrity_c *ic, struct bio *bio) 1582 { 1583 int r; 1584 1585 r = dm_integrity_failed(ic); 1586 if (unlikely(r) && !bio->bi_status) 1587 bio->bi_status = errno_to_blk_status(r); 1588 if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) { 1589 unsigned long flags; 1590 1591 spin_lock_irqsave(&ic->endio_wait.lock, flags); 1592 bio_list_add(&ic->synchronous_bios, bio); 1593 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); 1594 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); 1595 return; 1596 } 1597 bio_endio(bio); 1598 } 1599 1600 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1601 { 1602 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1603 1604 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) 1605 submit_flush_bio(ic, dio); 1606 else 1607 do_endio(ic, bio); 1608 } 1609 1610 static void dec_in_flight(struct dm_integrity_io *dio) 1611 { 1612 if (atomic_dec_and_test(&dio->in_flight)) { 1613 struct dm_integrity_c *ic = dio->ic; 1614 struct bio *bio; 1615 1616 remove_range(ic, &dio->range); 1617 1618 if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD)) 1619 schedule_autocommit(ic); 1620 1621 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1622 if (unlikely(dio->bi_status) && !bio->bi_status) 1623 bio->bi_status = dio->bi_status; 1624 if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { 1625 dio->range.logical_sector += dio->range.n_sectors; 1626 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); 1627 INIT_WORK(&dio->work, integrity_bio_wait); 1628 queue_work(ic->offload_wq, &dio->work); 1629 return; 1630 } 1631 do_endio_flush(ic, dio); 1632 } 1633 } 1634 1635 static void integrity_end_io(struct bio *bio) 1636 { 1637 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 1638 1639 dm_bio_restore(&dio->bio_details, bio); 1640 if (bio->bi_integrity) 1641 bio->bi_opf |= REQ_INTEGRITY; 1642 1643 if (dio->completion) 1644 complete(dio->completion); 1645 1646 dec_in_flight(dio); 1647 } 1648 1649 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector, 1650 const char *data, char *result) 1651 { 1652 __le64 sector_le = cpu_to_le64(sector); 1653 SHASH_DESC_ON_STACK(req, ic->internal_hash); 1654 int r; 1655 unsigned int digest_size; 1656 1657 req->tfm = ic->internal_hash; 1658 1659 r = crypto_shash_init(req); 1660 if (unlikely(r < 0)) { 1661 dm_integrity_io_error(ic, "crypto_shash_init", r); 1662 goto failed; 1663 } 1664 1665 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { 1666 r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE); 1667 if (unlikely(r < 0)) { 1668 dm_integrity_io_error(ic, "crypto_shash_update", r); 1669 goto failed; 1670 } 1671 } 1672 1673 r = crypto_shash_update(req, (const __u8 *)§or_le, sizeof(sector_le)); 1674 if (unlikely(r < 0)) { 1675 dm_integrity_io_error(ic, "crypto_shash_update", r); 1676 goto failed; 1677 } 1678 1679 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT); 1680 if (unlikely(r < 0)) { 1681 dm_integrity_io_error(ic, "crypto_shash_update", r); 1682 goto failed; 1683 } 1684 1685 r = crypto_shash_final(req, result); 1686 if (unlikely(r < 0)) { 1687 dm_integrity_io_error(ic, "crypto_shash_final", r); 1688 goto failed; 1689 } 1690 1691 digest_size = crypto_shash_digestsize(ic->internal_hash); 1692 if (unlikely(digest_size < ic->tag_size)) 1693 memset(result + digest_size, 0, ic->tag_size - digest_size); 1694 1695 return; 1696 1697 failed: 1698 /* this shouldn't happen anyway, the hash functions have no reason to fail */ 1699 get_random_bytes(result, ic->tag_size); 1700 } 1701 1702 static void integrity_metadata(struct work_struct *w) 1703 { 1704 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 1705 struct dm_integrity_c *ic = dio->ic; 1706 1707 int r; 1708 1709 if (ic->internal_hash) { 1710 struct bvec_iter iter; 1711 struct bio_vec bv; 1712 unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash); 1713 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1714 char *checksums; 1715 unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; 1716 char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; 1717 sector_t sector; 1718 unsigned int sectors_to_process; 1719 1720 if (unlikely(ic->mode == 'R')) 1721 goto skip_io; 1722 1723 if (likely(dio->op != REQ_OP_DISCARD)) 1724 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space, 1725 GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN); 1726 else 1727 checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN); 1728 if (!checksums) { 1729 checksums = checksums_onstack; 1730 if (WARN_ON(extra_space && 1731 digest_size > sizeof(checksums_onstack))) { 1732 r = -EINVAL; 1733 goto error; 1734 } 1735 } 1736 1737 if (unlikely(dio->op == REQ_OP_DISCARD)) { 1738 unsigned int bi_size = dio->bio_details.bi_iter.bi_size; 1739 unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE; 1740 unsigned int max_blocks = max_size / ic->tag_size; 1741 1742 memset(checksums, DISCARD_FILLER, max_size); 1743 1744 while (bi_size) { 1745 unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); 1746 1747 this_step_blocks = min(this_step_blocks, max_blocks); 1748 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, 1749 this_step_blocks * ic->tag_size, TAG_WRITE); 1750 if (unlikely(r)) { 1751 if (likely(checksums != checksums_onstack)) 1752 kfree(checksums); 1753 goto error; 1754 } 1755 1756 bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); 1757 } 1758 1759 if (likely(checksums != checksums_onstack)) 1760 kfree(checksums); 1761 goto skip_io; 1762 } 1763 1764 sector = dio->range.logical_sector; 1765 sectors_to_process = dio->range.n_sectors; 1766 1767 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { 1768 unsigned int pos; 1769 char *mem, *checksums_ptr; 1770 1771 again: 1772 mem = bvec_kmap_local(&bv); 1773 pos = 0; 1774 checksums_ptr = checksums; 1775 do { 1776 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr); 1777 checksums_ptr += ic->tag_size; 1778 sectors_to_process -= ic->sectors_per_block; 1779 pos += ic->sectors_per_block << SECTOR_SHIFT; 1780 sector += ic->sectors_per_block; 1781 } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack); 1782 kunmap_local(mem); 1783 1784 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, 1785 checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE); 1786 if (unlikely(r)) { 1787 if (r > 0) { 1788 sector_t s; 1789 1790 s = sector - ((r + ic->tag_size - 1) / ic->tag_size); 1791 DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx", 1792 bio->bi_bdev, s); 1793 r = -EILSEQ; 1794 atomic64_inc(&ic->number_of_mismatches); 1795 dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum", 1796 bio, s, 0); 1797 } 1798 if (likely(checksums != checksums_onstack)) 1799 kfree(checksums); 1800 goto error; 1801 } 1802 1803 if (!sectors_to_process) 1804 break; 1805 1806 if (unlikely(pos < bv.bv_len)) { 1807 bv.bv_offset += pos; 1808 bv.bv_len -= pos; 1809 goto again; 1810 } 1811 } 1812 1813 if (likely(checksums != checksums_onstack)) 1814 kfree(checksums); 1815 } else { 1816 struct bio_integrity_payload *bip = dio->bio_details.bi_integrity; 1817 1818 if (bip) { 1819 struct bio_vec biv; 1820 struct bvec_iter iter; 1821 unsigned int data_to_process = dio->range.n_sectors; 1822 1823 sector_to_block(ic, data_to_process); 1824 data_to_process *= ic->tag_size; 1825 1826 bip_for_each_vec(biv, bip, iter) { 1827 unsigned char *tag; 1828 unsigned int this_len; 1829 1830 BUG_ON(PageHighMem(biv.bv_page)); 1831 tag = bvec_virt(&biv); 1832 this_len = min(biv.bv_len, data_to_process); 1833 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, 1834 this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE); 1835 if (unlikely(r)) 1836 goto error; 1837 data_to_process -= this_len; 1838 if (!data_to_process) 1839 break; 1840 } 1841 } 1842 } 1843 skip_io: 1844 dec_in_flight(dio); 1845 return; 1846 error: 1847 dio->bi_status = errno_to_blk_status(r); 1848 dec_in_flight(dio); 1849 } 1850 1851 static int dm_integrity_map(struct dm_target *ti, struct bio *bio) 1852 { 1853 struct dm_integrity_c *ic = ti->private; 1854 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 1855 struct bio_integrity_payload *bip; 1856 1857 sector_t area, offset; 1858 1859 dio->ic = ic; 1860 dio->bi_status = 0; 1861 dio->op = bio_op(bio); 1862 1863 if (unlikely(dio->op == REQ_OP_DISCARD)) { 1864 if (ti->max_io_len) { 1865 sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector); 1866 unsigned int log2_max_io_len = __fls(ti->max_io_len); 1867 sector_t start_boundary = sec >> log2_max_io_len; 1868 sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len; 1869 1870 if (start_boundary < end_boundary) { 1871 sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1)); 1872 1873 dm_accept_partial_bio(bio, len); 1874 } 1875 } 1876 } 1877 1878 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 1879 submit_flush_bio(ic, dio); 1880 return DM_MAPIO_SUBMITTED; 1881 } 1882 1883 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); 1884 dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA; 1885 if (unlikely(dio->fua)) { 1886 /* 1887 * Don't pass down the FUA flag because we have to flush 1888 * disk cache anyway. 1889 */ 1890 bio->bi_opf &= ~REQ_FUA; 1891 } 1892 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { 1893 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx", 1894 dio->range.logical_sector, bio_sectors(bio), 1895 ic->provided_data_sectors); 1896 return DM_MAPIO_KILL; 1897 } 1898 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) { 1899 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x", 1900 ic->sectors_per_block, 1901 dio->range.logical_sector, bio_sectors(bio)); 1902 return DM_MAPIO_KILL; 1903 } 1904 1905 if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) { 1906 struct bvec_iter iter; 1907 struct bio_vec bv; 1908 1909 bio_for_each_segment(bv, bio, iter) { 1910 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { 1911 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary", 1912 bv.bv_offset, bv.bv_len, ic->sectors_per_block); 1913 return DM_MAPIO_KILL; 1914 } 1915 } 1916 } 1917 1918 bip = bio_integrity(bio); 1919 if (!ic->internal_hash) { 1920 if (bip) { 1921 unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block; 1922 1923 if (ic->log2_tag_size >= 0) 1924 wanted_tag_size <<= ic->log2_tag_size; 1925 else 1926 wanted_tag_size *= ic->tag_size; 1927 if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) { 1928 DMERR("Invalid integrity data size %u, expected %u", 1929 bip->bip_iter.bi_size, wanted_tag_size); 1930 return DM_MAPIO_KILL; 1931 } 1932 } 1933 } else { 1934 if (unlikely(bip != NULL)) { 1935 DMERR("Unexpected integrity data when using internal hash"); 1936 return DM_MAPIO_KILL; 1937 } 1938 } 1939 1940 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ)) 1941 return DM_MAPIO_KILL; 1942 1943 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 1944 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); 1945 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset); 1946 1947 dm_integrity_map_continue(dio, true); 1948 return DM_MAPIO_SUBMITTED; 1949 } 1950 1951 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, 1952 unsigned int journal_section, unsigned int journal_entry) 1953 { 1954 struct dm_integrity_c *ic = dio->ic; 1955 sector_t logical_sector; 1956 unsigned int n_sectors; 1957 1958 logical_sector = dio->range.logical_sector; 1959 n_sectors = dio->range.n_sectors; 1960 do { 1961 struct bio_vec bv = bio_iovec(bio); 1962 char *mem; 1963 1964 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors)) 1965 bv.bv_len = n_sectors << SECTOR_SHIFT; 1966 n_sectors -= bv.bv_len >> SECTOR_SHIFT; 1967 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len); 1968 retry_kmap: 1969 mem = kmap_local_page(bv.bv_page); 1970 if (likely(dio->op == REQ_OP_WRITE)) 1971 flush_dcache_page(bv.bv_page); 1972 1973 do { 1974 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry); 1975 1976 if (unlikely(dio->op == REQ_OP_READ)) { 1977 struct journal_sector *js; 1978 char *mem_ptr; 1979 unsigned int s; 1980 1981 if (unlikely(journal_entry_is_inprogress(je))) { 1982 flush_dcache_page(bv.bv_page); 1983 kunmap_local(mem); 1984 1985 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); 1986 goto retry_kmap; 1987 } 1988 smp_rmb(); 1989 BUG_ON(journal_entry_get_sector(je) != logical_sector); 1990 js = access_journal_data(ic, journal_section, journal_entry); 1991 mem_ptr = mem + bv.bv_offset; 1992 s = 0; 1993 do { 1994 memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA); 1995 *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s]; 1996 js++; 1997 mem_ptr += 1 << SECTOR_SHIFT; 1998 } while (++s < ic->sectors_per_block); 1999 #ifdef INTERNAL_VERIFY 2000 if (ic->internal_hash) { 2001 char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; 2002 2003 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); 2004 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { 2005 DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx", 2006 logical_sector); 2007 dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum", 2008 bio, logical_sector, 0); 2009 } 2010 } 2011 #endif 2012 } 2013 2014 if (!ic->internal_hash) { 2015 struct bio_integrity_payload *bip = bio_integrity(bio); 2016 unsigned int tag_todo = ic->tag_size; 2017 char *tag_ptr = journal_entry_tag(ic, je); 2018 2019 if (bip) { 2020 do { 2021 struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter); 2022 unsigned int tag_now = min(biv.bv_len, tag_todo); 2023 char *tag_addr; 2024 2025 BUG_ON(PageHighMem(biv.bv_page)); 2026 tag_addr = bvec_virt(&biv); 2027 if (likely(dio->op == REQ_OP_WRITE)) 2028 memcpy(tag_ptr, tag_addr, tag_now); 2029 else 2030 memcpy(tag_addr, tag_ptr, tag_now); 2031 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now); 2032 tag_ptr += tag_now; 2033 tag_todo -= tag_now; 2034 } while (unlikely(tag_todo)); 2035 } else if (likely(dio->op == REQ_OP_WRITE)) 2036 memset(tag_ptr, 0, tag_todo); 2037 } 2038 2039 if (likely(dio->op == REQ_OP_WRITE)) { 2040 struct journal_sector *js; 2041 unsigned int s; 2042 2043 js = access_journal_data(ic, journal_section, journal_entry); 2044 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT); 2045 2046 s = 0; 2047 do { 2048 je->last_bytes[s] = js[s].commit_id; 2049 } while (++s < ic->sectors_per_block); 2050 2051 if (ic->internal_hash) { 2052 unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash); 2053 2054 if (unlikely(digest_size > ic->tag_size)) { 2055 char checksums_onstack[HASH_MAX_DIGESTSIZE]; 2056 2057 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack); 2058 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size); 2059 } else 2060 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je)); 2061 } 2062 2063 journal_entry_set_sector(je, logical_sector); 2064 } 2065 logical_sector += ic->sectors_per_block; 2066 2067 journal_entry++; 2068 if (unlikely(journal_entry == ic->journal_section_entries)) { 2069 journal_entry = 0; 2070 journal_section++; 2071 wraparound_section(ic, &journal_section); 2072 } 2073 2074 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT; 2075 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT); 2076 2077 if (unlikely(dio->op == REQ_OP_READ)) 2078 flush_dcache_page(bv.bv_page); 2079 kunmap_local(mem); 2080 } while (n_sectors); 2081 2082 if (likely(dio->op == REQ_OP_WRITE)) { 2083 smp_mb(); 2084 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) 2085 wake_up(&ic->copy_to_journal_wait); 2086 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) 2087 queue_work(ic->commit_wq, &ic->commit_work); 2088 else 2089 schedule_autocommit(ic); 2090 } else 2091 remove_range(ic, &dio->range); 2092 2093 if (unlikely(bio->bi_iter.bi_size)) { 2094 sector_t area, offset; 2095 2096 dio->range.logical_sector = logical_sector; 2097 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 2098 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); 2099 return true; 2100 } 2101 2102 return false; 2103 } 2104 2105 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map) 2106 { 2107 struct dm_integrity_c *ic = dio->ic; 2108 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 2109 unsigned int journal_section, journal_entry; 2110 unsigned int journal_read_pos; 2111 struct completion read_comp; 2112 bool discard_retried = false; 2113 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ; 2114 2115 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D') 2116 need_sync_io = true; 2117 2118 if (need_sync_io && from_map) { 2119 INIT_WORK(&dio->work, integrity_bio_wait); 2120 queue_work(ic->offload_wq, &dio->work); 2121 return; 2122 } 2123 2124 lock_retry: 2125 spin_lock_irq(&ic->endio_wait.lock); 2126 retry: 2127 if (unlikely(dm_integrity_failed(ic))) { 2128 spin_unlock_irq(&ic->endio_wait.lock); 2129 do_endio(ic, bio); 2130 return; 2131 } 2132 dio->range.n_sectors = bio_sectors(bio); 2133 journal_read_pos = NOT_FOUND; 2134 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) { 2135 if (dio->op == REQ_OP_WRITE) { 2136 unsigned int next_entry, i, pos; 2137 unsigned int ws, we, range_sectors; 2138 2139 dio->range.n_sectors = min(dio->range.n_sectors, 2140 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block); 2141 if (unlikely(!dio->range.n_sectors)) { 2142 if (from_map) 2143 goto offload_to_thread; 2144 sleep_on_endio_wait(ic); 2145 goto retry; 2146 } 2147 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; 2148 ic->free_sectors -= range_sectors; 2149 journal_section = ic->free_section; 2150 journal_entry = ic->free_section_entry; 2151 2152 next_entry = ic->free_section_entry + range_sectors; 2153 ic->free_section_entry = next_entry % ic->journal_section_entries; 2154 ic->free_section += next_entry / ic->journal_section_entries; 2155 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; 2156 wraparound_section(ic, &ic->free_section); 2157 2158 pos = journal_section * ic->journal_section_entries + journal_entry; 2159 ws = journal_section; 2160 we = journal_entry; 2161 i = 0; 2162 do { 2163 struct journal_entry *je; 2164 2165 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); 2166 pos++; 2167 if (unlikely(pos >= ic->journal_entries)) 2168 pos = 0; 2169 2170 je = access_journal_entry(ic, ws, we); 2171 BUG_ON(!journal_entry_is_unused(je)); 2172 journal_entry_set_inprogress(je); 2173 we++; 2174 if (unlikely(we == ic->journal_section_entries)) { 2175 we = 0; 2176 ws++; 2177 wraparound_section(ic, &ws); 2178 } 2179 } while ((i += ic->sectors_per_block) < dio->range.n_sectors); 2180 2181 spin_unlock_irq(&ic->endio_wait.lock); 2182 goto journal_read_write; 2183 } else { 2184 sector_t next_sector; 2185 2186 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); 2187 if (likely(journal_read_pos == NOT_FOUND)) { 2188 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector)) 2189 dio->range.n_sectors = next_sector - dio->range.logical_sector; 2190 } else { 2191 unsigned int i; 2192 unsigned int jp = journal_read_pos + 1; 2193 2194 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { 2195 if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) 2196 break; 2197 } 2198 dio->range.n_sectors = i; 2199 } 2200 } 2201 } 2202 if (unlikely(!add_new_range(ic, &dio->range, true))) { 2203 /* 2204 * We must not sleep in the request routine because it could 2205 * stall bios on current->bio_list. 2206 * So, we offload the bio to a workqueue if we have to sleep. 2207 */ 2208 if (from_map) { 2209 offload_to_thread: 2210 spin_unlock_irq(&ic->endio_wait.lock); 2211 INIT_WORK(&dio->work, integrity_bio_wait); 2212 queue_work(ic->wait_wq, &dio->work); 2213 return; 2214 } 2215 if (journal_read_pos != NOT_FOUND) 2216 dio->range.n_sectors = ic->sectors_per_block; 2217 wait_and_add_new_range(ic, &dio->range); 2218 /* 2219 * wait_and_add_new_range drops the spinlock, so the journal 2220 * may have been changed arbitrarily. We need to recheck. 2221 * To simplify the code, we restrict I/O size to just one block. 2222 */ 2223 if (journal_read_pos != NOT_FOUND) { 2224 sector_t next_sector; 2225 unsigned int new_pos; 2226 2227 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); 2228 if (unlikely(new_pos != journal_read_pos)) { 2229 remove_range_unlocked(ic, &dio->range); 2230 goto retry; 2231 } 2232 } 2233 } 2234 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) { 2235 sector_t next_sector; 2236 unsigned int new_pos; 2237 2238 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); 2239 if (unlikely(new_pos != NOT_FOUND) || 2240 unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) { 2241 remove_range_unlocked(ic, &dio->range); 2242 spin_unlock_irq(&ic->endio_wait.lock); 2243 queue_work(ic->commit_wq, &ic->commit_work); 2244 flush_workqueue(ic->commit_wq); 2245 queue_work(ic->writer_wq, &ic->writer_work); 2246 flush_workqueue(ic->writer_wq); 2247 discard_retried = true; 2248 goto lock_retry; 2249 } 2250 } 2251 spin_unlock_irq(&ic->endio_wait.lock); 2252 2253 if (unlikely(journal_read_pos != NOT_FOUND)) { 2254 journal_section = journal_read_pos / ic->journal_section_entries; 2255 journal_entry = journal_read_pos % ic->journal_section_entries; 2256 goto journal_read_write; 2257 } 2258 2259 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) { 2260 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, 2261 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { 2262 struct bitmap_block_status *bbs; 2263 2264 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector); 2265 spin_lock(&bbs->bio_queue_lock); 2266 bio_list_add(&bbs->bio_queue, bio); 2267 spin_unlock(&bbs->bio_queue_lock); 2268 queue_work(ic->writer_wq, &bbs->work); 2269 return; 2270 } 2271 } 2272 2273 dio->in_flight = (atomic_t)ATOMIC_INIT(2); 2274 2275 if (need_sync_io) { 2276 init_completion(&read_comp); 2277 dio->completion = &read_comp; 2278 } else 2279 dio->completion = NULL; 2280 2281 dm_bio_record(&dio->bio_details, bio); 2282 bio_set_dev(bio, ic->dev->bdev); 2283 bio->bi_integrity = NULL; 2284 bio->bi_opf &= ~REQ_INTEGRITY; 2285 bio->bi_end_io = integrity_end_io; 2286 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; 2287 2288 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) { 2289 integrity_metadata(&dio->work); 2290 dm_integrity_flush_buffers(ic, false); 2291 2292 dio->in_flight = (atomic_t)ATOMIC_INIT(1); 2293 dio->completion = NULL; 2294 2295 submit_bio_noacct(bio); 2296 2297 return; 2298 } 2299 2300 submit_bio_noacct(bio); 2301 2302 if (need_sync_io) { 2303 wait_for_completion_io(&read_comp); 2304 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && 2305 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector)) 2306 goto skip_check; 2307 if (ic->mode == 'B') { 2308 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector, 2309 dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) 2310 goto skip_check; 2311 } 2312 2313 if (likely(!bio->bi_status)) 2314 integrity_metadata(&dio->work); 2315 else 2316 skip_check: 2317 dec_in_flight(dio); 2318 } else { 2319 INIT_WORK(&dio->work, integrity_metadata); 2320 queue_work(ic->metadata_wq, &dio->work); 2321 } 2322 2323 return; 2324 2325 journal_read_write: 2326 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry))) 2327 goto lock_retry; 2328 2329 do_endio_flush(ic, dio); 2330 } 2331 2332 2333 static void integrity_bio_wait(struct work_struct *w) 2334 { 2335 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 2336 2337 dm_integrity_map_continue(dio, false); 2338 } 2339 2340 static void pad_uncommitted(struct dm_integrity_c *ic) 2341 { 2342 if (ic->free_section_entry) { 2343 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry; 2344 ic->free_section_entry = 0; 2345 ic->free_section++; 2346 wraparound_section(ic, &ic->free_section); 2347 ic->n_uncommitted_sections++; 2348 } 2349 if (WARN_ON(ic->journal_sections * ic->journal_section_entries != 2350 (ic->n_uncommitted_sections + ic->n_committed_sections) * 2351 ic->journal_section_entries + ic->free_sectors)) { 2352 DMCRIT("journal_sections %u, journal_section_entries %u, " 2353 "n_uncommitted_sections %u, n_committed_sections %u, " 2354 "journal_section_entries %u, free_sectors %u", 2355 ic->journal_sections, ic->journal_section_entries, 2356 ic->n_uncommitted_sections, ic->n_committed_sections, 2357 ic->journal_section_entries, ic->free_sectors); 2358 } 2359 } 2360 2361 static void integrity_commit(struct work_struct *w) 2362 { 2363 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work); 2364 unsigned int commit_start, commit_sections; 2365 unsigned int i, j, n; 2366 struct bio *flushes; 2367 2368 del_timer(&ic->autocommit_timer); 2369 2370 spin_lock_irq(&ic->endio_wait.lock); 2371 flushes = bio_list_get(&ic->flush_bio_list); 2372 if (unlikely(ic->mode != 'J')) { 2373 spin_unlock_irq(&ic->endio_wait.lock); 2374 dm_integrity_flush_buffers(ic, true); 2375 goto release_flush_bios; 2376 } 2377 2378 pad_uncommitted(ic); 2379 commit_start = ic->uncommitted_section; 2380 commit_sections = ic->n_uncommitted_sections; 2381 spin_unlock_irq(&ic->endio_wait.lock); 2382 2383 if (!commit_sections) 2384 goto release_flush_bios; 2385 2386 ic->wrote_to_journal = true; 2387 2388 i = commit_start; 2389 for (n = 0; n < commit_sections; n++) { 2390 for (j = 0; j < ic->journal_section_entries; j++) { 2391 struct journal_entry *je; 2392 2393 je = access_journal_entry(ic, i, j); 2394 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); 2395 } 2396 for (j = 0; j < ic->journal_section_sectors; j++) { 2397 struct journal_sector *js; 2398 2399 js = access_journal(ic, i, j); 2400 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq); 2401 } 2402 i++; 2403 if (unlikely(i >= ic->journal_sections)) 2404 ic->commit_seq = next_commit_seq(ic->commit_seq); 2405 wraparound_section(ic, &i); 2406 } 2407 smp_rmb(); 2408 2409 write_journal(ic, commit_start, commit_sections); 2410 2411 spin_lock_irq(&ic->endio_wait.lock); 2412 ic->uncommitted_section += commit_sections; 2413 wraparound_section(ic, &ic->uncommitted_section); 2414 ic->n_uncommitted_sections -= commit_sections; 2415 ic->n_committed_sections += commit_sections; 2416 spin_unlock_irq(&ic->endio_wait.lock); 2417 2418 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) 2419 queue_work(ic->writer_wq, &ic->writer_work); 2420 2421 release_flush_bios: 2422 while (flushes) { 2423 struct bio *next = flushes->bi_next; 2424 2425 flushes->bi_next = NULL; 2426 do_endio(ic, flushes); 2427 flushes = next; 2428 } 2429 } 2430 2431 static void complete_copy_from_journal(unsigned long error, void *context) 2432 { 2433 struct journal_io *io = context; 2434 struct journal_completion *comp = io->comp; 2435 struct dm_integrity_c *ic = comp->ic; 2436 2437 remove_range(ic, &io->range); 2438 mempool_free(io, &ic->journal_io_mempool); 2439 if (unlikely(error != 0)) 2440 dm_integrity_io_error(ic, "copying from journal", -EIO); 2441 complete_journal_op(comp); 2442 } 2443 2444 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js, 2445 struct journal_entry *je) 2446 { 2447 unsigned int s = 0; 2448 2449 do { 2450 js->commit_id = je->last_bytes[s]; 2451 js++; 2452 } while (++s < ic->sectors_per_block); 2453 } 2454 2455 static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start, 2456 unsigned int write_sections, bool from_replay) 2457 { 2458 unsigned int i, j, n; 2459 struct journal_completion comp; 2460 struct blk_plug plug; 2461 2462 blk_start_plug(&plug); 2463 2464 comp.ic = ic; 2465 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2466 init_completion(&comp.comp); 2467 2468 i = write_start; 2469 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { 2470 #ifndef INTERNAL_VERIFY 2471 if (unlikely(from_replay)) 2472 #endif 2473 rw_section_mac(ic, i, false); 2474 for (j = 0; j < ic->journal_section_entries; j++) { 2475 struct journal_entry *je = access_journal_entry(ic, i, j); 2476 sector_t sec, area, offset; 2477 unsigned int k, l, next_loop; 2478 sector_t metadata_block; 2479 unsigned int metadata_offset; 2480 struct journal_io *io; 2481 2482 if (journal_entry_is_unused(je)) 2483 continue; 2484 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay); 2485 sec = journal_entry_get_sector(je); 2486 if (unlikely(from_replay)) { 2487 if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) { 2488 dm_integrity_io_error(ic, "invalid sector in journal", -EIO); 2489 sec &= ~(sector_t)(ic->sectors_per_block - 1); 2490 } 2491 if (unlikely(sec >= ic->provided_data_sectors)) { 2492 journal_entry_set_unused(je); 2493 continue; 2494 } 2495 } 2496 get_area_and_offset(ic, sec, &area, &offset); 2497 restore_last_bytes(ic, access_journal_data(ic, i, j), je); 2498 for (k = j + 1; k < ic->journal_section_entries; k++) { 2499 struct journal_entry *je2 = access_journal_entry(ic, i, k); 2500 sector_t sec2, area2, offset2; 2501 2502 if (journal_entry_is_unused(je2)) 2503 break; 2504 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay); 2505 sec2 = journal_entry_get_sector(je2); 2506 if (unlikely(sec2 >= ic->provided_data_sectors)) 2507 break; 2508 get_area_and_offset(ic, sec2, &area2, &offset2); 2509 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block)) 2510 break; 2511 restore_last_bytes(ic, access_journal_data(ic, i, k), je2); 2512 } 2513 next_loop = k - 1; 2514 2515 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO); 2516 io->comp = ∁ 2517 io->range.logical_sector = sec; 2518 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block; 2519 2520 spin_lock_irq(&ic->endio_wait.lock); 2521 add_new_range_and_wait(ic, &io->range); 2522 2523 if (likely(!from_replay)) { 2524 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries]; 2525 2526 /* don't write if there is newer committed sector */ 2527 while (j < k && find_newer_committed_node(ic, §ion_node[j])) { 2528 struct journal_entry *je2 = access_journal_entry(ic, i, j); 2529 2530 journal_entry_set_unused(je2); 2531 remove_journal_node(ic, §ion_node[j]); 2532 j++; 2533 sec += ic->sectors_per_block; 2534 offset += ic->sectors_per_block; 2535 } 2536 while (j < k && find_newer_committed_node(ic, §ion_node[k - 1])) { 2537 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1); 2538 2539 journal_entry_set_unused(je2); 2540 remove_journal_node(ic, §ion_node[k - 1]); 2541 k--; 2542 } 2543 if (j == k) { 2544 remove_range_unlocked(ic, &io->range); 2545 spin_unlock_irq(&ic->endio_wait.lock); 2546 mempool_free(io, &ic->journal_io_mempool); 2547 goto skip_io; 2548 } 2549 for (l = j; l < k; l++) 2550 remove_journal_node(ic, §ion_node[l]); 2551 } 2552 spin_unlock_irq(&ic->endio_wait.lock); 2553 2554 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); 2555 for (l = j; l < k; l++) { 2556 int r; 2557 struct journal_entry *je2 = access_journal_entry(ic, i, l); 2558 2559 if ( 2560 #ifndef INTERNAL_VERIFY 2561 unlikely(from_replay) && 2562 #endif 2563 ic->internal_hash) { 2564 char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; 2565 2566 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), 2567 (char *)access_journal_data(ic, i, l), test_tag); 2568 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) { 2569 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); 2570 dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0); 2571 } 2572 } 2573 2574 journal_entry_set_unused(je2); 2575 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset, 2576 ic->tag_size, TAG_WRITE); 2577 if (unlikely(r)) 2578 dm_integrity_io_error(ic, "reading tags", r); 2579 } 2580 2581 atomic_inc(&comp.in_flight); 2582 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block, 2583 (k - j) << ic->sb->log2_sectors_per_block, 2584 get_data_sector(ic, area, offset), 2585 complete_copy_from_journal, io); 2586 skip_io: 2587 j = next_loop; 2588 } 2589 } 2590 2591 dm_bufio_write_dirty_buffers_async(ic->bufio); 2592 2593 blk_finish_plug(&plug); 2594 2595 complete_journal_op(&comp); 2596 wait_for_completion_io(&comp.comp); 2597 2598 dm_integrity_flush_buffers(ic, true); 2599 } 2600 2601 static void integrity_writer(struct work_struct *w) 2602 { 2603 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work); 2604 unsigned int write_start, write_sections; 2605 unsigned int prev_free_sectors; 2606 2607 spin_lock_irq(&ic->endio_wait.lock); 2608 write_start = ic->committed_section; 2609 write_sections = ic->n_committed_sections; 2610 spin_unlock_irq(&ic->endio_wait.lock); 2611 2612 if (!write_sections) 2613 return; 2614 2615 do_journal_write(ic, write_start, write_sections, false); 2616 2617 spin_lock_irq(&ic->endio_wait.lock); 2618 2619 ic->committed_section += write_sections; 2620 wraparound_section(ic, &ic->committed_section); 2621 ic->n_committed_sections -= write_sections; 2622 2623 prev_free_sectors = ic->free_sectors; 2624 ic->free_sectors += write_sections * ic->journal_section_entries; 2625 if (unlikely(!prev_free_sectors)) 2626 wake_up_locked(&ic->endio_wait); 2627 2628 spin_unlock_irq(&ic->endio_wait.lock); 2629 } 2630 2631 static void recalc_write_super(struct dm_integrity_c *ic) 2632 { 2633 int r; 2634 2635 dm_integrity_flush_buffers(ic, false); 2636 if (dm_integrity_failed(ic)) 2637 return; 2638 2639 r = sync_rw_sb(ic, REQ_OP_WRITE); 2640 if (unlikely(r)) 2641 dm_integrity_io_error(ic, "writing superblock", r); 2642 } 2643 2644 static void integrity_recalc(struct work_struct *w) 2645 { 2646 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); 2647 size_t recalc_tags_size; 2648 u8 *recalc_buffer = NULL; 2649 u8 *recalc_tags = NULL; 2650 struct dm_integrity_range range; 2651 struct dm_io_request io_req; 2652 struct dm_io_region io_loc; 2653 sector_t area, offset; 2654 sector_t metadata_block; 2655 unsigned int metadata_offset; 2656 sector_t logical_sector, n_sectors; 2657 __u8 *t; 2658 unsigned int i; 2659 int r; 2660 unsigned int super_counter = 0; 2661 unsigned recalc_sectors = RECALC_SECTORS; 2662 2663 retry: 2664 recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO); 2665 if (!recalc_buffer) { 2666 oom: 2667 recalc_sectors >>= 1; 2668 if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block) 2669 goto retry; 2670 DMCRIT("out of memory for recalculate buffer - recalculation disabled"); 2671 goto free_ret; 2672 } 2673 recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; 2674 if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size) 2675 recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size; 2676 recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO); 2677 if (!recalc_tags) { 2678 vfree(recalc_buffer); 2679 recalc_buffer = NULL; 2680 goto oom; 2681 } 2682 2683 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector)); 2684 2685 spin_lock_irq(&ic->endio_wait.lock); 2686 2687 next_chunk: 2688 2689 if (unlikely(dm_post_suspending(ic->ti))) 2690 goto unlock_ret; 2691 2692 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); 2693 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { 2694 if (ic->mode == 'B') { 2695 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); 2696 DEBUG_print("queue_delayed_work: bitmap_flush_work\n"); 2697 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); 2698 } 2699 goto unlock_ret; 2700 } 2701 2702 get_area_and_offset(ic, range.logical_sector, &area, &offset); 2703 range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector); 2704 if (!ic->meta_dev) 2705 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset); 2706 2707 add_new_range_and_wait(ic, &range); 2708 spin_unlock_irq(&ic->endio_wait.lock); 2709 logical_sector = range.logical_sector; 2710 n_sectors = range.n_sectors; 2711 2712 if (ic->mode == 'B') { 2713 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) 2714 goto advance_and_next; 2715 2716 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, 2717 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { 2718 logical_sector += ic->sectors_per_block; 2719 n_sectors -= ic->sectors_per_block; 2720 cond_resched(); 2721 } 2722 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block, 2723 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { 2724 n_sectors -= ic->sectors_per_block; 2725 cond_resched(); 2726 } 2727 get_area_and_offset(ic, logical_sector, &area, &offset); 2728 } 2729 2730 DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors); 2731 2732 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) { 2733 recalc_write_super(ic); 2734 if (ic->mode == 'B') 2735 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); 2736 2737 super_counter = 0; 2738 } 2739 2740 if (unlikely(dm_integrity_failed(ic))) 2741 goto err; 2742 2743 io_req.bi_opf = REQ_OP_READ; 2744 io_req.mem.type = DM_IO_VMA; 2745 io_req.mem.ptr.addr = recalc_buffer; 2746 io_req.notify.fn = NULL; 2747 io_req.client = ic->io; 2748 io_loc.bdev = ic->dev->bdev; 2749 io_loc.sector = get_data_sector(ic, area, offset); 2750 io_loc.count = n_sectors; 2751 2752 r = dm_io(&io_req, 1, &io_loc, NULL); 2753 if (unlikely(r)) { 2754 dm_integrity_io_error(ic, "reading data", r); 2755 goto err; 2756 } 2757 2758 t = recalc_tags; 2759 for (i = 0; i < n_sectors; i += ic->sectors_per_block) { 2760 integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t); 2761 t += ic->tag_size; 2762 } 2763 2764 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); 2765 2766 r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_WRITE); 2767 if (unlikely(r)) { 2768 dm_integrity_io_error(ic, "writing tags", r); 2769 goto err; 2770 } 2771 2772 if (ic->mode == 'B') { 2773 sector_t start, end; 2774 2775 start = (range.logical_sector >> 2776 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << 2777 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 2778 end = ((range.logical_sector + range.n_sectors) >> 2779 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << 2780 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 2781 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR); 2782 } 2783 2784 advance_and_next: 2785 cond_resched(); 2786 2787 spin_lock_irq(&ic->endio_wait.lock); 2788 remove_range_unlocked(ic, &range); 2789 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors); 2790 goto next_chunk; 2791 2792 err: 2793 remove_range(ic, &range); 2794 goto free_ret; 2795 2796 unlock_ret: 2797 spin_unlock_irq(&ic->endio_wait.lock); 2798 2799 recalc_write_super(ic); 2800 2801 free_ret: 2802 vfree(recalc_buffer); 2803 kvfree(recalc_tags); 2804 } 2805 2806 static void bitmap_block_work(struct work_struct *w) 2807 { 2808 struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work); 2809 struct dm_integrity_c *ic = bbs->ic; 2810 struct bio *bio; 2811 struct bio_list bio_queue; 2812 struct bio_list waiting; 2813 2814 bio_list_init(&waiting); 2815 2816 spin_lock(&bbs->bio_queue_lock); 2817 bio_queue = bbs->bio_queue; 2818 bio_list_init(&bbs->bio_queue); 2819 spin_unlock(&bbs->bio_queue_lock); 2820 2821 while ((bio = bio_list_pop(&bio_queue))) { 2822 struct dm_integrity_io *dio; 2823 2824 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 2825 2826 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, 2827 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { 2828 remove_range(ic, &dio->range); 2829 INIT_WORK(&dio->work, integrity_bio_wait); 2830 queue_work(ic->offload_wq, &dio->work); 2831 } else { 2832 block_bitmap_op(ic, ic->journal, dio->range.logical_sector, 2833 dio->range.n_sectors, BITMAP_OP_SET); 2834 bio_list_add(&waiting, bio); 2835 } 2836 } 2837 2838 if (bio_list_empty(&waiting)) 2839 return; 2840 2841 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 2842 bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), 2843 BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL); 2844 2845 while ((bio = bio_list_pop(&waiting))) { 2846 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 2847 2848 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, 2849 dio->range.n_sectors, BITMAP_OP_SET); 2850 2851 remove_range(ic, &dio->range); 2852 INIT_WORK(&dio->work, integrity_bio_wait); 2853 queue_work(ic->offload_wq, &dio->work); 2854 } 2855 2856 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); 2857 } 2858 2859 static void bitmap_flush_work(struct work_struct *work) 2860 { 2861 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work); 2862 struct dm_integrity_range range; 2863 unsigned long limit; 2864 struct bio *bio; 2865 2866 dm_integrity_flush_buffers(ic, false); 2867 2868 range.logical_sector = 0; 2869 range.n_sectors = ic->provided_data_sectors; 2870 2871 spin_lock_irq(&ic->endio_wait.lock); 2872 add_new_range_and_wait(ic, &range); 2873 spin_unlock_irq(&ic->endio_wait.lock); 2874 2875 dm_integrity_flush_buffers(ic, true); 2876 2877 limit = ic->provided_data_sectors; 2878 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 2879 limit = le64_to_cpu(ic->sb->recalc_sector) 2880 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit) 2881 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 2882 } 2883 /*DEBUG_print("zeroing journal\n");*/ 2884 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR); 2885 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR); 2886 2887 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, 2888 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 2889 2890 spin_lock_irq(&ic->endio_wait.lock); 2891 remove_range_unlocked(ic, &range); 2892 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) { 2893 bio_endio(bio); 2894 spin_unlock_irq(&ic->endio_wait.lock); 2895 spin_lock_irq(&ic->endio_wait.lock); 2896 } 2897 spin_unlock_irq(&ic->endio_wait.lock); 2898 } 2899 2900 2901 static void init_journal(struct dm_integrity_c *ic, unsigned int start_section, 2902 unsigned int n_sections, unsigned char commit_seq) 2903 { 2904 unsigned int i, j, n; 2905 2906 if (!n_sections) 2907 return; 2908 2909 for (n = 0; n < n_sections; n++) { 2910 i = start_section + n; 2911 wraparound_section(ic, &i); 2912 for (j = 0; j < ic->journal_section_sectors; j++) { 2913 struct journal_sector *js = access_journal(ic, i, j); 2914 2915 BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA); 2916 memset(&js->sectors, 0, sizeof(js->sectors)); 2917 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq); 2918 } 2919 for (j = 0; j < ic->journal_section_entries; j++) { 2920 struct journal_entry *je = access_journal_entry(ic, i, j); 2921 2922 journal_entry_set_unused(je); 2923 } 2924 } 2925 2926 write_journal(ic, start_section, n_sections); 2927 } 2928 2929 static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id) 2930 { 2931 unsigned char k; 2932 2933 for (k = 0; k < N_COMMIT_IDS; k++) { 2934 if (dm_integrity_commit_id(ic, i, j, k) == id) 2935 return k; 2936 } 2937 dm_integrity_io_error(ic, "journal commit id", -EIO); 2938 return -EIO; 2939 } 2940 2941 static void replay_journal(struct dm_integrity_c *ic) 2942 { 2943 unsigned int i, j; 2944 bool used_commit_ids[N_COMMIT_IDS]; 2945 unsigned int max_commit_id_sections[N_COMMIT_IDS]; 2946 unsigned int write_start, write_sections; 2947 unsigned int continue_section; 2948 bool journal_empty; 2949 unsigned char unused, last_used, want_commit_seq; 2950 2951 if (ic->mode == 'R') 2952 return; 2953 2954 if (ic->journal_uptodate) 2955 return; 2956 2957 last_used = 0; 2958 write_start = 0; 2959 2960 if (!ic->just_formatted) { 2961 DEBUG_print("reading journal\n"); 2962 rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL); 2963 if (ic->journal_io) 2964 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); 2965 if (ic->journal_io) { 2966 struct journal_completion crypt_comp; 2967 2968 crypt_comp.ic = ic; 2969 init_completion(&crypt_comp.comp); 2970 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0); 2971 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); 2972 wait_for_completion(&crypt_comp.comp); 2973 } 2974 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal"); 2975 } 2976 2977 if (dm_integrity_failed(ic)) 2978 goto clear_journal; 2979 2980 journal_empty = true; 2981 memset(used_commit_ids, 0, sizeof(used_commit_ids)); 2982 memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections)); 2983 for (i = 0; i < ic->journal_sections; i++) { 2984 for (j = 0; j < ic->journal_section_sectors; j++) { 2985 int k; 2986 struct journal_sector *js = access_journal(ic, i, j); 2987 2988 k = find_commit_seq(ic, i, j, js->commit_id); 2989 if (k < 0) 2990 goto clear_journal; 2991 used_commit_ids[k] = true; 2992 max_commit_id_sections[k] = i; 2993 } 2994 if (journal_empty) { 2995 for (j = 0; j < ic->journal_section_entries; j++) { 2996 struct journal_entry *je = access_journal_entry(ic, i, j); 2997 2998 if (!journal_entry_is_unused(je)) { 2999 journal_empty = false; 3000 break; 3001 } 3002 } 3003 } 3004 } 3005 3006 if (!used_commit_ids[N_COMMIT_IDS - 1]) { 3007 unused = N_COMMIT_IDS - 1; 3008 while (unused && !used_commit_ids[unused - 1]) 3009 unused--; 3010 } else { 3011 for (unused = 0; unused < N_COMMIT_IDS; unused++) 3012 if (!used_commit_ids[unused]) 3013 break; 3014 if (unused == N_COMMIT_IDS) { 3015 dm_integrity_io_error(ic, "journal commit ids", -EIO); 3016 goto clear_journal; 3017 } 3018 } 3019 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n", 3020 unused, used_commit_ids[0], used_commit_ids[1], 3021 used_commit_ids[2], used_commit_ids[3]); 3022 3023 last_used = prev_commit_seq(unused); 3024 want_commit_seq = prev_commit_seq(last_used); 3025 3026 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)]) 3027 journal_empty = true; 3028 3029 write_start = max_commit_id_sections[last_used] + 1; 3030 if (unlikely(write_start >= ic->journal_sections)) 3031 want_commit_seq = next_commit_seq(want_commit_seq); 3032 wraparound_section(ic, &write_start); 3033 3034 i = write_start; 3035 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) { 3036 for (j = 0; j < ic->journal_section_sectors; j++) { 3037 struct journal_sector *js = access_journal(ic, i, j); 3038 3039 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) { 3040 /* 3041 * This could be caused by crash during writing. 3042 * We won't replay the inconsistent part of the 3043 * journal. 3044 */ 3045 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n", 3046 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq); 3047 goto brk; 3048 } 3049 } 3050 i++; 3051 if (unlikely(i >= ic->journal_sections)) 3052 want_commit_seq = next_commit_seq(want_commit_seq); 3053 wraparound_section(ic, &i); 3054 } 3055 brk: 3056 3057 if (!journal_empty) { 3058 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n", 3059 write_sections, write_start, want_commit_seq); 3060 do_journal_write(ic, write_start, write_sections, true); 3061 } 3062 3063 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) { 3064 continue_section = write_start; 3065 ic->commit_seq = want_commit_seq; 3066 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq); 3067 } else { 3068 unsigned int s; 3069 unsigned char erase_seq; 3070 3071 clear_journal: 3072 DEBUG_print("clearing journal\n"); 3073 3074 erase_seq = prev_commit_seq(prev_commit_seq(last_used)); 3075 s = write_start; 3076 init_journal(ic, s, 1, erase_seq); 3077 s++; 3078 wraparound_section(ic, &s); 3079 if (ic->journal_sections >= 2) { 3080 init_journal(ic, s, ic->journal_sections - 2, erase_seq); 3081 s += ic->journal_sections - 2; 3082 wraparound_section(ic, &s); 3083 init_journal(ic, s, 1, erase_seq); 3084 } 3085 3086 continue_section = 0; 3087 ic->commit_seq = next_commit_seq(erase_seq); 3088 } 3089 3090 ic->committed_section = continue_section; 3091 ic->n_committed_sections = 0; 3092 3093 ic->uncommitted_section = continue_section; 3094 ic->n_uncommitted_sections = 0; 3095 3096 ic->free_section = continue_section; 3097 ic->free_section_entry = 0; 3098 ic->free_sectors = ic->journal_entries; 3099 3100 ic->journal_tree_root = RB_ROOT; 3101 for (i = 0; i < ic->journal_entries; i++) 3102 init_journal_node(&ic->journal_tree[i]); 3103 } 3104 3105 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic) 3106 { 3107 DEBUG_print("%s\n", __func__); 3108 3109 if (ic->mode == 'B') { 3110 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1; 3111 ic->synchronous_mode = 1; 3112 3113 cancel_delayed_work_sync(&ic->bitmap_flush_work); 3114 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); 3115 flush_workqueue(ic->commit_wq); 3116 } 3117 } 3118 3119 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x) 3120 { 3121 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier); 3122 3123 DEBUG_print("%s\n", __func__); 3124 3125 dm_integrity_enter_synchronous_mode(ic); 3126 3127 return NOTIFY_DONE; 3128 } 3129 3130 static void dm_integrity_postsuspend(struct dm_target *ti) 3131 { 3132 struct dm_integrity_c *ic = ti->private; 3133 int r; 3134 3135 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier)); 3136 3137 del_timer_sync(&ic->autocommit_timer); 3138 3139 if (ic->recalc_wq) 3140 drain_workqueue(ic->recalc_wq); 3141 3142 if (ic->mode == 'B') 3143 cancel_delayed_work_sync(&ic->bitmap_flush_work); 3144 3145 queue_work(ic->commit_wq, &ic->commit_work); 3146 drain_workqueue(ic->commit_wq); 3147 3148 if (ic->mode == 'J') { 3149 queue_work(ic->writer_wq, &ic->writer_work); 3150 drain_workqueue(ic->writer_wq); 3151 dm_integrity_flush_buffers(ic, true); 3152 if (ic->wrote_to_journal) { 3153 init_journal(ic, ic->free_section, 3154 ic->journal_sections - ic->free_section, ic->commit_seq); 3155 if (ic->free_section) { 3156 init_journal(ic, 0, ic->free_section, 3157 next_commit_seq(ic->commit_seq)); 3158 } 3159 } 3160 } 3161 3162 if (ic->mode == 'B') { 3163 dm_integrity_flush_buffers(ic, true); 3164 #if 1 3165 /* set to 0 to test bitmap replay code */ 3166 init_journal(ic, 0, ic->journal_sections, 0); 3167 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); 3168 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 3169 if (unlikely(r)) 3170 dm_integrity_io_error(ic, "writing superblock", r); 3171 #endif 3172 } 3173 3174 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); 3175 3176 ic->journal_uptodate = true; 3177 } 3178 3179 static void dm_integrity_resume(struct dm_target *ti) 3180 { 3181 struct dm_integrity_c *ic = ti->private; 3182 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors); 3183 int r; 3184 3185 DEBUG_print("resume\n"); 3186 3187 ic->wrote_to_journal = false; 3188 3189 if (ic->provided_data_sectors != old_provided_data_sectors) { 3190 if (ic->provided_data_sectors > old_provided_data_sectors && 3191 ic->mode == 'B' && 3192 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { 3193 rw_journal_sectors(ic, REQ_OP_READ, 0, 3194 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3195 block_bitmap_op(ic, ic->journal, old_provided_data_sectors, 3196 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET); 3197 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, 3198 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3199 } 3200 3201 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); 3202 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 3203 if (unlikely(r)) 3204 dm_integrity_io_error(ic, "writing superblock", r); 3205 } 3206 3207 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) { 3208 DEBUG_print("resume dirty_bitmap\n"); 3209 rw_journal_sectors(ic, REQ_OP_READ, 0, 3210 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3211 if (ic->mode == 'B') { 3212 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && 3213 !ic->reset_recalculate_flag) { 3214 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal); 3215 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal); 3216 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, 3217 BITMAP_OP_TEST_ALL_CLEAR)) { 3218 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 3219 ic->sb->recalc_sector = cpu_to_le64(0); 3220 } 3221 } else { 3222 DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n", 3223 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit); 3224 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; 3225 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); 3226 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); 3227 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET); 3228 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, 3229 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3230 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 3231 ic->sb->recalc_sector = cpu_to_le64(0); 3232 } 3233 } else { 3234 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && 3235 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) || 3236 ic->reset_recalculate_flag) { 3237 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 3238 ic->sb->recalc_sector = cpu_to_le64(0); 3239 } 3240 init_journal(ic, 0, ic->journal_sections, 0); 3241 replay_journal(ic); 3242 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); 3243 } 3244 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 3245 if (unlikely(r)) 3246 dm_integrity_io_error(ic, "writing superblock", r); 3247 } else { 3248 replay_journal(ic); 3249 if (ic->reset_recalculate_flag) { 3250 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 3251 ic->sb->recalc_sector = cpu_to_le64(0); 3252 } 3253 if (ic->mode == 'B') { 3254 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); 3255 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; 3256 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 3257 if (unlikely(r)) 3258 dm_integrity_io_error(ic, "writing superblock", r); 3259 3260 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); 3261 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); 3262 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); 3263 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && 3264 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) { 3265 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector), 3266 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); 3267 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector), 3268 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); 3269 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector), 3270 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); 3271 } 3272 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0, 3273 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 3274 } 3275 } 3276 3277 DEBUG_print("testing recalc: %x\n", ic->sb->flags); 3278 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 3279 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector); 3280 3281 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors); 3282 if (recalc_pos < ic->provided_data_sectors) { 3283 queue_work(ic->recalc_wq, &ic->recalc_work); 3284 } else if (recalc_pos > ic->provided_data_sectors) { 3285 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors); 3286 recalc_write_super(ic); 3287 } 3288 } 3289 3290 ic->reboot_notifier.notifier_call = dm_integrity_reboot; 3291 ic->reboot_notifier.next = NULL; 3292 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */ 3293 WARN_ON(register_reboot_notifier(&ic->reboot_notifier)); 3294 3295 #if 0 3296 /* set to 1 to stress test synchronous mode */ 3297 dm_integrity_enter_synchronous_mode(ic); 3298 #endif 3299 } 3300 3301 static void dm_integrity_status(struct dm_target *ti, status_type_t type, 3302 unsigned int status_flags, char *result, unsigned int maxlen) 3303 { 3304 struct dm_integrity_c *ic = ti->private; 3305 unsigned int arg_count; 3306 size_t sz = 0; 3307 3308 switch (type) { 3309 case STATUSTYPE_INFO: 3310 DMEMIT("%llu %llu", 3311 (unsigned long long)atomic64_read(&ic->number_of_mismatches), 3312 ic->provided_data_sectors); 3313 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) 3314 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector)); 3315 else 3316 DMEMIT(" -"); 3317 break; 3318 3319 case STATUSTYPE_TABLE: { 3320 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; 3321 3322 watermark_percentage += ic->journal_entries / 2; 3323 do_div(watermark_percentage, ic->journal_entries); 3324 arg_count = 3; 3325 arg_count += !!ic->meta_dev; 3326 arg_count += ic->sectors_per_block != 1; 3327 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)); 3328 arg_count += ic->reset_recalculate_flag; 3329 arg_count += ic->discard; 3330 arg_count += ic->mode == 'J'; 3331 arg_count += ic->mode == 'J'; 3332 arg_count += ic->mode == 'B'; 3333 arg_count += ic->mode == 'B'; 3334 arg_count += !!ic->internal_hash_alg.alg_string; 3335 arg_count += !!ic->journal_crypt_alg.alg_string; 3336 arg_count += !!ic->journal_mac_alg.alg_string; 3337 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0; 3338 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0; 3339 arg_count += ic->legacy_recalculate; 3340 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start, 3341 ic->tag_size, ic->mode, arg_count); 3342 if (ic->meta_dev) 3343 DMEMIT(" meta_device:%s", ic->meta_dev->name); 3344 if (ic->sectors_per_block != 1) 3345 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); 3346 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) 3347 DMEMIT(" recalculate"); 3348 if (ic->reset_recalculate_flag) 3349 DMEMIT(" reset_recalculate"); 3350 if (ic->discard) 3351 DMEMIT(" allow_discards"); 3352 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); 3353 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); 3354 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); 3355 if (ic->mode == 'J') { 3356 DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage); 3357 DMEMIT(" commit_time:%u", ic->autocommit_msec); 3358 } 3359 if (ic->mode == 'B') { 3360 DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit); 3361 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval)); 3362 } 3363 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) 3364 DMEMIT(" fix_padding"); 3365 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) 3366 DMEMIT(" fix_hmac"); 3367 if (ic->legacy_recalculate) 3368 DMEMIT(" legacy_recalculate"); 3369 3370 #define EMIT_ALG(a, n) \ 3371 do { \ 3372 if (ic->a.alg_string) { \ 3373 DMEMIT(" %s:%s", n, ic->a.alg_string); \ 3374 if (ic->a.key_string) \ 3375 DMEMIT(":%s", ic->a.key_string);\ 3376 } \ 3377 } while (0) 3378 EMIT_ALG(internal_hash_alg, "internal_hash"); 3379 EMIT_ALG(journal_crypt_alg, "journal_crypt"); 3380 EMIT_ALG(journal_mac_alg, "journal_mac"); 3381 break; 3382 } 3383 case STATUSTYPE_IMA: 3384 DMEMIT_TARGET_NAME_VERSION(ti->type); 3385 DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c", 3386 ic->dev->name, ic->start, ic->tag_size, ic->mode); 3387 3388 if (ic->meta_dev) 3389 DMEMIT(",meta_device=%s", ic->meta_dev->name); 3390 if (ic->sectors_per_block != 1) 3391 DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT); 3392 3393 DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ? 3394 'y' : 'n'); 3395 DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n'); 3396 DMEMIT(",fix_padding=%c", 3397 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n'); 3398 DMEMIT(",fix_hmac=%c", 3399 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n'); 3400 DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n'); 3401 3402 DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS); 3403 DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors); 3404 DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors); 3405 DMEMIT(";"); 3406 break; 3407 } 3408 } 3409 3410 static int dm_integrity_iterate_devices(struct dm_target *ti, 3411 iterate_devices_callout_fn fn, void *data) 3412 { 3413 struct dm_integrity_c *ic = ti->private; 3414 3415 if (!ic->meta_dev) 3416 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); 3417 else 3418 return fn(ti, ic->dev, 0, ti->len, data); 3419 } 3420 3421 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits) 3422 { 3423 struct dm_integrity_c *ic = ti->private; 3424 3425 if (ic->sectors_per_block > 1) { 3426 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; 3427 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; 3428 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); 3429 limits->dma_alignment = limits->logical_block_size - 1; 3430 } 3431 } 3432 3433 static void calculate_journal_section_size(struct dm_integrity_c *ic) 3434 { 3435 unsigned int sector_space = JOURNAL_SECTOR_DATA; 3436 3437 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections); 3438 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size, 3439 JOURNAL_ENTRY_ROUNDUP); 3440 3441 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) 3442 sector_space -= JOURNAL_MAC_PER_SECTOR; 3443 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size; 3444 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS; 3445 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS; 3446 ic->journal_entries = ic->journal_section_entries * ic->journal_sections; 3447 } 3448 3449 static int calculate_device_limits(struct dm_integrity_c *ic) 3450 { 3451 __u64 initial_sectors; 3452 3453 calculate_journal_section_size(ic); 3454 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections; 3455 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX) 3456 return -EINVAL; 3457 ic->initial_sectors = initial_sectors; 3458 3459 if (!ic->meta_dev) { 3460 sector_t last_sector, last_area, last_offset; 3461 3462 /* we have to maintain excessive padding for compatibility with existing volumes */ 3463 __u64 metadata_run_padding = 3464 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ? 3465 (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) : 3466 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS); 3467 3468 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block), 3469 metadata_run_padding) >> SECTOR_SHIFT; 3470 if (!(ic->metadata_run & (ic->metadata_run - 1))) 3471 ic->log2_metadata_run = __ffs(ic->metadata_run); 3472 else 3473 ic->log2_metadata_run = -1; 3474 3475 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset); 3476 last_sector = get_data_sector(ic, last_area, last_offset); 3477 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors) 3478 return -EINVAL; 3479 } else { 3480 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; 3481 3482 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1)) 3483 >> (ic->log2_buffer_sectors + SECTOR_SHIFT); 3484 meta_size <<= ic->log2_buffer_sectors; 3485 if (ic->initial_sectors + meta_size < ic->initial_sectors || 3486 ic->initial_sectors + meta_size > ic->meta_device_sectors) 3487 return -EINVAL; 3488 ic->metadata_run = 1; 3489 ic->log2_metadata_run = 0; 3490 } 3491 3492 return 0; 3493 } 3494 3495 static void get_provided_data_sectors(struct dm_integrity_c *ic) 3496 { 3497 if (!ic->meta_dev) { 3498 int test_bit; 3499 3500 ic->provided_data_sectors = 0; 3501 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) { 3502 __u64 prev_data_sectors = ic->provided_data_sectors; 3503 3504 ic->provided_data_sectors |= (sector_t)1 << test_bit; 3505 if (calculate_device_limits(ic)) 3506 ic->provided_data_sectors = prev_data_sectors; 3507 } 3508 } else { 3509 ic->provided_data_sectors = ic->data_device_sectors; 3510 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1); 3511 } 3512 } 3513 3514 static int initialize_superblock(struct dm_integrity_c *ic, 3515 unsigned int journal_sectors, unsigned int interleave_sectors) 3516 { 3517 unsigned int journal_sections; 3518 int test_bit; 3519 3520 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT); 3521 memcpy(ic->sb->magic, SB_MAGIC, 8); 3522 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size); 3523 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block); 3524 if (ic->journal_mac_alg.alg_string) 3525 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC); 3526 3527 calculate_journal_section_size(ic); 3528 journal_sections = journal_sectors / ic->journal_section_sectors; 3529 if (!journal_sections) 3530 journal_sections = 1; 3531 3532 if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) { 3533 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC); 3534 get_random_bytes(ic->sb->salt, SALT_SIZE); 3535 } 3536 3537 if (!ic->meta_dev) { 3538 if (ic->fix_padding) 3539 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING); 3540 ic->sb->journal_sections = cpu_to_le32(journal_sections); 3541 if (!interleave_sectors) 3542 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS; 3543 ic->sb->log2_interleave_sectors = __fls(interleave_sectors); 3544 ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); 3545 ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); 3546 3547 get_provided_data_sectors(ic); 3548 if (!ic->provided_data_sectors) 3549 return -EINVAL; 3550 } else { 3551 ic->sb->log2_interleave_sectors = 0; 3552 3553 get_provided_data_sectors(ic); 3554 if (!ic->provided_data_sectors) 3555 return -EINVAL; 3556 3557 try_smaller_buffer: 3558 ic->sb->journal_sections = cpu_to_le32(0); 3559 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) { 3560 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections); 3561 __u32 test_journal_sections = prev_journal_sections | (1U << test_bit); 3562 3563 if (test_journal_sections > journal_sections) 3564 continue; 3565 ic->sb->journal_sections = cpu_to_le32(test_journal_sections); 3566 if (calculate_device_limits(ic)) 3567 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections); 3568 3569 } 3570 if (!le32_to_cpu(ic->sb->journal_sections)) { 3571 if (ic->log2_buffer_sectors > 3) { 3572 ic->log2_buffer_sectors--; 3573 goto try_smaller_buffer; 3574 } 3575 return -EINVAL; 3576 } 3577 } 3578 3579 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); 3580 3581 sb_set_version(ic); 3582 3583 return 0; 3584 } 3585 3586 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) 3587 { 3588 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table)); 3589 struct blk_integrity bi; 3590 3591 memset(&bi, 0, sizeof(bi)); 3592 bi.profile = &dm_integrity_profile; 3593 bi.tuple_size = ic->tag_size; 3594 bi.tag_size = bi.tuple_size; 3595 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; 3596 3597 blk_integrity_register(disk, &bi); 3598 blk_queue_max_integrity_segments(disk->queue, UINT_MAX); 3599 } 3600 3601 static void dm_integrity_free_page_list(struct page_list *pl) 3602 { 3603 unsigned int i; 3604 3605 if (!pl) 3606 return; 3607 for (i = 0; pl[i].page; i++) 3608 __free_page(pl[i].page); 3609 kvfree(pl); 3610 } 3611 3612 static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages) 3613 { 3614 struct page_list *pl; 3615 unsigned int i; 3616 3617 pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO); 3618 if (!pl) 3619 return NULL; 3620 3621 for (i = 0; i < n_pages; i++) { 3622 pl[i].page = alloc_page(GFP_KERNEL); 3623 if (!pl[i].page) { 3624 dm_integrity_free_page_list(pl); 3625 return NULL; 3626 } 3627 if (i) 3628 pl[i - 1].next = &pl[i]; 3629 } 3630 pl[i].page = NULL; 3631 pl[i].next = NULL; 3632 3633 return pl; 3634 } 3635 3636 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl) 3637 { 3638 unsigned int i; 3639 3640 for (i = 0; i < ic->journal_sections; i++) 3641 kvfree(sl[i]); 3642 kvfree(sl); 3643 } 3644 3645 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, 3646 struct page_list *pl) 3647 { 3648 struct scatterlist **sl; 3649 unsigned int i; 3650 3651 sl = kvmalloc_array(ic->journal_sections, 3652 sizeof(struct scatterlist *), 3653 GFP_KERNEL | __GFP_ZERO); 3654 if (!sl) 3655 return NULL; 3656 3657 for (i = 0; i < ic->journal_sections; i++) { 3658 struct scatterlist *s; 3659 unsigned int start_index, start_offset; 3660 unsigned int end_index, end_offset; 3661 unsigned int n_pages; 3662 unsigned int idx; 3663 3664 page_list_location(ic, i, 0, &start_index, &start_offset); 3665 page_list_location(ic, i, ic->journal_section_sectors - 1, 3666 &end_index, &end_offset); 3667 3668 n_pages = (end_index - start_index + 1); 3669 3670 s = kvmalloc_array(n_pages, sizeof(struct scatterlist), 3671 GFP_KERNEL); 3672 if (!s) { 3673 dm_integrity_free_journal_scatterlist(ic, sl); 3674 return NULL; 3675 } 3676 3677 sg_init_table(s, n_pages); 3678 for (idx = start_index; idx <= end_index; idx++) { 3679 char *va = lowmem_page_address(pl[idx].page); 3680 unsigned int start = 0, end = PAGE_SIZE; 3681 3682 if (idx == start_index) 3683 start = start_offset; 3684 if (idx == end_index) 3685 end = end_offset + (1 << SECTOR_SHIFT); 3686 sg_set_buf(&s[idx - start_index], va + start, end - start); 3687 } 3688 3689 sl[i] = s; 3690 } 3691 3692 return sl; 3693 } 3694 3695 static void free_alg(struct alg_spec *a) 3696 { 3697 kfree_sensitive(a->alg_string); 3698 kfree_sensitive(a->key); 3699 memset(a, 0, sizeof(*a)); 3700 } 3701 3702 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval) 3703 { 3704 char *k; 3705 3706 free_alg(a); 3707 3708 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL); 3709 if (!a->alg_string) 3710 goto nomem; 3711 3712 k = strchr(a->alg_string, ':'); 3713 if (k) { 3714 *k = 0; 3715 a->key_string = k + 1; 3716 if (strlen(a->key_string) & 1) 3717 goto inval; 3718 3719 a->key_size = strlen(a->key_string) / 2; 3720 a->key = kmalloc(a->key_size, GFP_KERNEL); 3721 if (!a->key) 3722 goto nomem; 3723 if (hex2bin(a->key, a->key_string, a->key_size)) 3724 goto inval; 3725 } 3726 3727 return 0; 3728 inval: 3729 *error = error_inval; 3730 return -EINVAL; 3731 nomem: 3732 *error = "Out of memory for an argument"; 3733 return -ENOMEM; 3734 } 3735 3736 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error, 3737 char *error_alg, char *error_key) 3738 { 3739 int r; 3740 3741 if (a->alg_string) { 3742 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY); 3743 if (IS_ERR(*hash)) { 3744 *error = error_alg; 3745 r = PTR_ERR(*hash); 3746 *hash = NULL; 3747 return r; 3748 } 3749 3750 if (a->key) { 3751 r = crypto_shash_setkey(*hash, a->key, a->key_size); 3752 if (r) { 3753 *error = error_key; 3754 return r; 3755 } 3756 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) { 3757 *error = error_key; 3758 return -ENOKEY; 3759 } 3760 } 3761 3762 return 0; 3763 } 3764 3765 static int create_journal(struct dm_integrity_c *ic, char **error) 3766 { 3767 int r = 0; 3768 unsigned int i; 3769 __u64 journal_pages, journal_desc_size, journal_tree_size; 3770 unsigned char *crypt_data = NULL, *crypt_iv = NULL; 3771 struct skcipher_request *req = NULL; 3772 3773 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); 3774 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); 3775 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL); 3776 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL); 3777 3778 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, 3779 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT); 3780 journal_desc_size = journal_pages * sizeof(struct page_list); 3781 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) { 3782 *error = "Journal doesn't fit into memory"; 3783 r = -ENOMEM; 3784 goto bad; 3785 } 3786 ic->journal_pages = journal_pages; 3787 3788 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages); 3789 if (!ic->journal) { 3790 *error = "Could not allocate memory for journal"; 3791 r = -ENOMEM; 3792 goto bad; 3793 } 3794 if (ic->journal_crypt_alg.alg_string) { 3795 unsigned int ivsize, blocksize; 3796 struct journal_completion comp; 3797 3798 comp.ic = ic; 3799 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY); 3800 if (IS_ERR(ic->journal_crypt)) { 3801 *error = "Invalid journal cipher"; 3802 r = PTR_ERR(ic->journal_crypt); 3803 ic->journal_crypt = NULL; 3804 goto bad; 3805 } 3806 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); 3807 blocksize = crypto_skcipher_blocksize(ic->journal_crypt); 3808 3809 if (ic->journal_crypt_alg.key) { 3810 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key, 3811 ic->journal_crypt_alg.key_size); 3812 if (r) { 3813 *error = "Error setting encryption key"; 3814 goto bad; 3815 } 3816 } 3817 DEBUG_print("cipher %s, block size %u iv size %u\n", 3818 ic->journal_crypt_alg.alg_string, blocksize, ivsize); 3819 3820 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages); 3821 if (!ic->journal_io) { 3822 *error = "Could not allocate memory for journal io"; 3823 r = -ENOMEM; 3824 goto bad; 3825 } 3826 3827 if (blocksize == 1) { 3828 struct scatterlist *sg; 3829 3830 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); 3831 if (!req) { 3832 *error = "Could not allocate crypt request"; 3833 r = -ENOMEM; 3834 goto bad; 3835 } 3836 3837 crypt_iv = kzalloc(ivsize, GFP_KERNEL); 3838 if (!crypt_iv) { 3839 *error = "Could not allocate iv"; 3840 r = -ENOMEM; 3841 goto bad; 3842 } 3843 3844 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages); 3845 if (!ic->journal_xor) { 3846 *error = "Could not allocate memory for journal xor"; 3847 r = -ENOMEM; 3848 goto bad; 3849 } 3850 3851 sg = kvmalloc_array(ic->journal_pages + 1, 3852 sizeof(struct scatterlist), 3853 GFP_KERNEL); 3854 if (!sg) { 3855 *error = "Unable to allocate sg list"; 3856 r = -ENOMEM; 3857 goto bad; 3858 } 3859 sg_init_table(sg, ic->journal_pages + 1); 3860 for (i = 0; i < ic->journal_pages; i++) { 3861 char *va = lowmem_page_address(ic->journal_xor[i].page); 3862 3863 clear_page(va); 3864 sg_set_buf(&sg[i], va, PAGE_SIZE); 3865 } 3866 sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids)); 3867 3868 skcipher_request_set_crypt(req, sg, sg, 3869 PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv); 3870 init_completion(&comp.comp); 3871 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 3872 if (do_crypt(true, req, &comp)) 3873 wait_for_completion(&comp.comp); 3874 kvfree(sg); 3875 r = dm_integrity_failed(ic); 3876 if (r) { 3877 *error = "Unable to encrypt journal"; 3878 goto bad; 3879 } 3880 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data"); 3881 3882 crypto_free_skcipher(ic->journal_crypt); 3883 ic->journal_crypt = NULL; 3884 } else { 3885 unsigned int crypt_len = roundup(ivsize, blocksize); 3886 3887 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); 3888 if (!req) { 3889 *error = "Could not allocate crypt request"; 3890 r = -ENOMEM; 3891 goto bad; 3892 } 3893 3894 crypt_iv = kmalloc(ivsize, GFP_KERNEL); 3895 if (!crypt_iv) { 3896 *error = "Could not allocate iv"; 3897 r = -ENOMEM; 3898 goto bad; 3899 } 3900 3901 crypt_data = kmalloc(crypt_len, GFP_KERNEL); 3902 if (!crypt_data) { 3903 *error = "Unable to allocate crypt data"; 3904 r = -ENOMEM; 3905 goto bad; 3906 } 3907 3908 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); 3909 if (!ic->journal_scatterlist) { 3910 *error = "Unable to allocate sg list"; 3911 r = -ENOMEM; 3912 goto bad; 3913 } 3914 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io); 3915 if (!ic->journal_io_scatterlist) { 3916 *error = "Unable to allocate sg list"; 3917 r = -ENOMEM; 3918 goto bad; 3919 } 3920 ic->sk_requests = kvmalloc_array(ic->journal_sections, 3921 sizeof(struct skcipher_request *), 3922 GFP_KERNEL | __GFP_ZERO); 3923 if (!ic->sk_requests) { 3924 *error = "Unable to allocate sk requests"; 3925 r = -ENOMEM; 3926 goto bad; 3927 } 3928 for (i = 0; i < ic->journal_sections; i++) { 3929 struct scatterlist sg; 3930 struct skcipher_request *section_req; 3931 __le32 section_le = cpu_to_le32(i); 3932 3933 memset(crypt_iv, 0x00, ivsize); 3934 memset(crypt_data, 0x00, crypt_len); 3935 memcpy(crypt_data, §ion_le, min_t(size_t, crypt_len, sizeof(section_le))); 3936 3937 sg_init_one(&sg, crypt_data, crypt_len); 3938 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv); 3939 init_completion(&comp.comp); 3940 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 3941 if (do_crypt(true, req, &comp)) 3942 wait_for_completion(&comp.comp); 3943 3944 r = dm_integrity_failed(ic); 3945 if (r) { 3946 *error = "Unable to generate iv"; 3947 goto bad; 3948 } 3949 3950 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); 3951 if (!section_req) { 3952 *error = "Unable to allocate crypt request"; 3953 r = -ENOMEM; 3954 goto bad; 3955 } 3956 section_req->iv = kmalloc_array(ivsize, 2, 3957 GFP_KERNEL); 3958 if (!section_req->iv) { 3959 skcipher_request_free(section_req); 3960 *error = "Unable to allocate iv"; 3961 r = -ENOMEM; 3962 goto bad; 3963 } 3964 memcpy(section_req->iv + ivsize, crypt_data, ivsize); 3965 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT; 3966 ic->sk_requests[i] = section_req; 3967 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i); 3968 } 3969 } 3970 } 3971 3972 for (i = 0; i < N_COMMIT_IDS; i++) { 3973 unsigned int j; 3974 3975 retest_commit_id: 3976 for (j = 0; j < i; j++) { 3977 if (ic->commit_ids[j] == ic->commit_ids[i]) { 3978 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1); 3979 goto retest_commit_id; 3980 } 3981 } 3982 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]); 3983 } 3984 3985 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node); 3986 if (journal_tree_size > ULONG_MAX) { 3987 *error = "Journal doesn't fit into memory"; 3988 r = -ENOMEM; 3989 goto bad; 3990 } 3991 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); 3992 if (!ic->journal_tree) { 3993 *error = "Could not allocate memory for journal tree"; 3994 r = -ENOMEM; 3995 } 3996 bad: 3997 kfree(crypt_data); 3998 kfree(crypt_iv); 3999 skcipher_request_free(req); 4000 4001 return r; 4002 } 4003 4004 /* 4005 * Construct a integrity mapping 4006 * 4007 * Arguments: 4008 * device 4009 * offset from the start of the device 4010 * tag size 4011 * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode 4012 * number of optional arguments 4013 * optional arguments: 4014 * journal_sectors 4015 * interleave_sectors 4016 * buffer_sectors 4017 * journal_watermark 4018 * commit_time 4019 * meta_device 4020 * block_size 4021 * sectors_per_bit 4022 * bitmap_flush_interval 4023 * internal_hash 4024 * journal_crypt 4025 * journal_mac 4026 * recalculate 4027 */ 4028 static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv) 4029 { 4030 struct dm_integrity_c *ic; 4031 char dummy; 4032 int r; 4033 unsigned int extra_args; 4034 struct dm_arg_set as; 4035 static const struct dm_arg _args[] = { 4036 {0, 18, "Invalid number of feature args"}, 4037 }; 4038 unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; 4039 bool should_write_sb; 4040 __u64 threshold; 4041 unsigned long long start; 4042 __s8 log2_sectors_per_bitmap_bit = -1; 4043 __s8 log2_blocks_per_bitmap_bit; 4044 __u64 bits_in_journal; 4045 __u64 n_bitmap_bits; 4046 4047 #define DIRECT_ARGUMENTS 4 4048 4049 if (argc <= DIRECT_ARGUMENTS) { 4050 ti->error = "Invalid argument count"; 4051 return -EINVAL; 4052 } 4053 4054 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL); 4055 if (!ic) { 4056 ti->error = "Cannot allocate integrity context"; 4057 return -ENOMEM; 4058 } 4059 ti->private = ic; 4060 ti->per_io_data_size = sizeof(struct dm_integrity_io); 4061 ic->ti = ti; 4062 4063 ic->in_progress = RB_ROOT; 4064 INIT_LIST_HEAD(&ic->wait_list); 4065 init_waitqueue_head(&ic->endio_wait); 4066 bio_list_init(&ic->flush_bio_list); 4067 init_waitqueue_head(&ic->copy_to_journal_wait); 4068 init_completion(&ic->crypto_backoff); 4069 atomic64_set(&ic->number_of_mismatches, 0); 4070 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL; 4071 4072 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); 4073 if (r) { 4074 ti->error = "Device lookup failed"; 4075 goto bad; 4076 } 4077 4078 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { 4079 ti->error = "Invalid starting offset"; 4080 r = -EINVAL; 4081 goto bad; 4082 } 4083 ic->start = start; 4084 4085 if (strcmp(argv[2], "-")) { 4086 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) { 4087 ti->error = "Invalid tag size"; 4088 r = -EINVAL; 4089 goto bad; 4090 } 4091 } 4092 4093 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") || 4094 !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) { 4095 ic->mode = argv[3][0]; 4096 } else { 4097 ti->error = "Invalid mode (expecting J, B, D, R)"; 4098 r = -EINVAL; 4099 goto bad; 4100 } 4101 4102 journal_sectors = 0; 4103 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS; 4104 buffer_sectors = DEFAULT_BUFFER_SECTORS; 4105 journal_watermark = DEFAULT_JOURNAL_WATERMARK; 4106 sync_msec = DEFAULT_SYNC_MSEC; 4107 ic->sectors_per_block = 1; 4108 4109 as.argc = argc - DIRECT_ARGUMENTS; 4110 as.argv = argv + DIRECT_ARGUMENTS; 4111 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error); 4112 if (r) 4113 goto bad; 4114 4115 while (extra_args--) { 4116 const char *opt_string; 4117 unsigned int val; 4118 unsigned long long llval; 4119 4120 opt_string = dm_shift_arg(&as); 4121 if (!opt_string) { 4122 r = -EINVAL; 4123 ti->error = "Not enough feature arguments"; 4124 goto bad; 4125 } 4126 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1) 4127 journal_sectors = val ? val : 1; 4128 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1) 4129 interleave_sectors = val; 4130 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1) 4131 buffer_sectors = val; 4132 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100) 4133 journal_watermark = val; 4134 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) 4135 sync_msec = val; 4136 else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) { 4137 if (ic->meta_dev) { 4138 dm_put_device(ti, ic->meta_dev); 4139 ic->meta_dev = NULL; 4140 } 4141 r = dm_get_device(ti, strchr(opt_string, ':') + 1, 4142 dm_table_get_mode(ti->table), &ic->meta_dev); 4143 if (r) { 4144 ti->error = "Device lookup failed"; 4145 goto bad; 4146 } 4147 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) { 4148 if (val < 1 << SECTOR_SHIFT || 4149 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT || 4150 (val & (val - 1))) { 4151 r = -EINVAL; 4152 ti->error = "Invalid block_size argument"; 4153 goto bad; 4154 } 4155 ic->sectors_per_block = val >> SECTOR_SHIFT; 4156 } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) { 4157 log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval); 4158 } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) { 4159 if (val >= (uint64_t)UINT_MAX * 1000 / HZ) { 4160 r = -EINVAL; 4161 ti->error = "Invalid bitmap_flush_interval argument"; 4162 goto bad; 4163 } 4164 ic->bitmap_flush_interval = msecs_to_jiffies(val); 4165 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { 4166 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, 4167 "Invalid internal_hash argument"); 4168 if (r) 4169 goto bad; 4170 } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { 4171 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, 4172 "Invalid journal_crypt argument"); 4173 if (r) 4174 goto bad; 4175 } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { 4176 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, 4177 "Invalid journal_mac argument"); 4178 if (r) 4179 goto bad; 4180 } else if (!strcmp(opt_string, "recalculate")) { 4181 ic->recalculate_flag = true; 4182 } else if (!strcmp(opt_string, "reset_recalculate")) { 4183 ic->recalculate_flag = true; 4184 ic->reset_recalculate_flag = true; 4185 } else if (!strcmp(opt_string, "allow_discards")) { 4186 ic->discard = true; 4187 } else if (!strcmp(opt_string, "fix_padding")) { 4188 ic->fix_padding = true; 4189 } else if (!strcmp(opt_string, "fix_hmac")) { 4190 ic->fix_hmac = true; 4191 } else if (!strcmp(opt_string, "legacy_recalculate")) { 4192 ic->legacy_recalculate = true; 4193 } else { 4194 r = -EINVAL; 4195 ti->error = "Invalid argument"; 4196 goto bad; 4197 } 4198 } 4199 4200 ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev); 4201 if (!ic->meta_dev) 4202 ic->meta_device_sectors = ic->data_device_sectors; 4203 else 4204 ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev); 4205 4206 if (!journal_sectors) { 4207 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS, 4208 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR); 4209 } 4210 4211 if (!buffer_sectors) 4212 buffer_sectors = 1; 4213 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT); 4214 4215 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error, 4216 "Invalid internal hash", "Error setting internal hash key"); 4217 if (r) 4218 goto bad; 4219 4220 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error, 4221 "Invalid journal mac", "Error setting journal mac key"); 4222 if (r) 4223 goto bad; 4224 4225 if (!ic->tag_size) { 4226 if (!ic->internal_hash) { 4227 ti->error = "Unknown tag size"; 4228 r = -EINVAL; 4229 goto bad; 4230 } 4231 ic->tag_size = crypto_shash_digestsize(ic->internal_hash); 4232 } 4233 if (ic->tag_size > MAX_TAG_SIZE) { 4234 ti->error = "Too big tag size"; 4235 r = -EINVAL; 4236 goto bad; 4237 } 4238 if (!(ic->tag_size & (ic->tag_size - 1))) 4239 ic->log2_tag_size = __ffs(ic->tag_size); 4240 else 4241 ic->log2_tag_size = -1; 4242 4243 if (ic->mode == 'B' && !ic->internal_hash) { 4244 r = -EINVAL; 4245 ti->error = "Bitmap mode can be only used with internal hash"; 4246 goto bad; 4247 } 4248 4249 if (ic->discard && !ic->internal_hash) { 4250 r = -EINVAL; 4251 ti->error = "Discard can be only used with internal hash"; 4252 goto bad; 4253 } 4254 4255 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec); 4256 ic->autocommit_msec = sync_msec; 4257 timer_setup(&ic->autocommit_timer, autocommit_fn, 0); 4258 4259 ic->io = dm_io_client_create(); 4260 if (IS_ERR(ic->io)) { 4261 r = PTR_ERR(ic->io); 4262 ic->io = NULL; 4263 ti->error = "Cannot allocate dm io"; 4264 goto bad; 4265 } 4266 4267 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache); 4268 if (r) { 4269 ti->error = "Cannot allocate mempool"; 4270 goto bad; 4271 } 4272 4273 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", 4274 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE); 4275 if (!ic->metadata_wq) { 4276 ti->error = "Cannot allocate workqueue"; 4277 r = -ENOMEM; 4278 goto bad; 4279 } 4280 4281 /* 4282 * If this workqueue weren't ordered, it would cause bio reordering 4283 * and reduced performance. 4284 */ 4285 ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM); 4286 if (!ic->wait_wq) { 4287 ti->error = "Cannot allocate workqueue"; 4288 r = -ENOMEM; 4289 goto bad; 4290 } 4291 4292 ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM, 4293 METADATA_WORKQUEUE_MAX_ACTIVE); 4294 if (!ic->offload_wq) { 4295 ti->error = "Cannot allocate workqueue"; 4296 r = -ENOMEM; 4297 goto bad; 4298 } 4299 4300 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); 4301 if (!ic->commit_wq) { 4302 ti->error = "Cannot allocate workqueue"; 4303 r = -ENOMEM; 4304 goto bad; 4305 } 4306 INIT_WORK(&ic->commit_work, integrity_commit); 4307 4308 if (ic->mode == 'J' || ic->mode == 'B') { 4309 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1); 4310 if (!ic->writer_wq) { 4311 ti->error = "Cannot allocate workqueue"; 4312 r = -ENOMEM; 4313 goto bad; 4314 } 4315 INIT_WORK(&ic->writer_work, integrity_writer); 4316 } 4317 4318 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL); 4319 if (!ic->sb) { 4320 r = -ENOMEM; 4321 ti->error = "Cannot allocate superblock area"; 4322 goto bad; 4323 } 4324 4325 r = sync_rw_sb(ic, REQ_OP_READ); 4326 if (r) { 4327 ti->error = "Error reading superblock"; 4328 goto bad; 4329 } 4330 should_write_sb = false; 4331 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) { 4332 if (ic->mode != 'R') { 4333 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) { 4334 r = -EINVAL; 4335 ti->error = "The device is not initialized"; 4336 goto bad; 4337 } 4338 } 4339 4340 r = initialize_superblock(ic, journal_sectors, interleave_sectors); 4341 if (r) { 4342 ti->error = "Could not initialize superblock"; 4343 goto bad; 4344 } 4345 if (ic->mode != 'R') 4346 should_write_sb = true; 4347 } 4348 4349 if (!ic->sb->version || ic->sb->version > SB_VERSION_5) { 4350 r = -EINVAL; 4351 ti->error = "Unknown version"; 4352 goto bad; 4353 } 4354 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) { 4355 r = -EINVAL; 4356 ti->error = "Tag size doesn't match the information in superblock"; 4357 goto bad; 4358 } 4359 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) { 4360 r = -EINVAL; 4361 ti->error = "Block size doesn't match the information in superblock"; 4362 goto bad; 4363 } 4364 if (!le32_to_cpu(ic->sb->journal_sections)) { 4365 r = -EINVAL; 4366 ti->error = "Corrupted superblock, journal_sections is 0"; 4367 goto bad; 4368 } 4369 /* make sure that ti->max_io_len doesn't overflow */ 4370 if (!ic->meta_dev) { 4371 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || 4372 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { 4373 r = -EINVAL; 4374 ti->error = "Invalid interleave_sectors in the superblock"; 4375 goto bad; 4376 } 4377 } else { 4378 if (ic->sb->log2_interleave_sectors) { 4379 r = -EINVAL; 4380 ti->error = "Invalid interleave_sectors in the superblock"; 4381 goto bad; 4382 } 4383 } 4384 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) { 4385 r = -EINVAL; 4386 ti->error = "Journal mac mismatch"; 4387 goto bad; 4388 } 4389 4390 get_provided_data_sectors(ic); 4391 if (!ic->provided_data_sectors) { 4392 r = -EINVAL; 4393 ti->error = "The device is too small"; 4394 goto bad; 4395 } 4396 4397 try_smaller_buffer: 4398 r = calculate_device_limits(ic); 4399 if (r) { 4400 if (ic->meta_dev) { 4401 if (ic->log2_buffer_sectors > 3) { 4402 ic->log2_buffer_sectors--; 4403 goto try_smaller_buffer; 4404 } 4405 } 4406 ti->error = "The device is too small"; 4407 goto bad; 4408 } 4409 4410 if (log2_sectors_per_bitmap_bit < 0) 4411 log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT); 4412 if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block) 4413 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block; 4414 4415 bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3); 4416 if (bits_in_journal > UINT_MAX) 4417 bits_in_journal = UINT_MAX; 4418 while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit) 4419 log2_sectors_per_bitmap_bit++; 4420 4421 log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block; 4422 ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; 4423 if (should_write_sb) 4424 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; 4425 4426 n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) 4427 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit; 4428 ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8); 4429 4430 if (!ic->meta_dev) 4431 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run)); 4432 4433 if (ti->len > ic->provided_data_sectors) { 4434 r = -EINVAL; 4435 ti->error = "Not enough provided sectors for requested mapping size"; 4436 goto bad; 4437 } 4438 4439 4440 threshold = (__u64)ic->journal_entries * (100 - journal_watermark); 4441 threshold += 50; 4442 do_div(threshold, 100); 4443 ic->free_sectors_threshold = threshold; 4444 4445 DEBUG_print("initialized:\n"); 4446 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size)); 4447 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size); 4448 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector); 4449 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries); 4450 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors); 4451 DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections)); 4452 DEBUG_print(" journal_entries %u\n", ic->journal_entries); 4453 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); 4454 DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev)); 4455 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); 4456 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); 4457 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); 4458 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors); 4459 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); 4460 DEBUG_print(" bits_in_journal %llu\n", bits_in_journal); 4461 4462 if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) { 4463 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 4464 ic->sb->recalc_sector = cpu_to_le64(0); 4465 } 4466 4467 if (ic->internal_hash) { 4468 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); 4469 if (!ic->recalc_wq) { 4470 ti->error = "Cannot allocate workqueue"; 4471 r = -ENOMEM; 4472 goto bad; 4473 } 4474 INIT_WORK(&ic->recalc_work, integrity_recalc); 4475 } else { 4476 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 4477 ti->error = "Recalculate can only be specified with internal_hash"; 4478 r = -EINVAL; 4479 goto bad; 4480 } 4481 } 4482 4483 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && 4484 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors && 4485 dm_integrity_disable_recalculate(ic)) { 4486 ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\""; 4487 r = -EOPNOTSUPP; 4488 goto bad; 4489 } 4490 4491 ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, 4492 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0); 4493 if (IS_ERR(ic->bufio)) { 4494 r = PTR_ERR(ic->bufio); 4495 ti->error = "Cannot initialize dm-bufio"; 4496 ic->bufio = NULL; 4497 goto bad; 4498 } 4499 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); 4500 4501 if (ic->mode != 'R') { 4502 r = create_journal(ic, &ti->error); 4503 if (r) 4504 goto bad; 4505 4506 } 4507 4508 if (ic->mode == 'B') { 4509 unsigned int i; 4510 unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); 4511 4512 ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); 4513 if (!ic->recalc_bitmap) { 4514 r = -ENOMEM; 4515 goto bad; 4516 } 4517 ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); 4518 if (!ic->may_write_bitmap) { 4519 r = -ENOMEM; 4520 goto bad; 4521 } 4522 ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL); 4523 if (!ic->bbs) { 4524 r = -ENOMEM; 4525 goto bad; 4526 } 4527 INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work); 4528 for (i = 0; i < ic->n_bitmap_blocks; i++) { 4529 struct bitmap_block_status *bbs = &ic->bbs[i]; 4530 unsigned int sector, pl_index, pl_offset; 4531 4532 INIT_WORK(&bbs->work, bitmap_block_work); 4533 bbs->ic = ic; 4534 bbs->idx = i; 4535 bio_list_init(&bbs->bio_queue); 4536 spin_lock_init(&bbs->bio_queue_lock); 4537 4538 sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT); 4539 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 4540 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 4541 4542 bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset; 4543 } 4544 } 4545 4546 if (should_write_sb) { 4547 init_journal(ic, 0, ic->journal_sections, 0); 4548 r = dm_integrity_failed(ic); 4549 if (unlikely(r)) { 4550 ti->error = "Error initializing journal"; 4551 goto bad; 4552 } 4553 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA); 4554 if (r) { 4555 ti->error = "Error initializing superblock"; 4556 goto bad; 4557 } 4558 ic->just_formatted = true; 4559 } 4560 4561 if (!ic->meta_dev) { 4562 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors); 4563 if (r) 4564 goto bad; 4565 } 4566 if (ic->mode == 'B') { 4567 unsigned int max_io_len; 4568 4569 max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8); 4570 if (!max_io_len) 4571 max_io_len = 1U << 31; 4572 DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len); 4573 if (!ti->max_io_len || ti->max_io_len > max_io_len) { 4574 r = dm_set_target_max_io_len(ti, max_io_len); 4575 if (r) 4576 goto bad; 4577 } 4578 } 4579 4580 if (!ic->internal_hash) 4581 dm_integrity_set(ti, ic); 4582 4583 ti->num_flush_bios = 1; 4584 ti->flush_supported = true; 4585 if (ic->discard) 4586 ti->num_discard_bios = 1; 4587 4588 dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1); 4589 return 0; 4590 4591 bad: 4592 dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0); 4593 dm_integrity_dtr(ti); 4594 return r; 4595 } 4596 4597 static void dm_integrity_dtr(struct dm_target *ti) 4598 { 4599 struct dm_integrity_c *ic = ti->private; 4600 4601 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); 4602 BUG_ON(!list_empty(&ic->wait_list)); 4603 4604 if (ic->mode == 'B') 4605 cancel_delayed_work_sync(&ic->bitmap_flush_work); 4606 if (ic->metadata_wq) 4607 destroy_workqueue(ic->metadata_wq); 4608 if (ic->wait_wq) 4609 destroy_workqueue(ic->wait_wq); 4610 if (ic->offload_wq) 4611 destroy_workqueue(ic->offload_wq); 4612 if (ic->commit_wq) 4613 destroy_workqueue(ic->commit_wq); 4614 if (ic->writer_wq) 4615 destroy_workqueue(ic->writer_wq); 4616 if (ic->recalc_wq) 4617 destroy_workqueue(ic->recalc_wq); 4618 kvfree(ic->bbs); 4619 if (ic->bufio) 4620 dm_bufio_client_destroy(ic->bufio); 4621 mempool_exit(&ic->journal_io_mempool); 4622 if (ic->io) 4623 dm_io_client_destroy(ic->io); 4624 if (ic->dev) 4625 dm_put_device(ti, ic->dev); 4626 if (ic->meta_dev) 4627 dm_put_device(ti, ic->meta_dev); 4628 dm_integrity_free_page_list(ic->journal); 4629 dm_integrity_free_page_list(ic->journal_io); 4630 dm_integrity_free_page_list(ic->journal_xor); 4631 dm_integrity_free_page_list(ic->recalc_bitmap); 4632 dm_integrity_free_page_list(ic->may_write_bitmap); 4633 if (ic->journal_scatterlist) 4634 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist); 4635 if (ic->journal_io_scatterlist) 4636 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist); 4637 if (ic->sk_requests) { 4638 unsigned int i; 4639 4640 for (i = 0; i < ic->journal_sections; i++) { 4641 struct skcipher_request *req; 4642 4643 req = ic->sk_requests[i]; 4644 if (req) { 4645 kfree_sensitive(req->iv); 4646 skcipher_request_free(req); 4647 } 4648 } 4649 kvfree(ic->sk_requests); 4650 } 4651 kvfree(ic->journal_tree); 4652 if (ic->sb) 4653 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT); 4654 4655 if (ic->internal_hash) 4656 crypto_free_shash(ic->internal_hash); 4657 free_alg(&ic->internal_hash_alg); 4658 4659 if (ic->journal_crypt) 4660 crypto_free_skcipher(ic->journal_crypt); 4661 free_alg(&ic->journal_crypt_alg); 4662 4663 if (ic->journal_mac) 4664 crypto_free_shash(ic->journal_mac); 4665 free_alg(&ic->journal_mac_alg); 4666 4667 kfree(ic); 4668 dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1); 4669 } 4670 4671 static struct target_type integrity_target = { 4672 .name = "integrity", 4673 .version = {1, 10, 0}, 4674 .module = THIS_MODULE, 4675 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, 4676 .ctr = dm_integrity_ctr, 4677 .dtr = dm_integrity_dtr, 4678 .map = dm_integrity_map, 4679 .postsuspend = dm_integrity_postsuspend, 4680 .resume = dm_integrity_resume, 4681 .status = dm_integrity_status, 4682 .iterate_devices = dm_integrity_iterate_devices, 4683 .io_hints = dm_integrity_io_hints, 4684 }; 4685 4686 static int __init dm_integrity_init(void) 4687 { 4688 int r; 4689 4690 journal_io_cache = kmem_cache_create("integrity_journal_io", 4691 sizeof(struct journal_io), 0, 0, NULL); 4692 if (!journal_io_cache) { 4693 DMERR("can't allocate journal io cache"); 4694 return -ENOMEM; 4695 } 4696 4697 r = dm_register_target(&integrity_target); 4698 if (r < 0) { 4699 kmem_cache_destroy(journal_io_cache); 4700 return r; 4701 } 4702 4703 return 0; 4704 } 4705 4706 static void __exit dm_integrity_exit(void) 4707 { 4708 dm_unregister_target(&integrity_target); 4709 kmem_cache_destroy(journal_io_cache); 4710 } 4711 4712 module_init(dm_integrity_init); 4713 module_exit(dm_integrity_exit); 4714 4715 MODULE_AUTHOR("Milan Broz"); 4716 MODULE_AUTHOR("Mikulas Patocka"); 4717 MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension"); 4718 MODULE_LICENSE("GPL"); 4719