1 /* 2 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2016-2017 Milan Broz 4 * Copyright (C) 2016-2017 Mikulas Patocka 5 * 6 * This file is released under the GPL. 7 */ 8 9 #include <linux/module.h> 10 #include <linux/device-mapper.h> 11 #include <linux/dm-io.h> 12 #include <linux/vmalloc.h> 13 #include <linux/sort.h> 14 #include <linux/rbtree.h> 15 #include <linux/delay.h> 16 #include <linux/random.h> 17 #include <crypto/hash.h> 18 #include <crypto/skcipher.h> 19 #include <linux/async_tx.h> 20 #include "dm-bufio.h" 21 22 #define DM_MSG_PREFIX "integrity" 23 24 #define DEFAULT_INTERLEAVE_SECTORS 32768 25 #define DEFAULT_JOURNAL_SIZE_FACTOR 7 26 #define DEFAULT_BUFFER_SECTORS 128 27 #define DEFAULT_JOURNAL_WATERMARK 50 28 #define DEFAULT_SYNC_MSEC 10000 29 #define DEFAULT_MAX_JOURNAL_SECTORS 131072 30 #define MIN_LOG2_INTERLEAVE_SECTORS 3 31 #define MAX_LOG2_INTERLEAVE_SECTORS 31 32 #define METADATA_WORKQUEUE_MAX_ACTIVE 16 33 34 /* 35 * Warning - DEBUG_PRINT prints security-sensitive data to the log, 36 * so it should not be enabled in the official kernel 37 */ 38 //#define DEBUG_PRINT 39 //#define INTERNAL_VERIFY 40 41 /* 42 * On disk structures 43 */ 44 45 #define SB_MAGIC "integrt" 46 #define SB_VERSION 1 47 #define SB_SECTORS 8 48 #define MAX_SECTORS_PER_BLOCK 8 49 50 struct superblock { 51 __u8 magic[8]; 52 __u8 version; 53 __u8 log2_interleave_sectors; 54 __u16 integrity_tag_size; 55 __u32 journal_sections; 56 __u64 provided_data_sectors; /* userspace uses this value */ 57 __u32 flags; 58 __u8 log2_sectors_per_block; 59 }; 60 61 #define SB_FLAG_HAVE_JOURNAL_MAC 0x1 62 63 #define JOURNAL_ENTRY_ROUNDUP 8 64 65 typedef __u64 commit_id_t; 66 #define JOURNAL_MAC_PER_SECTOR 8 67 68 struct journal_entry { 69 union { 70 struct { 71 __u32 sector_lo; 72 __u32 sector_hi; 73 } s; 74 __u64 sector; 75 } u; 76 commit_id_t last_bytes[0]; 77 /* __u8 tag[0]; */ 78 }; 79 80 #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) 81 82 #if BITS_PER_LONG == 64 83 #define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0) 84 #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) 85 #elif defined(CONFIG_LBDAF) 86 #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0) 87 #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) 88 #else 89 #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0) 90 #define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo) 91 #endif 92 #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) 93 #define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0) 94 #define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2)) 95 #define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0) 96 97 #define JOURNAL_BLOCK_SECTORS 8 98 #define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t)) 99 #define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS) 100 101 struct journal_sector { 102 __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR]; 103 __u8 mac[JOURNAL_MAC_PER_SECTOR]; 104 commit_id_t commit_id; 105 }; 106 107 #define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK])) 108 109 #define METADATA_PADDING_SECTORS 8 110 111 #define N_COMMIT_IDS 4 112 113 static unsigned char prev_commit_seq(unsigned char seq) 114 { 115 return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS; 116 } 117 118 static unsigned char next_commit_seq(unsigned char seq) 119 { 120 return (seq + 1) % N_COMMIT_IDS; 121 } 122 123 /* 124 * In-memory structures 125 */ 126 127 struct journal_node { 128 struct rb_node node; 129 sector_t sector; 130 }; 131 132 struct alg_spec { 133 char *alg_string; 134 char *key_string; 135 __u8 *key; 136 unsigned key_size; 137 }; 138 139 struct dm_integrity_c { 140 struct dm_dev *dev; 141 unsigned tag_size; 142 __s8 log2_tag_size; 143 sector_t start; 144 mempool_t *journal_io_mempool; 145 struct dm_io_client *io; 146 struct dm_bufio_client *bufio; 147 struct workqueue_struct *metadata_wq; 148 struct superblock *sb; 149 unsigned journal_pages; 150 struct page_list *journal; 151 struct page_list *journal_io; 152 struct page_list *journal_xor; 153 154 struct crypto_skcipher *journal_crypt; 155 struct scatterlist **journal_scatterlist; 156 struct scatterlist **journal_io_scatterlist; 157 struct skcipher_request **sk_requests; 158 159 struct crypto_shash *journal_mac; 160 161 struct journal_node *journal_tree; 162 struct rb_root journal_tree_root; 163 164 sector_t provided_data_sectors; 165 166 unsigned short journal_entry_size; 167 unsigned char journal_entries_per_sector; 168 unsigned char journal_section_entries; 169 unsigned short journal_section_sectors; 170 unsigned journal_sections; 171 unsigned journal_entries; 172 sector_t device_sectors; 173 unsigned initial_sectors; 174 unsigned metadata_run; 175 __s8 log2_metadata_run; 176 __u8 log2_buffer_sectors; 177 __u8 sectors_per_block; 178 179 unsigned char mode; 180 bool suspending; 181 182 int failed; 183 184 struct crypto_shash *internal_hash; 185 186 /* these variables are locked with endio_wait.lock */ 187 struct rb_root in_progress; 188 wait_queue_head_t endio_wait; 189 struct workqueue_struct *wait_wq; 190 191 unsigned char commit_seq; 192 commit_id_t commit_ids[N_COMMIT_IDS]; 193 194 unsigned committed_section; 195 unsigned n_committed_sections; 196 197 unsigned uncommitted_section; 198 unsigned n_uncommitted_sections; 199 200 unsigned free_section; 201 unsigned char free_section_entry; 202 unsigned free_sectors; 203 204 unsigned free_sectors_threshold; 205 206 struct workqueue_struct *commit_wq; 207 struct work_struct commit_work; 208 209 struct workqueue_struct *writer_wq; 210 struct work_struct writer_work; 211 212 struct bio_list flush_bio_list; 213 214 unsigned long autocommit_jiffies; 215 struct timer_list autocommit_timer; 216 unsigned autocommit_msec; 217 218 wait_queue_head_t copy_to_journal_wait; 219 220 struct completion crypto_backoff; 221 222 bool journal_uptodate; 223 bool just_formatted; 224 225 struct alg_spec internal_hash_alg; 226 struct alg_spec journal_crypt_alg; 227 struct alg_spec journal_mac_alg; 228 }; 229 230 struct dm_integrity_range { 231 sector_t logical_sector; 232 unsigned n_sectors; 233 struct rb_node node; 234 }; 235 236 struct dm_integrity_io { 237 struct work_struct work; 238 239 struct dm_integrity_c *ic; 240 bool write; 241 bool fua; 242 243 struct dm_integrity_range range; 244 245 sector_t metadata_block; 246 unsigned metadata_offset; 247 248 atomic_t in_flight; 249 int bi_error; 250 251 struct completion *completion; 252 253 struct block_device *orig_bi_bdev; 254 bio_end_io_t *orig_bi_end_io; 255 struct bio_integrity_payload *orig_bi_integrity; 256 struct bvec_iter orig_bi_iter; 257 }; 258 259 struct journal_completion { 260 struct dm_integrity_c *ic; 261 atomic_t in_flight; 262 struct completion comp; 263 }; 264 265 struct journal_io { 266 struct dm_integrity_range range; 267 struct journal_completion *comp; 268 }; 269 270 static struct kmem_cache *journal_io_cache; 271 272 #define JOURNAL_IO_MEMPOOL 32 273 274 #ifdef DEBUG_PRINT 275 #define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__) 276 static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...) 277 { 278 va_list args; 279 va_start(args, msg); 280 vprintk(msg, args); 281 va_end(args); 282 if (len) 283 pr_cont(":"); 284 while (len) { 285 pr_cont(" %02x", *bytes); 286 bytes++; 287 len--; 288 } 289 pr_cont("\n"); 290 } 291 #define DEBUG_bytes(bytes, len, msg, ...) __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__) 292 #else 293 #define DEBUG_print(x, ...) do { } while (0) 294 #define DEBUG_bytes(bytes, len, msg, ...) do { } while (0) 295 #endif 296 297 /* 298 * DM Integrity profile, protection is performed layer above (dm-crypt) 299 */ 300 static struct blk_integrity_profile dm_integrity_profile = { 301 .name = "DM-DIF-EXT-TAG", 302 .generate_fn = NULL, 303 .verify_fn = NULL, 304 }; 305 306 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map); 307 static void integrity_bio_wait(struct work_struct *w); 308 static void dm_integrity_dtr(struct dm_target *ti); 309 310 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) 311 { 312 if (!cmpxchg(&ic->failed, 0, err)) 313 DMERR("Error on %s: %d", msg, err); 314 } 315 316 static int dm_integrity_failed(struct dm_integrity_c *ic) 317 { 318 return ACCESS_ONCE(ic->failed); 319 } 320 321 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, 322 unsigned j, unsigned char seq) 323 { 324 /* 325 * Xor the number with section and sector, so that if a piece of 326 * journal is written at wrong place, it is detected. 327 */ 328 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j); 329 } 330 331 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, 332 sector_t *area, sector_t *offset) 333 { 334 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; 335 336 *area = data_sector >> log2_interleave_sectors; 337 *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1); 338 } 339 340 #define sector_to_block(ic, n) \ 341 do { \ 342 BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \ 343 (n) >>= (ic)->sb->log2_sectors_per_block; \ 344 } while (0) 345 346 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, 347 sector_t offset, unsigned *metadata_offset) 348 { 349 __u64 ms; 350 unsigned mo; 351 352 ms = area << ic->sb->log2_interleave_sectors; 353 if (likely(ic->log2_metadata_run >= 0)) 354 ms += area << ic->log2_metadata_run; 355 else 356 ms += area * ic->metadata_run; 357 ms >>= ic->log2_buffer_sectors; 358 359 sector_to_block(ic, offset); 360 361 if (likely(ic->log2_tag_size >= 0)) { 362 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); 363 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); 364 } else { 365 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); 366 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); 367 } 368 *metadata_offset = mo; 369 return ms; 370 } 371 372 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) 373 { 374 sector_t result; 375 376 result = area << ic->sb->log2_interleave_sectors; 377 if (likely(ic->log2_metadata_run >= 0)) 378 result += (area + 1) << ic->log2_metadata_run; 379 else 380 result += (area + 1) * ic->metadata_run; 381 382 result += (sector_t)ic->initial_sectors + offset; 383 return result; 384 } 385 386 static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr) 387 { 388 if (unlikely(*sec_ptr >= ic->journal_sections)) 389 *sec_ptr -= ic->journal_sections; 390 } 391 392 static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags) 393 { 394 struct dm_io_request io_req; 395 struct dm_io_region io_loc; 396 397 io_req.bi_op = op; 398 io_req.bi_op_flags = op_flags; 399 io_req.mem.type = DM_IO_KMEM; 400 io_req.mem.ptr.addr = ic->sb; 401 io_req.notify.fn = NULL; 402 io_req.client = ic->io; 403 io_loc.bdev = ic->dev->bdev; 404 io_loc.sector = ic->start; 405 io_loc.count = SB_SECTORS; 406 407 return dm_io(&io_req, 1, &io_loc, NULL); 408 } 409 410 static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset, 411 bool e, const char *function) 412 { 413 #if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY) 414 unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors; 415 416 if (unlikely(section >= ic->journal_sections) || 417 unlikely(offset >= limit)) { 418 printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n", 419 function, section, offset, ic->journal_sections, limit); 420 BUG(); 421 } 422 #endif 423 } 424 425 static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset, 426 unsigned *pl_index, unsigned *pl_offset) 427 { 428 unsigned sector; 429 430 access_journal_check(ic, section, offset, false, "page_list_location"); 431 432 sector = section * ic->journal_section_sectors + offset; 433 434 *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 435 *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 436 } 437 438 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl, 439 unsigned section, unsigned offset, unsigned *n_sectors) 440 { 441 unsigned pl_index, pl_offset; 442 char *va; 443 444 page_list_location(ic, section, offset, &pl_index, &pl_offset); 445 446 if (n_sectors) 447 *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT; 448 449 va = lowmem_page_address(pl[pl_index].page); 450 451 return (struct journal_sector *)(va + pl_offset); 452 } 453 454 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset) 455 { 456 return access_page_list(ic, ic->journal, section, offset, NULL); 457 } 458 459 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n) 460 { 461 unsigned rel_sector, offset; 462 struct journal_sector *js; 463 464 access_journal_check(ic, section, n, true, "access_journal_entry"); 465 466 rel_sector = n % JOURNAL_BLOCK_SECTORS; 467 offset = n / JOURNAL_BLOCK_SECTORS; 468 469 js = access_journal(ic, section, rel_sector); 470 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size); 471 } 472 473 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n) 474 { 475 n <<= ic->sb->log2_sectors_per_block; 476 477 n += JOURNAL_BLOCK_SECTORS; 478 479 access_journal_check(ic, section, n, false, "access_journal_data"); 480 481 return access_journal(ic, section, n); 482 } 483 484 static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE]) 485 { 486 SHASH_DESC_ON_STACK(desc, ic->journal_mac); 487 int r; 488 unsigned j, size; 489 490 desc->tfm = ic->journal_mac; 491 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 492 493 r = crypto_shash_init(desc); 494 if (unlikely(r)) { 495 dm_integrity_io_error(ic, "crypto_shash_init", r); 496 goto err; 497 } 498 499 for (j = 0; j < ic->journal_section_entries; j++) { 500 struct journal_entry *je = access_journal_entry(ic, section, j); 501 r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector); 502 if (unlikely(r)) { 503 dm_integrity_io_error(ic, "crypto_shash_update", r); 504 goto err; 505 } 506 } 507 508 size = crypto_shash_digestsize(ic->journal_mac); 509 510 if (likely(size <= JOURNAL_MAC_SIZE)) { 511 r = crypto_shash_final(desc, result); 512 if (unlikely(r)) { 513 dm_integrity_io_error(ic, "crypto_shash_final", r); 514 goto err; 515 } 516 memset(result + size, 0, JOURNAL_MAC_SIZE - size); 517 } else { 518 __u8 digest[size]; 519 r = crypto_shash_final(desc, digest); 520 if (unlikely(r)) { 521 dm_integrity_io_error(ic, "crypto_shash_final", r); 522 goto err; 523 } 524 memcpy(result, digest, JOURNAL_MAC_SIZE); 525 } 526 527 return; 528 err: 529 memset(result, 0, JOURNAL_MAC_SIZE); 530 } 531 532 static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr) 533 { 534 __u8 result[JOURNAL_MAC_SIZE]; 535 unsigned j; 536 537 if (!ic->journal_mac) 538 return; 539 540 section_mac(ic, section, result); 541 542 for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) { 543 struct journal_sector *js = access_journal(ic, section, j); 544 545 if (likely(wr)) 546 memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR); 547 else { 548 if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) 549 dm_integrity_io_error(ic, "journal mac", -EILSEQ); 550 } 551 } 552 } 553 554 static void complete_journal_op(void *context) 555 { 556 struct journal_completion *comp = context; 557 BUG_ON(!atomic_read(&comp->in_flight)); 558 if (likely(atomic_dec_and_test(&comp->in_flight))) 559 complete(&comp->comp); 560 } 561 562 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, 563 unsigned n_sections, struct journal_completion *comp) 564 { 565 struct async_submit_ctl submit; 566 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; 567 unsigned pl_index, pl_offset, section_index; 568 struct page_list *source_pl, *target_pl; 569 570 if (likely(encrypt)) { 571 source_pl = ic->journal; 572 target_pl = ic->journal_io; 573 } else { 574 source_pl = ic->journal_io; 575 target_pl = ic->journal; 576 } 577 578 page_list_location(ic, section, 0, &pl_index, &pl_offset); 579 580 atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight); 581 582 init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL); 583 584 section_index = pl_index; 585 586 do { 587 size_t this_step; 588 struct page *src_pages[2]; 589 struct page *dst_page; 590 591 while (unlikely(pl_index == section_index)) { 592 unsigned dummy; 593 if (likely(encrypt)) 594 rw_section_mac(ic, section, true); 595 section++; 596 n_sections--; 597 if (!n_sections) 598 break; 599 page_list_location(ic, section, 0, §ion_index, &dummy); 600 } 601 602 this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset); 603 dst_page = target_pl[pl_index].page; 604 src_pages[0] = source_pl[pl_index].page; 605 src_pages[1] = ic->journal_xor[pl_index].page; 606 607 async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit); 608 609 pl_index++; 610 pl_offset = 0; 611 n_bytes -= this_step; 612 } while (n_bytes); 613 614 BUG_ON(n_sections); 615 616 async_tx_issue_pending_all(); 617 } 618 619 static void complete_journal_encrypt(struct crypto_async_request *req, int err) 620 { 621 struct journal_completion *comp = req->data; 622 if (unlikely(err)) { 623 if (likely(err == -EINPROGRESS)) { 624 complete(&comp->ic->crypto_backoff); 625 return; 626 } 627 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err); 628 } 629 complete_journal_op(comp); 630 } 631 632 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) 633 { 634 int r; 635 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 636 complete_journal_encrypt, comp); 637 if (likely(encrypt)) 638 r = crypto_skcipher_encrypt(req); 639 else 640 r = crypto_skcipher_decrypt(req); 641 if (likely(!r)) 642 return false; 643 if (likely(r == -EINPROGRESS)) 644 return true; 645 if (likely(r == -EBUSY)) { 646 wait_for_completion(&comp->ic->crypto_backoff); 647 reinit_completion(&comp->ic->crypto_backoff); 648 return true; 649 } 650 dm_integrity_io_error(comp->ic, "encrypt", r); 651 return false; 652 } 653 654 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, 655 unsigned n_sections, struct journal_completion *comp) 656 { 657 struct scatterlist **source_sg; 658 struct scatterlist **target_sg; 659 660 atomic_add(2, &comp->in_flight); 661 662 if (likely(encrypt)) { 663 source_sg = ic->journal_scatterlist; 664 target_sg = ic->journal_io_scatterlist; 665 } else { 666 source_sg = ic->journal_io_scatterlist; 667 target_sg = ic->journal_scatterlist; 668 } 669 670 do { 671 struct skcipher_request *req; 672 unsigned ivsize; 673 char *iv; 674 675 if (likely(encrypt)) 676 rw_section_mac(ic, section, true); 677 678 req = ic->sk_requests[section]; 679 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); 680 iv = req->iv; 681 682 memcpy(iv, iv + ivsize, ivsize); 683 684 req->src = source_sg[section]; 685 req->dst = target_sg[section]; 686 687 if (unlikely(do_crypt(encrypt, req, comp))) 688 atomic_inc(&comp->in_flight); 689 690 section++; 691 n_sections--; 692 } while (n_sections); 693 694 atomic_dec(&comp->in_flight); 695 complete_journal_op(comp); 696 } 697 698 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, 699 unsigned n_sections, struct journal_completion *comp) 700 { 701 if (ic->journal_xor) 702 return xor_journal(ic, encrypt, section, n_sections, comp); 703 else 704 return crypt_journal(ic, encrypt, section, n_sections, comp); 705 } 706 707 static void complete_journal_io(unsigned long error, void *context) 708 { 709 struct journal_completion *comp = context; 710 if (unlikely(error != 0)) 711 dm_integrity_io_error(comp->ic, "writing journal", -EIO); 712 complete_journal_op(comp); 713 } 714 715 static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section, 716 unsigned n_sections, struct journal_completion *comp) 717 { 718 struct dm_io_request io_req; 719 struct dm_io_region io_loc; 720 unsigned sector, n_sectors, pl_index, pl_offset; 721 int r; 722 723 if (unlikely(dm_integrity_failed(ic))) { 724 if (comp) 725 complete_journal_io(-1UL, comp); 726 return; 727 } 728 729 sector = section * ic->journal_section_sectors; 730 n_sectors = n_sections * ic->journal_section_sectors; 731 732 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 733 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 734 735 io_req.bi_op = op; 736 io_req.bi_op_flags = op_flags; 737 io_req.mem.type = DM_IO_PAGE_LIST; 738 if (ic->journal_io) 739 io_req.mem.ptr.pl = &ic->journal_io[pl_index]; 740 else 741 io_req.mem.ptr.pl = &ic->journal[pl_index]; 742 io_req.mem.offset = pl_offset; 743 if (likely(comp != NULL)) { 744 io_req.notify.fn = complete_journal_io; 745 io_req.notify.context = comp; 746 } else { 747 io_req.notify.fn = NULL; 748 } 749 io_req.client = ic->io; 750 io_loc.bdev = ic->dev->bdev; 751 io_loc.sector = ic->start + SB_SECTORS + sector; 752 io_loc.count = n_sectors; 753 754 r = dm_io(&io_req, 1, &io_loc, NULL); 755 if (unlikely(r)) { 756 dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r); 757 if (comp) { 758 WARN_ONCE(1, "asynchronous dm_io failed: %d", r); 759 complete_journal_io(-1UL, comp); 760 } 761 } 762 } 763 764 static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections) 765 { 766 struct journal_completion io_comp; 767 struct journal_completion crypt_comp_1; 768 struct journal_completion crypt_comp_2; 769 unsigned i; 770 771 io_comp.ic = ic; 772 io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp); 773 774 if (commit_start + commit_sections <= ic->journal_sections) { 775 io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); 776 if (ic->journal_io) { 777 crypt_comp_1.ic = ic; 778 crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); 779 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 780 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); 781 wait_for_completion_io(&crypt_comp_1.comp); 782 } else { 783 for (i = 0; i < commit_sections; i++) 784 rw_section_mac(ic, commit_start + i, true); 785 } 786 rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, 787 commit_sections, &io_comp); 788 } else { 789 unsigned to_end; 790 io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); 791 to_end = ic->journal_sections - commit_start; 792 if (ic->journal_io) { 793 crypt_comp_1.ic = ic; 794 crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); 795 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 796 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); 797 if (try_wait_for_completion(&crypt_comp_1.comp)) { 798 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); 799 crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); 800 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 801 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); 802 wait_for_completion_io(&crypt_comp_1.comp); 803 } else { 804 crypt_comp_2.ic = ic; 805 crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp); 806 crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); 807 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); 808 wait_for_completion_io(&crypt_comp_1.comp); 809 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); 810 wait_for_completion_io(&crypt_comp_2.comp); 811 } 812 } else { 813 for (i = 0; i < to_end; i++) 814 rw_section_mac(ic, commit_start + i, true); 815 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); 816 for (i = 0; i < commit_sections - to_end; i++) 817 rw_section_mac(ic, i, true); 818 } 819 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp); 820 } 821 822 wait_for_completion_io(&io_comp.comp); 823 } 824 825 static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset, 826 unsigned n_sectors, sector_t target, io_notify_fn fn, void *data) 827 { 828 struct dm_io_request io_req; 829 struct dm_io_region io_loc; 830 int r; 831 unsigned sector, pl_index, pl_offset; 832 833 BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1)); 834 835 if (unlikely(dm_integrity_failed(ic))) { 836 fn(-1UL, data); 837 return; 838 } 839 840 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; 841 842 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 843 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 844 845 io_req.bi_op = REQ_OP_WRITE; 846 io_req.bi_op_flags = 0; 847 io_req.mem.type = DM_IO_PAGE_LIST; 848 io_req.mem.ptr.pl = &ic->journal[pl_index]; 849 io_req.mem.offset = pl_offset; 850 io_req.notify.fn = fn; 851 io_req.notify.context = data; 852 io_req.client = ic->io; 853 io_loc.bdev = ic->dev->bdev; 854 io_loc.sector = ic->start + target; 855 io_loc.count = n_sectors; 856 857 r = dm_io(&io_req, 1, &io_loc, NULL); 858 if (unlikely(r)) { 859 WARN_ONCE(1, "asynchronous dm_io failed: %d", r); 860 fn(-1UL, data); 861 } 862 } 863 864 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) 865 { 866 struct rb_node **n = &ic->in_progress.rb_node; 867 struct rb_node *parent; 868 869 BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1)); 870 871 parent = NULL; 872 873 while (*n) { 874 struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node); 875 876 parent = *n; 877 if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) { 878 n = &range->node.rb_left; 879 } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) { 880 n = &range->node.rb_right; 881 } else { 882 return false; 883 } 884 } 885 886 rb_link_node(&new_range->node, parent, n); 887 rb_insert_color(&new_range->node, &ic->in_progress); 888 889 return true; 890 } 891 892 static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) 893 { 894 rb_erase(&range->node, &ic->in_progress); 895 wake_up_locked(&ic->endio_wait); 896 } 897 898 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range) 899 { 900 unsigned long flags; 901 902 spin_lock_irqsave(&ic->endio_wait.lock, flags); 903 remove_range_unlocked(ic, range); 904 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); 905 } 906 907 static void init_journal_node(struct journal_node *node) 908 { 909 RB_CLEAR_NODE(&node->node); 910 node->sector = (sector_t)-1; 911 } 912 913 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) 914 { 915 struct rb_node **link; 916 struct rb_node *parent; 917 918 node->sector = sector; 919 BUG_ON(!RB_EMPTY_NODE(&node->node)); 920 921 link = &ic->journal_tree_root.rb_node; 922 parent = NULL; 923 924 while (*link) { 925 struct journal_node *j; 926 parent = *link; 927 j = container_of(parent, struct journal_node, node); 928 if (sector < j->sector) 929 link = &j->node.rb_left; 930 else 931 link = &j->node.rb_right; 932 } 933 934 rb_link_node(&node->node, parent, link); 935 rb_insert_color(&node->node, &ic->journal_tree_root); 936 } 937 938 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node) 939 { 940 BUG_ON(RB_EMPTY_NODE(&node->node)); 941 rb_erase(&node->node, &ic->journal_tree_root); 942 init_journal_node(node); 943 } 944 945 #define NOT_FOUND (-1U) 946 947 static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) 948 { 949 struct rb_node *n = ic->journal_tree_root.rb_node; 950 unsigned found = NOT_FOUND; 951 *next_sector = (sector_t)-1; 952 while (n) { 953 struct journal_node *j = container_of(n, struct journal_node, node); 954 if (sector == j->sector) { 955 found = j - ic->journal_tree; 956 } 957 if (sector < j->sector) { 958 *next_sector = j->sector; 959 n = j->node.rb_left; 960 } else { 961 n = j->node.rb_right; 962 } 963 } 964 965 return found; 966 } 967 968 static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector) 969 { 970 struct journal_node *node, *next_node; 971 struct rb_node *next; 972 973 if (unlikely(pos >= ic->journal_entries)) 974 return false; 975 node = &ic->journal_tree[pos]; 976 if (unlikely(RB_EMPTY_NODE(&node->node))) 977 return false; 978 if (unlikely(node->sector != sector)) 979 return false; 980 981 next = rb_next(&node->node); 982 if (unlikely(!next)) 983 return true; 984 985 next_node = container_of(next, struct journal_node, node); 986 return next_node->sector != sector; 987 } 988 989 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node) 990 { 991 struct rb_node *next; 992 struct journal_node *next_node; 993 unsigned next_section; 994 995 BUG_ON(RB_EMPTY_NODE(&node->node)); 996 997 next = rb_next(&node->node); 998 if (unlikely(!next)) 999 return false; 1000 1001 next_node = container_of(next, struct journal_node, node); 1002 1003 if (next_node->sector != node->sector) 1004 return false; 1005 1006 next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries; 1007 if (next_section >= ic->committed_section && 1008 next_section < ic->committed_section + ic->n_committed_sections) 1009 return true; 1010 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections) 1011 return true; 1012 1013 return false; 1014 } 1015 1016 #define TAG_READ 0 1017 #define TAG_WRITE 1 1018 #define TAG_CMP 2 1019 1020 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block, 1021 unsigned *metadata_offset, unsigned total_size, int op) 1022 { 1023 do { 1024 unsigned char *data, *dp; 1025 struct dm_buffer *b; 1026 unsigned to_copy; 1027 int r; 1028 1029 r = dm_integrity_failed(ic); 1030 if (unlikely(r)) 1031 return r; 1032 1033 data = dm_bufio_read(ic->bufio, *metadata_block, &b); 1034 if (unlikely(IS_ERR(data))) 1035 return PTR_ERR(data); 1036 1037 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); 1038 dp = data + *metadata_offset; 1039 if (op == TAG_READ) { 1040 memcpy(tag, dp, to_copy); 1041 } else if (op == TAG_WRITE) { 1042 memcpy(dp, tag, to_copy); 1043 dm_bufio_mark_buffer_dirty(b); 1044 } else { 1045 /* e.g.: op == TAG_CMP */ 1046 if (unlikely(memcmp(dp, tag, to_copy))) { 1047 unsigned i; 1048 1049 for (i = 0; i < to_copy; i++) { 1050 if (dp[i] != tag[i]) 1051 break; 1052 total_size--; 1053 } 1054 dm_bufio_release(b); 1055 return total_size; 1056 } 1057 } 1058 dm_bufio_release(b); 1059 1060 tag += to_copy; 1061 *metadata_offset += to_copy; 1062 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { 1063 (*metadata_block)++; 1064 *metadata_offset = 0; 1065 } 1066 total_size -= to_copy; 1067 } while (unlikely(total_size)); 1068 1069 return 0; 1070 } 1071 1072 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic) 1073 { 1074 int r; 1075 r = dm_bufio_write_dirty_buffers(ic->bufio); 1076 if (unlikely(r)) 1077 dm_integrity_io_error(ic, "writing tags", r); 1078 } 1079 1080 static void sleep_on_endio_wait(struct dm_integrity_c *ic) 1081 { 1082 DECLARE_WAITQUEUE(wait, current); 1083 __add_wait_queue(&ic->endio_wait, &wait); 1084 __set_current_state(TASK_UNINTERRUPTIBLE); 1085 spin_unlock_irq(&ic->endio_wait.lock); 1086 io_schedule(); 1087 spin_lock_irq(&ic->endio_wait.lock); 1088 __remove_wait_queue(&ic->endio_wait, &wait); 1089 } 1090 1091 static void autocommit_fn(unsigned long data) 1092 { 1093 struct dm_integrity_c *ic = (struct dm_integrity_c *)data; 1094 1095 if (likely(!dm_integrity_failed(ic))) 1096 queue_work(ic->commit_wq, &ic->commit_work); 1097 } 1098 1099 static void schedule_autocommit(struct dm_integrity_c *ic) 1100 { 1101 if (!timer_pending(&ic->autocommit_timer)) 1102 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies); 1103 } 1104 1105 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1106 { 1107 struct bio *bio; 1108 spin_lock_irq(&ic->endio_wait.lock); 1109 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1110 bio_list_add(&ic->flush_bio_list, bio); 1111 spin_unlock_irq(&ic->endio_wait.lock); 1112 queue_work(ic->commit_wq, &ic->commit_work); 1113 } 1114 1115 static void do_endio(struct dm_integrity_c *ic, struct bio *bio) 1116 { 1117 int r = dm_integrity_failed(ic); 1118 if (unlikely(r) && !bio->bi_error) 1119 bio->bi_error = r; 1120 bio_endio(bio); 1121 } 1122 1123 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1124 { 1125 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1126 1127 if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic))) 1128 submit_flush_bio(ic, dio); 1129 else 1130 do_endio(ic, bio); 1131 } 1132 1133 static void dec_in_flight(struct dm_integrity_io *dio) 1134 { 1135 if (atomic_dec_and_test(&dio->in_flight)) { 1136 struct dm_integrity_c *ic = dio->ic; 1137 struct bio *bio; 1138 1139 remove_range(ic, &dio->range); 1140 1141 if (unlikely(dio->write)) 1142 schedule_autocommit(ic); 1143 1144 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1145 1146 if (unlikely(dio->bi_error) && !bio->bi_error) 1147 bio->bi_error = dio->bi_error; 1148 if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { 1149 dio->range.logical_sector += dio->range.n_sectors; 1150 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); 1151 INIT_WORK(&dio->work, integrity_bio_wait); 1152 queue_work(ic->wait_wq, &dio->work); 1153 return; 1154 } 1155 do_endio_flush(ic, dio); 1156 } 1157 } 1158 1159 static void integrity_end_io(struct bio *bio) 1160 { 1161 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 1162 1163 bio->bi_iter = dio->orig_bi_iter; 1164 bio->bi_bdev = dio->orig_bi_bdev; 1165 if (dio->orig_bi_integrity) { 1166 bio->bi_integrity = dio->orig_bi_integrity; 1167 bio->bi_opf |= REQ_INTEGRITY; 1168 } 1169 bio->bi_end_io = dio->orig_bi_end_io; 1170 1171 if (dio->completion) 1172 complete(dio->completion); 1173 1174 dec_in_flight(dio); 1175 } 1176 1177 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector, 1178 const char *data, char *result) 1179 { 1180 __u64 sector_le = cpu_to_le64(sector); 1181 SHASH_DESC_ON_STACK(req, ic->internal_hash); 1182 int r; 1183 unsigned digest_size; 1184 1185 req->tfm = ic->internal_hash; 1186 req->flags = 0; 1187 1188 r = crypto_shash_init(req); 1189 if (unlikely(r < 0)) { 1190 dm_integrity_io_error(ic, "crypto_shash_init", r); 1191 goto failed; 1192 } 1193 1194 r = crypto_shash_update(req, (const __u8 *)§or_le, sizeof sector_le); 1195 if (unlikely(r < 0)) { 1196 dm_integrity_io_error(ic, "crypto_shash_update", r); 1197 goto failed; 1198 } 1199 1200 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT); 1201 if (unlikely(r < 0)) { 1202 dm_integrity_io_error(ic, "crypto_shash_update", r); 1203 goto failed; 1204 } 1205 1206 r = crypto_shash_final(req, result); 1207 if (unlikely(r < 0)) { 1208 dm_integrity_io_error(ic, "crypto_shash_final", r); 1209 goto failed; 1210 } 1211 1212 digest_size = crypto_shash_digestsize(ic->internal_hash); 1213 if (unlikely(digest_size < ic->tag_size)) 1214 memset(result + digest_size, 0, ic->tag_size - digest_size); 1215 1216 return; 1217 1218 failed: 1219 /* this shouldn't happen anyway, the hash functions have no reason to fail */ 1220 get_random_bytes(result, ic->tag_size); 1221 } 1222 1223 static void integrity_metadata(struct work_struct *w) 1224 { 1225 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 1226 struct dm_integrity_c *ic = dio->ic; 1227 1228 int r; 1229 1230 if (ic->internal_hash) { 1231 struct bvec_iter iter; 1232 struct bio_vec bv; 1233 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); 1234 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1235 char *checksums; 1236 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; 1237 char checksums_onstack[ic->tag_size + extra_space]; 1238 unsigned sectors_to_process = dio->range.n_sectors; 1239 sector_t sector = dio->range.logical_sector; 1240 1241 if (unlikely(ic->mode == 'R')) 1242 goto skip_io; 1243 1244 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space, 1245 GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN); 1246 if (!checksums) 1247 checksums = checksums_onstack; 1248 1249 __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) { 1250 unsigned pos; 1251 char *mem, *checksums_ptr; 1252 1253 again: 1254 mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset; 1255 pos = 0; 1256 checksums_ptr = checksums; 1257 do { 1258 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr); 1259 checksums_ptr += ic->tag_size; 1260 sectors_to_process -= ic->sectors_per_block; 1261 pos += ic->sectors_per_block << SECTOR_SHIFT; 1262 sector += ic->sectors_per_block; 1263 } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack); 1264 kunmap_atomic(mem); 1265 1266 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, 1267 checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE); 1268 if (unlikely(r)) { 1269 if (r > 0) { 1270 DMERR("Checksum failed at sector 0x%llx", 1271 (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); 1272 r = -EILSEQ; 1273 } 1274 if (likely(checksums != checksums_onstack)) 1275 kfree(checksums); 1276 goto error; 1277 } 1278 1279 if (!sectors_to_process) 1280 break; 1281 1282 if (unlikely(pos < bv.bv_len)) { 1283 bv.bv_offset += pos; 1284 bv.bv_len -= pos; 1285 goto again; 1286 } 1287 } 1288 1289 if (likely(checksums != checksums_onstack)) 1290 kfree(checksums); 1291 } else { 1292 struct bio_integrity_payload *bip = dio->orig_bi_integrity; 1293 1294 if (bip) { 1295 struct bio_vec biv; 1296 struct bvec_iter iter; 1297 unsigned data_to_process = dio->range.n_sectors; 1298 sector_to_block(ic, data_to_process); 1299 data_to_process *= ic->tag_size; 1300 1301 bip_for_each_vec(biv, bip, iter) { 1302 unsigned char *tag; 1303 unsigned this_len; 1304 1305 BUG_ON(PageHighMem(biv.bv_page)); 1306 tag = lowmem_page_address(biv.bv_page) + biv.bv_offset; 1307 this_len = min(biv.bv_len, data_to_process); 1308 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, 1309 this_len, !dio->write ? TAG_READ : TAG_WRITE); 1310 if (unlikely(r)) 1311 goto error; 1312 data_to_process -= this_len; 1313 if (!data_to_process) 1314 break; 1315 } 1316 } 1317 } 1318 skip_io: 1319 dec_in_flight(dio); 1320 return; 1321 error: 1322 dio->bi_error = r; 1323 dec_in_flight(dio); 1324 } 1325 1326 static int dm_integrity_map(struct dm_target *ti, struct bio *bio) 1327 { 1328 struct dm_integrity_c *ic = ti->private; 1329 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 1330 struct bio_integrity_payload *bip; 1331 1332 sector_t area, offset; 1333 1334 dio->ic = ic; 1335 dio->bi_error = 0; 1336 1337 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 1338 submit_flush_bio(ic, dio); 1339 return DM_MAPIO_SUBMITTED; 1340 } 1341 1342 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); 1343 dio->write = bio_op(bio) == REQ_OP_WRITE; 1344 dio->fua = dio->write && bio->bi_opf & REQ_FUA; 1345 if (unlikely(dio->fua)) { 1346 /* 1347 * Don't pass down the FUA flag because we have to flush 1348 * disk cache anyway. 1349 */ 1350 bio->bi_opf &= ~REQ_FUA; 1351 } 1352 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { 1353 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx", 1354 (unsigned long long)dio->range.logical_sector, bio_sectors(bio), 1355 (unsigned long long)ic->provided_data_sectors); 1356 return -EIO; 1357 } 1358 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) { 1359 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x", 1360 ic->sectors_per_block, 1361 (unsigned long long)dio->range.logical_sector, bio_sectors(bio)); 1362 return -EIO; 1363 } 1364 1365 if (ic->sectors_per_block > 1) { 1366 struct bvec_iter iter; 1367 struct bio_vec bv; 1368 bio_for_each_segment(bv, bio, iter) { 1369 if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { 1370 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary", 1371 bv.bv_offset, bv.bv_len, ic->sectors_per_block); 1372 return -EIO; 1373 } 1374 } 1375 } 1376 1377 bip = bio_integrity(bio); 1378 if (!ic->internal_hash) { 1379 if (bip) { 1380 unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block; 1381 if (ic->log2_tag_size >= 0) 1382 wanted_tag_size <<= ic->log2_tag_size; 1383 else 1384 wanted_tag_size *= ic->tag_size; 1385 if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) { 1386 DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size); 1387 return -EIO; 1388 } 1389 } 1390 } else { 1391 if (unlikely(bip != NULL)) { 1392 DMERR("Unexpected integrity data when using internal hash"); 1393 return -EIO; 1394 } 1395 } 1396 1397 if (unlikely(ic->mode == 'R') && unlikely(dio->write)) 1398 return -EIO; 1399 1400 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 1401 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); 1402 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset); 1403 1404 dm_integrity_map_continue(dio, true); 1405 return DM_MAPIO_SUBMITTED; 1406 } 1407 1408 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, 1409 unsigned journal_section, unsigned journal_entry) 1410 { 1411 struct dm_integrity_c *ic = dio->ic; 1412 sector_t logical_sector; 1413 unsigned n_sectors; 1414 1415 logical_sector = dio->range.logical_sector; 1416 n_sectors = dio->range.n_sectors; 1417 do { 1418 struct bio_vec bv = bio_iovec(bio); 1419 char *mem; 1420 1421 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors)) 1422 bv.bv_len = n_sectors << SECTOR_SHIFT; 1423 n_sectors -= bv.bv_len >> SECTOR_SHIFT; 1424 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len); 1425 retry_kmap: 1426 mem = kmap_atomic(bv.bv_page); 1427 if (likely(dio->write)) 1428 flush_dcache_page(bv.bv_page); 1429 1430 do { 1431 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry); 1432 1433 if (unlikely(!dio->write)) { 1434 struct journal_sector *js; 1435 char *mem_ptr; 1436 unsigned s; 1437 1438 if (unlikely(journal_entry_is_inprogress(je))) { 1439 flush_dcache_page(bv.bv_page); 1440 kunmap_atomic(mem); 1441 1442 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); 1443 goto retry_kmap; 1444 } 1445 smp_rmb(); 1446 BUG_ON(journal_entry_get_sector(je) != logical_sector); 1447 js = access_journal_data(ic, journal_section, journal_entry); 1448 mem_ptr = mem + bv.bv_offset; 1449 s = 0; 1450 do { 1451 memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA); 1452 *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s]; 1453 js++; 1454 mem_ptr += 1 << SECTOR_SHIFT; 1455 } while (++s < ic->sectors_per_block); 1456 #ifdef INTERNAL_VERIFY 1457 if (ic->internal_hash) { 1458 char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)]; 1459 1460 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); 1461 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { 1462 DMERR("Checksum failed when reading from journal, at sector 0x%llx", 1463 (unsigned long long)logical_sector); 1464 } 1465 } 1466 #endif 1467 } 1468 1469 if (!ic->internal_hash) { 1470 struct bio_integrity_payload *bip = bio_integrity(bio); 1471 unsigned tag_todo = ic->tag_size; 1472 char *tag_ptr = journal_entry_tag(ic, je); 1473 1474 if (bip) do { 1475 struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter); 1476 unsigned tag_now = min(biv.bv_len, tag_todo); 1477 char *tag_addr; 1478 BUG_ON(PageHighMem(biv.bv_page)); 1479 tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset; 1480 if (likely(dio->write)) 1481 memcpy(tag_ptr, tag_addr, tag_now); 1482 else 1483 memcpy(tag_addr, tag_ptr, tag_now); 1484 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now); 1485 tag_ptr += tag_now; 1486 tag_todo -= tag_now; 1487 } while (unlikely(tag_todo)); else { 1488 if (likely(dio->write)) 1489 memset(tag_ptr, 0, tag_todo); 1490 } 1491 } 1492 1493 if (likely(dio->write)) { 1494 struct journal_sector *js; 1495 unsigned s; 1496 1497 js = access_journal_data(ic, journal_section, journal_entry); 1498 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT); 1499 1500 s = 0; 1501 do { 1502 je->last_bytes[s] = js[s].commit_id; 1503 } while (++s < ic->sectors_per_block); 1504 1505 if (ic->internal_hash) { 1506 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); 1507 if (unlikely(digest_size > ic->tag_size)) { 1508 char checksums_onstack[digest_size]; 1509 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack); 1510 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size); 1511 } else 1512 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je)); 1513 } 1514 1515 journal_entry_set_sector(je, logical_sector); 1516 } 1517 logical_sector += ic->sectors_per_block; 1518 1519 journal_entry++; 1520 if (unlikely(journal_entry == ic->journal_section_entries)) { 1521 journal_entry = 0; 1522 journal_section++; 1523 wraparound_section(ic, &journal_section); 1524 } 1525 1526 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT; 1527 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT); 1528 1529 if (unlikely(!dio->write)) 1530 flush_dcache_page(bv.bv_page); 1531 kunmap_atomic(mem); 1532 } while (n_sectors); 1533 1534 if (likely(dio->write)) { 1535 smp_mb(); 1536 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) 1537 wake_up(&ic->copy_to_journal_wait); 1538 if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { 1539 queue_work(ic->commit_wq, &ic->commit_work); 1540 } else { 1541 schedule_autocommit(ic); 1542 } 1543 } else { 1544 remove_range(ic, &dio->range); 1545 } 1546 1547 if (unlikely(bio->bi_iter.bi_size)) { 1548 sector_t area, offset; 1549 1550 dio->range.logical_sector = logical_sector; 1551 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 1552 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); 1553 return true; 1554 } 1555 1556 return false; 1557 } 1558 1559 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map) 1560 { 1561 struct dm_integrity_c *ic = dio->ic; 1562 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1563 unsigned journal_section, journal_entry; 1564 unsigned journal_read_pos; 1565 struct completion read_comp; 1566 bool need_sync_io = ic->internal_hash && !dio->write; 1567 1568 if (need_sync_io && from_map) { 1569 INIT_WORK(&dio->work, integrity_bio_wait); 1570 queue_work(ic->metadata_wq, &dio->work); 1571 return; 1572 } 1573 1574 lock_retry: 1575 spin_lock_irq(&ic->endio_wait.lock); 1576 retry: 1577 if (unlikely(dm_integrity_failed(ic))) { 1578 spin_unlock_irq(&ic->endio_wait.lock); 1579 do_endio(ic, bio); 1580 return; 1581 } 1582 dio->range.n_sectors = bio_sectors(bio); 1583 journal_read_pos = NOT_FOUND; 1584 if (likely(ic->mode == 'J')) { 1585 if (dio->write) { 1586 unsigned next_entry, i, pos; 1587 unsigned ws, we; 1588 1589 dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors); 1590 if (unlikely(!dio->range.n_sectors)) 1591 goto sleep; 1592 ic->free_sectors -= dio->range.n_sectors; 1593 journal_section = ic->free_section; 1594 journal_entry = ic->free_section_entry; 1595 1596 next_entry = ic->free_section_entry + dio->range.n_sectors; 1597 ic->free_section_entry = next_entry % ic->journal_section_entries; 1598 ic->free_section += next_entry / ic->journal_section_entries; 1599 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; 1600 wraparound_section(ic, &ic->free_section); 1601 1602 pos = journal_section * ic->journal_section_entries + journal_entry; 1603 ws = journal_section; 1604 we = journal_entry; 1605 i = 0; 1606 do { 1607 struct journal_entry *je; 1608 1609 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); 1610 pos++; 1611 if (unlikely(pos >= ic->journal_entries)) 1612 pos = 0; 1613 1614 je = access_journal_entry(ic, ws, we); 1615 BUG_ON(!journal_entry_is_unused(je)); 1616 journal_entry_set_inprogress(je); 1617 we++; 1618 if (unlikely(we == ic->journal_section_entries)) { 1619 we = 0; 1620 ws++; 1621 wraparound_section(ic, &ws); 1622 } 1623 } while ((i += ic->sectors_per_block) < dio->range.n_sectors); 1624 1625 spin_unlock_irq(&ic->endio_wait.lock); 1626 goto journal_read_write; 1627 } else { 1628 sector_t next_sector; 1629 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); 1630 if (likely(journal_read_pos == NOT_FOUND)) { 1631 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector)) 1632 dio->range.n_sectors = next_sector - dio->range.logical_sector; 1633 } else { 1634 unsigned i; 1635 unsigned jp = journal_read_pos + 1; 1636 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { 1637 if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) 1638 break; 1639 } 1640 dio->range.n_sectors = i; 1641 } 1642 } 1643 } 1644 if (unlikely(!add_new_range(ic, &dio->range))) { 1645 /* 1646 * We must not sleep in the request routine because it could 1647 * stall bios on current->bio_list. 1648 * So, we offload the bio to a workqueue if we have to sleep. 1649 */ 1650 sleep: 1651 if (from_map) { 1652 spin_unlock_irq(&ic->endio_wait.lock); 1653 INIT_WORK(&dio->work, integrity_bio_wait); 1654 queue_work(ic->wait_wq, &dio->work); 1655 return; 1656 } else { 1657 sleep_on_endio_wait(ic); 1658 goto retry; 1659 } 1660 } 1661 spin_unlock_irq(&ic->endio_wait.lock); 1662 1663 if (unlikely(journal_read_pos != NOT_FOUND)) { 1664 journal_section = journal_read_pos / ic->journal_section_entries; 1665 journal_entry = journal_read_pos % ic->journal_section_entries; 1666 goto journal_read_write; 1667 } 1668 1669 dio->in_flight = (atomic_t)ATOMIC_INIT(2); 1670 1671 if (need_sync_io) { 1672 read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp); 1673 dio->completion = &read_comp; 1674 } else 1675 dio->completion = NULL; 1676 1677 dio->orig_bi_iter = bio->bi_iter; 1678 1679 dio->orig_bi_bdev = bio->bi_bdev; 1680 bio->bi_bdev = ic->dev->bdev; 1681 1682 dio->orig_bi_integrity = bio_integrity(bio); 1683 bio->bi_integrity = NULL; 1684 bio->bi_opf &= ~REQ_INTEGRITY; 1685 1686 dio->orig_bi_end_io = bio->bi_end_io; 1687 bio->bi_end_io = integrity_end_io; 1688 1689 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; 1690 bio->bi_iter.bi_sector += ic->start; 1691 generic_make_request(bio); 1692 1693 if (need_sync_io) { 1694 wait_for_completion_io(&read_comp); 1695 integrity_metadata(&dio->work); 1696 } else { 1697 INIT_WORK(&dio->work, integrity_metadata); 1698 queue_work(ic->metadata_wq, &dio->work); 1699 } 1700 1701 return; 1702 1703 journal_read_write: 1704 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry))) 1705 goto lock_retry; 1706 1707 do_endio_flush(ic, dio); 1708 } 1709 1710 1711 static void integrity_bio_wait(struct work_struct *w) 1712 { 1713 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 1714 1715 dm_integrity_map_continue(dio, false); 1716 } 1717 1718 static void pad_uncommitted(struct dm_integrity_c *ic) 1719 { 1720 if (ic->free_section_entry) { 1721 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry; 1722 ic->free_section_entry = 0; 1723 ic->free_section++; 1724 wraparound_section(ic, &ic->free_section); 1725 ic->n_uncommitted_sections++; 1726 } 1727 } 1728 1729 static void integrity_commit(struct work_struct *w) 1730 { 1731 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work); 1732 unsigned commit_start, commit_sections; 1733 unsigned i, j, n; 1734 struct bio *flushes; 1735 1736 del_timer(&ic->autocommit_timer); 1737 1738 spin_lock_irq(&ic->endio_wait.lock); 1739 flushes = bio_list_get(&ic->flush_bio_list); 1740 if (unlikely(ic->mode != 'J')) { 1741 spin_unlock_irq(&ic->endio_wait.lock); 1742 dm_integrity_flush_buffers(ic); 1743 goto release_flush_bios; 1744 } 1745 1746 pad_uncommitted(ic); 1747 commit_start = ic->uncommitted_section; 1748 commit_sections = ic->n_uncommitted_sections; 1749 spin_unlock_irq(&ic->endio_wait.lock); 1750 1751 if (!commit_sections) 1752 goto release_flush_bios; 1753 1754 i = commit_start; 1755 for (n = 0; n < commit_sections; n++) { 1756 for (j = 0; j < ic->journal_section_entries; j++) { 1757 struct journal_entry *je; 1758 je = access_journal_entry(ic, i, j); 1759 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); 1760 } 1761 for (j = 0; j < ic->journal_section_sectors; j++) { 1762 struct journal_sector *js; 1763 js = access_journal(ic, i, j); 1764 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq); 1765 } 1766 i++; 1767 if (unlikely(i >= ic->journal_sections)) 1768 ic->commit_seq = next_commit_seq(ic->commit_seq); 1769 wraparound_section(ic, &i); 1770 } 1771 smp_rmb(); 1772 1773 write_journal(ic, commit_start, commit_sections); 1774 1775 spin_lock_irq(&ic->endio_wait.lock); 1776 ic->uncommitted_section += commit_sections; 1777 wraparound_section(ic, &ic->uncommitted_section); 1778 ic->n_uncommitted_sections -= commit_sections; 1779 ic->n_committed_sections += commit_sections; 1780 spin_unlock_irq(&ic->endio_wait.lock); 1781 1782 if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) 1783 queue_work(ic->writer_wq, &ic->writer_work); 1784 1785 release_flush_bios: 1786 while (flushes) { 1787 struct bio *next = flushes->bi_next; 1788 flushes->bi_next = NULL; 1789 do_endio(ic, flushes); 1790 flushes = next; 1791 } 1792 } 1793 1794 static void complete_copy_from_journal(unsigned long error, void *context) 1795 { 1796 struct journal_io *io = context; 1797 struct journal_completion *comp = io->comp; 1798 struct dm_integrity_c *ic = comp->ic; 1799 remove_range(ic, &io->range); 1800 mempool_free(io, ic->journal_io_mempool); 1801 if (unlikely(error != 0)) 1802 dm_integrity_io_error(ic, "copying from journal", -EIO); 1803 complete_journal_op(comp); 1804 } 1805 1806 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js, 1807 struct journal_entry *je) 1808 { 1809 unsigned s = 0; 1810 do { 1811 js->commit_id = je->last_bytes[s]; 1812 js++; 1813 } while (++s < ic->sectors_per_block); 1814 } 1815 1816 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, 1817 unsigned write_sections, bool from_replay) 1818 { 1819 unsigned i, j, n; 1820 struct journal_completion comp; 1821 1822 comp.ic = ic; 1823 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 1824 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); 1825 1826 i = write_start; 1827 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { 1828 #ifndef INTERNAL_VERIFY 1829 if (unlikely(from_replay)) 1830 #endif 1831 rw_section_mac(ic, i, false); 1832 for (j = 0; j < ic->journal_section_entries; j++) { 1833 struct journal_entry *je = access_journal_entry(ic, i, j); 1834 sector_t sec, area, offset; 1835 unsigned k, l, next_loop; 1836 sector_t metadata_block; 1837 unsigned metadata_offset; 1838 struct journal_io *io; 1839 1840 if (journal_entry_is_unused(je)) 1841 continue; 1842 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay); 1843 sec = journal_entry_get_sector(je); 1844 if (unlikely(from_replay)) { 1845 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) { 1846 dm_integrity_io_error(ic, "invalid sector in journal", -EIO); 1847 sec &= ~(sector_t)(ic->sectors_per_block - 1); 1848 } 1849 } 1850 get_area_and_offset(ic, sec, &area, &offset); 1851 restore_last_bytes(ic, access_journal_data(ic, i, j), je); 1852 for (k = j + 1; k < ic->journal_section_entries; k++) { 1853 struct journal_entry *je2 = access_journal_entry(ic, i, k); 1854 sector_t sec2, area2, offset2; 1855 if (journal_entry_is_unused(je2)) 1856 break; 1857 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay); 1858 sec2 = journal_entry_get_sector(je2); 1859 get_area_and_offset(ic, sec2, &area2, &offset2); 1860 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block)) 1861 break; 1862 restore_last_bytes(ic, access_journal_data(ic, i, k), je2); 1863 } 1864 next_loop = k - 1; 1865 1866 io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO); 1867 io->comp = ∁ 1868 io->range.logical_sector = sec; 1869 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block; 1870 1871 spin_lock_irq(&ic->endio_wait.lock); 1872 while (unlikely(!add_new_range(ic, &io->range))) 1873 sleep_on_endio_wait(ic); 1874 1875 if (likely(!from_replay)) { 1876 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries]; 1877 1878 /* don't write if there is newer committed sector */ 1879 while (j < k && find_newer_committed_node(ic, §ion_node[j])) { 1880 struct journal_entry *je2 = access_journal_entry(ic, i, j); 1881 1882 journal_entry_set_unused(je2); 1883 remove_journal_node(ic, §ion_node[j]); 1884 j++; 1885 sec += ic->sectors_per_block; 1886 offset += ic->sectors_per_block; 1887 } 1888 while (j < k && find_newer_committed_node(ic, §ion_node[k - 1])) { 1889 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1); 1890 1891 journal_entry_set_unused(je2); 1892 remove_journal_node(ic, §ion_node[k - 1]); 1893 k--; 1894 } 1895 if (j == k) { 1896 remove_range_unlocked(ic, &io->range); 1897 spin_unlock_irq(&ic->endio_wait.lock); 1898 mempool_free(io, ic->journal_io_mempool); 1899 goto skip_io; 1900 } 1901 for (l = j; l < k; l++) { 1902 remove_journal_node(ic, §ion_node[l]); 1903 } 1904 } 1905 spin_unlock_irq(&ic->endio_wait.lock); 1906 1907 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); 1908 for (l = j; l < k; l++) { 1909 int r; 1910 struct journal_entry *je2 = access_journal_entry(ic, i, l); 1911 1912 if ( 1913 #ifndef INTERNAL_VERIFY 1914 unlikely(from_replay) && 1915 #endif 1916 ic->internal_hash) { 1917 char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)]; 1918 1919 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), 1920 (char *)access_journal_data(ic, i, l), test_tag); 1921 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) 1922 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); 1923 } 1924 1925 journal_entry_set_unused(je2); 1926 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset, 1927 ic->tag_size, TAG_WRITE); 1928 if (unlikely(r)) { 1929 dm_integrity_io_error(ic, "reading tags", r); 1930 } 1931 } 1932 1933 atomic_inc(&comp.in_flight); 1934 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block, 1935 (k - j) << ic->sb->log2_sectors_per_block, 1936 get_data_sector(ic, area, offset), 1937 complete_copy_from_journal, io); 1938 skip_io: 1939 j = next_loop; 1940 } 1941 } 1942 1943 dm_bufio_write_dirty_buffers_async(ic->bufio); 1944 1945 complete_journal_op(&comp); 1946 wait_for_completion_io(&comp.comp); 1947 1948 dm_integrity_flush_buffers(ic); 1949 } 1950 1951 static void integrity_writer(struct work_struct *w) 1952 { 1953 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work); 1954 unsigned write_start, write_sections; 1955 1956 unsigned prev_free_sectors; 1957 1958 /* the following test is not needed, but it tests the replay code */ 1959 if (ACCESS_ONCE(ic->suspending)) 1960 return; 1961 1962 spin_lock_irq(&ic->endio_wait.lock); 1963 write_start = ic->committed_section; 1964 write_sections = ic->n_committed_sections; 1965 spin_unlock_irq(&ic->endio_wait.lock); 1966 1967 if (!write_sections) 1968 return; 1969 1970 do_journal_write(ic, write_start, write_sections, false); 1971 1972 spin_lock_irq(&ic->endio_wait.lock); 1973 1974 ic->committed_section += write_sections; 1975 wraparound_section(ic, &ic->committed_section); 1976 ic->n_committed_sections -= write_sections; 1977 1978 prev_free_sectors = ic->free_sectors; 1979 ic->free_sectors += write_sections * ic->journal_section_entries; 1980 if (unlikely(!prev_free_sectors)) 1981 wake_up_locked(&ic->endio_wait); 1982 1983 spin_unlock_irq(&ic->endio_wait.lock); 1984 } 1985 1986 static void init_journal(struct dm_integrity_c *ic, unsigned start_section, 1987 unsigned n_sections, unsigned char commit_seq) 1988 { 1989 unsigned i, j, n; 1990 1991 if (!n_sections) 1992 return; 1993 1994 for (n = 0; n < n_sections; n++) { 1995 i = start_section + n; 1996 wraparound_section(ic, &i); 1997 for (j = 0; j < ic->journal_section_sectors; j++) { 1998 struct journal_sector *js = access_journal(ic, i, j); 1999 memset(&js->entries, 0, JOURNAL_SECTOR_DATA); 2000 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq); 2001 } 2002 for (j = 0; j < ic->journal_section_entries; j++) { 2003 struct journal_entry *je = access_journal_entry(ic, i, j); 2004 journal_entry_set_unused(je); 2005 } 2006 } 2007 2008 write_journal(ic, start_section, n_sections); 2009 } 2010 2011 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id) 2012 { 2013 unsigned char k; 2014 for (k = 0; k < N_COMMIT_IDS; k++) { 2015 if (dm_integrity_commit_id(ic, i, j, k) == id) 2016 return k; 2017 } 2018 dm_integrity_io_error(ic, "journal commit id", -EIO); 2019 return -EIO; 2020 } 2021 2022 static void replay_journal(struct dm_integrity_c *ic) 2023 { 2024 unsigned i, j; 2025 bool used_commit_ids[N_COMMIT_IDS]; 2026 unsigned max_commit_id_sections[N_COMMIT_IDS]; 2027 unsigned write_start, write_sections; 2028 unsigned continue_section; 2029 bool journal_empty; 2030 unsigned char unused, last_used, want_commit_seq; 2031 2032 if (ic->mode == 'R') 2033 return; 2034 2035 if (ic->journal_uptodate) 2036 return; 2037 2038 last_used = 0; 2039 write_start = 0; 2040 2041 if (!ic->just_formatted) { 2042 DEBUG_print("reading journal\n"); 2043 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL); 2044 if (ic->journal_io) 2045 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); 2046 if (ic->journal_io) { 2047 struct journal_completion crypt_comp; 2048 crypt_comp.ic = ic; 2049 crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp); 2050 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0); 2051 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); 2052 wait_for_completion(&crypt_comp.comp); 2053 } 2054 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal"); 2055 } 2056 2057 if (dm_integrity_failed(ic)) 2058 goto clear_journal; 2059 2060 journal_empty = true; 2061 memset(used_commit_ids, 0, sizeof used_commit_ids); 2062 memset(max_commit_id_sections, 0, sizeof max_commit_id_sections); 2063 for (i = 0; i < ic->journal_sections; i++) { 2064 for (j = 0; j < ic->journal_section_sectors; j++) { 2065 int k; 2066 struct journal_sector *js = access_journal(ic, i, j); 2067 k = find_commit_seq(ic, i, j, js->commit_id); 2068 if (k < 0) 2069 goto clear_journal; 2070 used_commit_ids[k] = true; 2071 max_commit_id_sections[k] = i; 2072 } 2073 if (journal_empty) { 2074 for (j = 0; j < ic->journal_section_entries; j++) { 2075 struct journal_entry *je = access_journal_entry(ic, i, j); 2076 if (!journal_entry_is_unused(je)) { 2077 journal_empty = false; 2078 break; 2079 } 2080 } 2081 } 2082 } 2083 2084 if (!used_commit_ids[N_COMMIT_IDS - 1]) { 2085 unused = N_COMMIT_IDS - 1; 2086 while (unused && !used_commit_ids[unused - 1]) 2087 unused--; 2088 } else { 2089 for (unused = 0; unused < N_COMMIT_IDS; unused++) 2090 if (!used_commit_ids[unused]) 2091 break; 2092 if (unused == N_COMMIT_IDS) { 2093 dm_integrity_io_error(ic, "journal commit ids", -EIO); 2094 goto clear_journal; 2095 } 2096 } 2097 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n", 2098 unused, used_commit_ids[0], used_commit_ids[1], 2099 used_commit_ids[2], used_commit_ids[3]); 2100 2101 last_used = prev_commit_seq(unused); 2102 want_commit_seq = prev_commit_seq(last_used); 2103 2104 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)]) 2105 journal_empty = true; 2106 2107 write_start = max_commit_id_sections[last_used] + 1; 2108 if (unlikely(write_start >= ic->journal_sections)) 2109 want_commit_seq = next_commit_seq(want_commit_seq); 2110 wraparound_section(ic, &write_start); 2111 2112 i = write_start; 2113 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) { 2114 for (j = 0; j < ic->journal_section_sectors; j++) { 2115 struct journal_sector *js = access_journal(ic, i, j); 2116 2117 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) { 2118 /* 2119 * This could be caused by crash during writing. 2120 * We won't replay the inconsistent part of the 2121 * journal. 2122 */ 2123 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n", 2124 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq); 2125 goto brk; 2126 } 2127 } 2128 i++; 2129 if (unlikely(i >= ic->journal_sections)) 2130 want_commit_seq = next_commit_seq(want_commit_seq); 2131 wraparound_section(ic, &i); 2132 } 2133 brk: 2134 2135 if (!journal_empty) { 2136 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n", 2137 write_sections, write_start, want_commit_seq); 2138 do_journal_write(ic, write_start, write_sections, true); 2139 } 2140 2141 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) { 2142 continue_section = write_start; 2143 ic->commit_seq = want_commit_seq; 2144 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq); 2145 } else { 2146 unsigned s; 2147 unsigned char erase_seq; 2148 clear_journal: 2149 DEBUG_print("clearing journal\n"); 2150 2151 erase_seq = prev_commit_seq(prev_commit_seq(last_used)); 2152 s = write_start; 2153 init_journal(ic, s, 1, erase_seq); 2154 s++; 2155 wraparound_section(ic, &s); 2156 if (ic->journal_sections >= 2) { 2157 init_journal(ic, s, ic->journal_sections - 2, erase_seq); 2158 s += ic->journal_sections - 2; 2159 wraparound_section(ic, &s); 2160 init_journal(ic, s, 1, erase_seq); 2161 } 2162 2163 continue_section = 0; 2164 ic->commit_seq = next_commit_seq(erase_seq); 2165 } 2166 2167 ic->committed_section = continue_section; 2168 ic->n_committed_sections = 0; 2169 2170 ic->uncommitted_section = continue_section; 2171 ic->n_uncommitted_sections = 0; 2172 2173 ic->free_section = continue_section; 2174 ic->free_section_entry = 0; 2175 ic->free_sectors = ic->journal_entries; 2176 2177 ic->journal_tree_root = RB_ROOT; 2178 for (i = 0; i < ic->journal_entries; i++) 2179 init_journal_node(&ic->journal_tree[i]); 2180 } 2181 2182 static void dm_integrity_postsuspend(struct dm_target *ti) 2183 { 2184 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; 2185 2186 del_timer_sync(&ic->autocommit_timer); 2187 2188 ic->suspending = true; 2189 2190 queue_work(ic->commit_wq, &ic->commit_work); 2191 drain_workqueue(ic->commit_wq); 2192 2193 if (ic->mode == 'J') { 2194 drain_workqueue(ic->writer_wq); 2195 dm_integrity_flush_buffers(ic); 2196 } 2197 2198 ic->suspending = false; 2199 2200 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); 2201 2202 ic->journal_uptodate = true; 2203 } 2204 2205 static void dm_integrity_resume(struct dm_target *ti) 2206 { 2207 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; 2208 2209 replay_journal(ic); 2210 } 2211 2212 static void dm_integrity_status(struct dm_target *ti, status_type_t type, 2213 unsigned status_flags, char *result, unsigned maxlen) 2214 { 2215 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; 2216 unsigned arg_count; 2217 size_t sz = 0; 2218 2219 switch (type) { 2220 case STATUSTYPE_INFO: 2221 result[0] = '\0'; 2222 break; 2223 2224 case STATUSTYPE_TABLE: { 2225 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; 2226 watermark_percentage += ic->journal_entries / 2; 2227 do_div(watermark_percentage, ic->journal_entries); 2228 arg_count = 5; 2229 arg_count += ic->sectors_per_block != 1; 2230 arg_count += !!ic->internal_hash_alg.alg_string; 2231 arg_count += !!ic->journal_crypt_alg.alg_string; 2232 arg_count += !!ic->journal_mac_alg.alg_string; 2233 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start, 2234 ic->tag_size, ic->mode, arg_count); 2235 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); 2236 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); 2237 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); 2238 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage); 2239 DMEMIT(" commit_time:%u", ic->autocommit_msec); 2240 if (ic->sectors_per_block != 1) 2241 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); 2242 2243 #define EMIT_ALG(a, n) \ 2244 do { \ 2245 if (ic->a.alg_string) { \ 2246 DMEMIT(" %s:%s", n, ic->a.alg_string); \ 2247 if (ic->a.key_string) \ 2248 DMEMIT(":%s", ic->a.key_string);\ 2249 } \ 2250 } while (0) 2251 EMIT_ALG(internal_hash_alg, "internal_hash"); 2252 EMIT_ALG(journal_crypt_alg, "journal_crypt"); 2253 EMIT_ALG(journal_mac_alg, "journal_mac"); 2254 break; 2255 } 2256 } 2257 } 2258 2259 static int dm_integrity_iterate_devices(struct dm_target *ti, 2260 iterate_devices_callout_fn fn, void *data) 2261 { 2262 struct dm_integrity_c *ic = ti->private; 2263 2264 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); 2265 } 2266 2267 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits) 2268 { 2269 struct dm_integrity_c *ic = ti->private; 2270 2271 if (ic->sectors_per_block > 1) { 2272 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; 2273 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; 2274 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); 2275 } 2276 } 2277 2278 static void calculate_journal_section_size(struct dm_integrity_c *ic) 2279 { 2280 unsigned sector_space = JOURNAL_SECTOR_DATA; 2281 2282 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections); 2283 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size, 2284 JOURNAL_ENTRY_ROUNDUP); 2285 2286 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) 2287 sector_space -= JOURNAL_MAC_PER_SECTOR; 2288 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size; 2289 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS; 2290 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS; 2291 ic->journal_entries = ic->journal_section_entries * ic->journal_sections; 2292 } 2293 2294 static int calculate_device_limits(struct dm_integrity_c *ic) 2295 { 2296 __u64 initial_sectors; 2297 sector_t last_sector, last_area, last_offset; 2298 2299 calculate_journal_section_size(ic); 2300 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections; 2301 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->device_sectors || initial_sectors > UINT_MAX) 2302 return -EINVAL; 2303 ic->initial_sectors = initial_sectors; 2304 2305 ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block), 2306 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT; 2307 if (!(ic->metadata_run & (ic->metadata_run - 1))) 2308 ic->log2_metadata_run = __ffs(ic->metadata_run); 2309 else 2310 ic->log2_metadata_run = -1; 2311 2312 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset); 2313 last_sector = get_data_sector(ic, last_area, last_offset); 2314 2315 if (ic->start + last_sector < last_sector || ic->start + last_sector >= ic->device_sectors) 2316 return -EINVAL; 2317 2318 return 0; 2319 } 2320 2321 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors) 2322 { 2323 unsigned journal_sections; 2324 int test_bit; 2325 2326 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT); 2327 memcpy(ic->sb->magic, SB_MAGIC, 8); 2328 ic->sb->version = SB_VERSION; 2329 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size); 2330 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block); 2331 if (ic->journal_mac_alg.alg_string) 2332 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC); 2333 2334 calculate_journal_section_size(ic); 2335 journal_sections = journal_sectors / ic->journal_section_sectors; 2336 if (!journal_sections) 2337 journal_sections = 1; 2338 ic->sb->journal_sections = cpu_to_le32(journal_sections); 2339 2340 if (!interleave_sectors) 2341 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS; 2342 ic->sb->log2_interleave_sectors = __fls(interleave_sectors); 2343 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); 2344 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); 2345 2346 ic->provided_data_sectors = 0; 2347 for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) { 2348 __u64 prev_data_sectors = ic->provided_data_sectors; 2349 2350 ic->provided_data_sectors |= (sector_t)1 << test_bit; 2351 if (calculate_device_limits(ic)) 2352 ic->provided_data_sectors = prev_data_sectors; 2353 } 2354 2355 if (!ic->provided_data_sectors) 2356 return -EINVAL; 2357 2358 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); 2359 2360 return 0; 2361 } 2362 2363 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) 2364 { 2365 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table)); 2366 struct blk_integrity bi; 2367 2368 memset(&bi, 0, sizeof(bi)); 2369 bi.profile = &dm_integrity_profile; 2370 bi.tuple_size = ic->tag_size; 2371 bi.tag_size = bi.tuple_size; 2372 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; 2373 2374 blk_integrity_register(disk, &bi); 2375 blk_queue_max_integrity_segments(disk->queue, UINT_MAX); 2376 } 2377 2378 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) 2379 { 2380 unsigned i; 2381 2382 if (!pl) 2383 return; 2384 for (i = 0; i < ic->journal_pages; i++) 2385 if (pl[i].page) 2386 __free_page(pl[i].page); 2387 kvfree(pl); 2388 } 2389 2390 static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic) 2391 { 2392 size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list); 2393 struct page_list *pl; 2394 unsigned i; 2395 2396 pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO); 2397 if (!pl) 2398 return NULL; 2399 2400 for (i = 0; i < ic->journal_pages; i++) { 2401 pl[i].page = alloc_page(GFP_KERNEL); 2402 if (!pl[i].page) { 2403 dm_integrity_free_page_list(ic, pl); 2404 return NULL; 2405 } 2406 if (i) 2407 pl[i - 1].next = &pl[i]; 2408 } 2409 2410 return pl; 2411 } 2412 2413 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl) 2414 { 2415 unsigned i; 2416 for (i = 0; i < ic->journal_sections; i++) 2417 kvfree(sl[i]); 2418 kfree(sl); 2419 } 2420 2421 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl) 2422 { 2423 struct scatterlist **sl; 2424 unsigned i; 2425 2426 sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO); 2427 if (!sl) 2428 return NULL; 2429 2430 for (i = 0; i < ic->journal_sections; i++) { 2431 struct scatterlist *s; 2432 unsigned start_index, start_offset; 2433 unsigned end_index, end_offset; 2434 unsigned n_pages; 2435 unsigned idx; 2436 2437 page_list_location(ic, i, 0, &start_index, &start_offset); 2438 page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset); 2439 2440 n_pages = (end_index - start_index + 1); 2441 2442 s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL); 2443 if (!s) { 2444 dm_integrity_free_journal_scatterlist(ic, sl); 2445 return NULL; 2446 } 2447 2448 sg_init_table(s, n_pages); 2449 for (idx = start_index; idx <= end_index; idx++) { 2450 char *va = lowmem_page_address(pl[idx].page); 2451 unsigned start = 0, end = PAGE_SIZE; 2452 if (idx == start_index) 2453 start = start_offset; 2454 if (idx == end_index) 2455 end = end_offset + (1 << SECTOR_SHIFT); 2456 sg_set_buf(&s[idx - start_index], va + start, end - start); 2457 } 2458 2459 sl[i] = s; 2460 } 2461 2462 return sl; 2463 } 2464 2465 static void free_alg(struct alg_spec *a) 2466 { 2467 kzfree(a->alg_string); 2468 kzfree(a->key); 2469 memset(a, 0, sizeof *a); 2470 } 2471 2472 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval) 2473 { 2474 char *k; 2475 2476 free_alg(a); 2477 2478 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL); 2479 if (!a->alg_string) 2480 goto nomem; 2481 2482 k = strchr(a->alg_string, ':'); 2483 if (k) { 2484 *k = 0; 2485 a->key_string = k + 1; 2486 if (strlen(a->key_string) & 1) 2487 goto inval; 2488 2489 a->key_size = strlen(a->key_string) / 2; 2490 a->key = kmalloc(a->key_size, GFP_KERNEL); 2491 if (!a->key) 2492 goto nomem; 2493 if (hex2bin(a->key, a->key_string, a->key_size)) 2494 goto inval; 2495 } 2496 2497 return 0; 2498 inval: 2499 *error = error_inval; 2500 return -EINVAL; 2501 nomem: 2502 *error = "Out of memory for an argument"; 2503 return -ENOMEM; 2504 } 2505 2506 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error, 2507 char *error_alg, char *error_key) 2508 { 2509 int r; 2510 2511 if (a->alg_string) { 2512 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ASYNC); 2513 if (IS_ERR(*hash)) { 2514 *error = error_alg; 2515 r = PTR_ERR(*hash); 2516 *hash = NULL; 2517 return r; 2518 } 2519 2520 if (a->key) { 2521 r = crypto_shash_setkey(*hash, a->key, a->key_size); 2522 if (r) { 2523 *error = error_key; 2524 return r; 2525 } 2526 } 2527 } 2528 2529 return 0; 2530 } 2531 2532 static int create_journal(struct dm_integrity_c *ic, char **error) 2533 { 2534 int r = 0; 2535 unsigned i; 2536 __u64 journal_pages, journal_desc_size, journal_tree_size; 2537 unsigned char *crypt_data = NULL; 2538 2539 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); 2540 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); 2541 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL); 2542 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL); 2543 2544 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, 2545 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT); 2546 journal_desc_size = journal_pages * sizeof(struct page_list); 2547 if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) { 2548 *error = "Journal doesn't fit into memory"; 2549 r = -ENOMEM; 2550 goto bad; 2551 } 2552 ic->journal_pages = journal_pages; 2553 2554 ic->journal = dm_integrity_alloc_page_list(ic); 2555 if (!ic->journal) { 2556 *error = "Could not allocate memory for journal"; 2557 r = -ENOMEM; 2558 goto bad; 2559 } 2560 if (ic->journal_crypt_alg.alg_string) { 2561 unsigned ivsize, blocksize; 2562 struct journal_completion comp; 2563 2564 comp.ic = ic; 2565 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0); 2566 if (IS_ERR(ic->journal_crypt)) { 2567 *error = "Invalid journal cipher"; 2568 r = PTR_ERR(ic->journal_crypt); 2569 ic->journal_crypt = NULL; 2570 goto bad; 2571 } 2572 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); 2573 blocksize = crypto_skcipher_blocksize(ic->journal_crypt); 2574 2575 if (ic->journal_crypt_alg.key) { 2576 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key, 2577 ic->journal_crypt_alg.key_size); 2578 if (r) { 2579 *error = "Error setting encryption key"; 2580 goto bad; 2581 } 2582 } 2583 DEBUG_print("cipher %s, block size %u iv size %u\n", 2584 ic->journal_crypt_alg.alg_string, blocksize, ivsize); 2585 2586 ic->journal_io = dm_integrity_alloc_page_list(ic); 2587 if (!ic->journal_io) { 2588 *error = "Could not allocate memory for journal io"; 2589 r = -ENOMEM; 2590 goto bad; 2591 } 2592 2593 if (blocksize == 1) { 2594 struct scatterlist *sg; 2595 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); 2596 unsigned char iv[ivsize]; 2597 skcipher_request_set_tfm(req, ic->journal_crypt); 2598 2599 ic->journal_xor = dm_integrity_alloc_page_list(ic); 2600 if (!ic->journal_xor) { 2601 *error = "Could not allocate memory for journal xor"; 2602 r = -ENOMEM; 2603 goto bad; 2604 } 2605 2606 sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL); 2607 if (!sg) { 2608 *error = "Unable to allocate sg list"; 2609 r = -ENOMEM; 2610 goto bad; 2611 } 2612 sg_init_table(sg, ic->journal_pages + 1); 2613 for (i = 0; i < ic->journal_pages; i++) { 2614 char *va = lowmem_page_address(ic->journal_xor[i].page); 2615 clear_page(va); 2616 sg_set_buf(&sg[i], va, PAGE_SIZE); 2617 } 2618 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); 2619 memset(iv, 0x00, ivsize); 2620 2621 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); 2622 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); 2623 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2624 if (do_crypt(true, req, &comp)) 2625 wait_for_completion(&comp.comp); 2626 kvfree(sg); 2627 r = dm_integrity_failed(ic); 2628 if (r) { 2629 *error = "Unable to encrypt journal"; 2630 goto bad; 2631 } 2632 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data"); 2633 2634 crypto_free_skcipher(ic->journal_crypt); 2635 ic->journal_crypt = NULL; 2636 } else { 2637 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); 2638 unsigned char iv[ivsize]; 2639 unsigned crypt_len = roundup(ivsize, blocksize); 2640 2641 crypt_data = kmalloc(crypt_len, GFP_KERNEL); 2642 if (!crypt_data) { 2643 *error = "Unable to allocate crypt data"; 2644 r = -ENOMEM; 2645 goto bad; 2646 } 2647 2648 skcipher_request_set_tfm(req, ic->journal_crypt); 2649 2650 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); 2651 if (!ic->journal_scatterlist) { 2652 *error = "Unable to allocate sg list"; 2653 r = -ENOMEM; 2654 goto bad; 2655 } 2656 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io); 2657 if (!ic->journal_io_scatterlist) { 2658 *error = "Unable to allocate sg list"; 2659 r = -ENOMEM; 2660 goto bad; 2661 } 2662 ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO); 2663 if (!ic->sk_requests) { 2664 *error = "Unable to allocate sk requests"; 2665 r = -ENOMEM; 2666 goto bad; 2667 } 2668 for (i = 0; i < ic->journal_sections; i++) { 2669 struct scatterlist sg; 2670 struct skcipher_request *section_req; 2671 __u32 section_le = cpu_to_le32(i); 2672 2673 memset(iv, 0x00, ivsize); 2674 memset(crypt_data, 0x00, crypt_len); 2675 memcpy(crypt_data, §ion_le, min((size_t)crypt_len, sizeof(section_le))); 2676 2677 sg_init_one(&sg, crypt_data, crypt_len); 2678 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); 2679 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); 2680 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2681 if (do_crypt(true, req, &comp)) 2682 wait_for_completion(&comp.comp); 2683 2684 r = dm_integrity_failed(ic); 2685 if (r) { 2686 *error = "Unable to generate iv"; 2687 goto bad; 2688 } 2689 2690 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); 2691 if (!section_req) { 2692 *error = "Unable to allocate crypt request"; 2693 r = -ENOMEM; 2694 goto bad; 2695 } 2696 section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL); 2697 if (!section_req->iv) { 2698 skcipher_request_free(section_req); 2699 *error = "Unable to allocate iv"; 2700 r = -ENOMEM; 2701 goto bad; 2702 } 2703 memcpy(section_req->iv + ivsize, crypt_data, ivsize); 2704 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT; 2705 ic->sk_requests[i] = section_req; 2706 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i); 2707 } 2708 } 2709 } 2710 2711 for (i = 0; i < N_COMMIT_IDS; i++) { 2712 unsigned j; 2713 retest_commit_id: 2714 for (j = 0; j < i; j++) { 2715 if (ic->commit_ids[j] == ic->commit_ids[i]) { 2716 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1); 2717 goto retest_commit_id; 2718 } 2719 } 2720 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]); 2721 } 2722 2723 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node); 2724 if (journal_tree_size > ULONG_MAX) { 2725 *error = "Journal doesn't fit into memory"; 2726 r = -ENOMEM; 2727 goto bad; 2728 } 2729 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); 2730 if (!ic->journal_tree) { 2731 *error = "Could not allocate memory for journal tree"; 2732 r = -ENOMEM; 2733 } 2734 bad: 2735 kfree(crypt_data); 2736 return r; 2737 } 2738 2739 /* 2740 * Construct a integrity mapping 2741 * 2742 * Arguments: 2743 * device 2744 * offset from the start of the device 2745 * tag size 2746 * D - direct writes, J - journal writes, R - recovery mode 2747 * number of optional arguments 2748 * optional arguments: 2749 * journal_sectors 2750 * interleave_sectors 2751 * buffer_sectors 2752 * journal_watermark 2753 * commit_time 2754 * internal_hash 2755 * journal_crypt 2756 * journal_mac 2757 * block_size 2758 */ 2759 static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) 2760 { 2761 struct dm_integrity_c *ic; 2762 char dummy; 2763 int r; 2764 unsigned extra_args; 2765 struct dm_arg_set as; 2766 static struct dm_arg _args[] = { 2767 {0, 9, "Invalid number of feature args"}, 2768 }; 2769 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; 2770 bool should_write_sb; 2771 __u64 threshold; 2772 unsigned long long start; 2773 2774 #define DIRECT_ARGUMENTS 4 2775 2776 if (argc <= DIRECT_ARGUMENTS) { 2777 ti->error = "Invalid argument count"; 2778 return -EINVAL; 2779 } 2780 2781 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL); 2782 if (!ic) { 2783 ti->error = "Cannot allocate integrity context"; 2784 return -ENOMEM; 2785 } 2786 ti->private = ic; 2787 ti->per_io_data_size = sizeof(struct dm_integrity_io); 2788 2789 ic->in_progress = RB_ROOT; 2790 init_waitqueue_head(&ic->endio_wait); 2791 bio_list_init(&ic->flush_bio_list); 2792 init_waitqueue_head(&ic->copy_to_journal_wait); 2793 init_completion(&ic->crypto_backoff); 2794 2795 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); 2796 if (r) { 2797 ti->error = "Device lookup failed"; 2798 goto bad; 2799 } 2800 2801 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { 2802 ti->error = "Invalid starting offset"; 2803 r = -EINVAL; 2804 goto bad; 2805 } 2806 ic->start = start; 2807 2808 if (strcmp(argv[2], "-")) { 2809 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) { 2810 ti->error = "Invalid tag size"; 2811 r = -EINVAL; 2812 goto bad; 2813 } 2814 } 2815 2816 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) 2817 ic->mode = argv[3][0]; 2818 else { 2819 ti->error = "Invalid mode (expecting J, D, R)"; 2820 r = -EINVAL; 2821 goto bad; 2822 } 2823 2824 ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT; 2825 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS, 2826 ic->device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR); 2827 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS; 2828 buffer_sectors = DEFAULT_BUFFER_SECTORS; 2829 journal_watermark = DEFAULT_JOURNAL_WATERMARK; 2830 sync_msec = DEFAULT_SYNC_MSEC; 2831 ic->sectors_per_block = 1; 2832 2833 as.argc = argc - DIRECT_ARGUMENTS; 2834 as.argv = argv + DIRECT_ARGUMENTS; 2835 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error); 2836 if (r) 2837 goto bad; 2838 2839 while (extra_args--) { 2840 const char *opt_string; 2841 unsigned val; 2842 opt_string = dm_shift_arg(&as); 2843 if (!opt_string) { 2844 r = -EINVAL; 2845 ti->error = "Not enough feature arguments"; 2846 goto bad; 2847 } 2848 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1) 2849 journal_sectors = val; 2850 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1) 2851 interleave_sectors = val; 2852 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1) 2853 buffer_sectors = val; 2854 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100) 2855 journal_watermark = val; 2856 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) 2857 sync_msec = val; 2858 else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) { 2859 if (val < 1 << SECTOR_SHIFT || 2860 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT || 2861 (val & (val -1))) { 2862 r = -EINVAL; 2863 ti->error = "Invalid block_size argument"; 2864 goto bad; 2865 } 2866 ic->sectors_per_block = val >> SECTOR_SHIFT; 2867 } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { 2868 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, 2869 "Invalid internal_hash argument"); 2870 if (r) 2871 goto bad; 2872 } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { 2873 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, 2874 "Invalid journal_crypt argument"); 2875 if (r) 2876 goto bad; 2877 } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { 2878 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, 2879 "Invalid journal_mac argument"); 2880 if (r) 2881 goto bad; 2882 } else { 2883 r = -EINVAL; 2884 ti->error = "Invalid argument"; 2885 goto bad; 2886 } 2887 } 2888 2889 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error, 2890 "Invalid internal hash", "Error setting internal hash key"); 2891 if (r) 2892 goto bad; 2893 2894 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error, 2895 "Invalid journal mac", "Error setting journal mac key"); 2896 if (r) 2897 goto bad; 2898 2899 if (!ic->tag_size) { 2900 if (!ic->internal_hash) { 2901 ti->error = "Unknown tag size"; 2902 r = -EINVAL; 2903 goto bad; 2904 } 2905 ic->tag_size = crypto_shash_digestsize(ic->internal_hash); 2906 } 2907 if (ic->tag_size > MAX_TAG_SIZE) { 2908 ti->error = "Too big tag size"; 2909 r = -EINVAL; 2910 goto bad; 2911 } 2912 if (!(ic->tag_size & (ic->tag_size - 1))) 2913 ic->log2_tag_size = __ffs(ic->tag_size); 2914 else 2915 ic->log2_tag_size = -1; 2916 2917 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec); 2918 ic->autocommit_msec = sync_msec; 2919 setup_timer(&ic->autocommit_timer, autocommit_fn, (unsigned long)ic); 2920 2921 ic->io = dm_io_client_create(); 2922 if (IS_ERR(ic->io)) { 2923 r = PTR_ERR(ic->io); 2924 ic->io = NULL; 2925 ti->error = "Cannot allocate dm io"; 2926 goto bad; 2927 } 2928 2929 ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache); 2930 if (!ic->journal_io_mempool) { 2931 r = -ENOMEM; 2932 ti->error = "Cannot allocate mempool"; 2933 goto bad; 2934 } 2935 2936 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", 2937 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE); 2938 if (!ic->metadata_wq) { 2939 ti->error = "Cannot allocate workqueue"; 2940 r = -ENOMEM; 2941 goto bad; 2942 } 2943 2944 /* 2945 * If this workqueue were percpu, it would cause bio reordering 2946 * and reduced performance. 2947 */ 2948 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 2949 if (!ic->wait_wq) { 2950 ti->error = "Cannot allocate workqueue"; 2951 r = -ENOMEM; 2952 goto bad; 2953 } 2954 2955 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); 2956 if (!ic->commit_wq) { 2957 ti->error = "Cannot allocate workqueue"; 2958 r = -ENOMEM; 2959 goto bad; 2960 } 2961 INIT_WORK(&ic->commit_work, integrity_commit); 2962 2963 if (ic->mode == 'J') { 2964 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1); 2965 if (!ic->writer_wq) { 2966 ti->error = "Cannot allocate workqueue"; 2967 r = -ENOMEM; 2968 goto bad; 2969 } 2970 INIT_WORK(&ic->writer_work, integrity_writer); 2971 } 2972 2973 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL); 2974 if (!ic->sb) { 2975 r = -ENOMEM; 2976 ti->error = "Cannot allocate superblock area"; 2977 goto bad; 2978 } 2979 2980 r = sync_rw_sb(ic, REQ_OP_READ, 0); 2981 if (r) { 2982 ti->error = "Error reading superblock"; 2983 goto bad; 2984 } 2985 should_write_sb = false; 2986 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) { 2987 if (ic->mode != 'R') { 2988 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) { 2989 r = -EINVAL; 2990 ti->error = "The device is not initialized"; 2991 goto bad; 2992 } 2993 } 2994 2995 r = initialize_superblock(ic, journal_sectors, interleave_sectors); 2996 if (r) { 2997 ti->error = "Could not initialize superblock"; 2998 goto bad; 2999 } 3000 if (ic->mode != 'R') 3001 should_write_sb = true; 3002 } 3003 3004 if (ic->sb->version != SB_VERSION) { 3005 r = -EINVAL; 3006 ti->error = "Unknown version"; 3007 goto bad; 3008 } 3009 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) { 3010 r = -EINVAL; 3011 ti->error = "Tag size doesn't match the information in superblock"; 3012 goto bad; 3013 } 3014 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) { 3015 r = -EINVAL; 3016 ti->error = "Block size doesn't match the information in superblock"; 3017 goto bad; 3018 } 3019 /* make sure that ti->max_io_len doesn't overflow */ 3020 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || 3021 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { 3022 r = -EINVAL; 3023 ti->error = "Invalid interleave_sectors in the superblock"; 3024 goto bad; 3025 } 3026 ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors); 3027 if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) { 3028 /* test for overflow */ 3029 r = -EINVAL; 3030 ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors"; 3031 goto bad; 3032 } 3033 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) { 3034 r = -EINVAL; 3035 ti->error = "Journal mac mismatch"; 3036 goto bad; 3037 } 3038 r = calculate_device_limits(ic); 3039 if (r) { 3040 ti->error = "The device is too small"; 3041 goto bad; 3042 } 3043 3044 if (!buffer_sectors) 3045 buffer_sectors = 1; 3046 ic->log2_buffer_sectors = min3((int)__fls(buffer_sectors), (int)__ffs(ic->metadata_run), 31 - SECTOR_SHIFT); 3047 3048 threshold = (__u64)ic->journal_entries * (100 - journal_watermark); 3049 threshold += 50; 3050 do_div(threshold, 100); 3051 ic->free_sectors_threshold = threshold; 3052 3053 DEBUG_print("initialized:\n"); 3054 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size)); 3055 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size); 3056 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector); 3057 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries); 3058 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors); 3059 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections)); 3060 DEBUG_print(" journal_entries %u\n", ic->journal_entries); 3061 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); 3062 DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors); 3063 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); 3064 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); 3065 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); 3066 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors, 3067 (unsigned long long)ic->provided_data_sectors); 3068 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); 3069 3070 ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 3071 1, 0, NULL, NULL); 3072 if (IS_ERR(ic->bufio)) { 3073 r = PTR_ERR(ic->bufio); 3074 ti->error = "Cannot initialize dm-bufio"; 3075 ic->bufio = NULL; 3076 goto bad; 3077 } 3078 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); 3079 3080 if (ic->mode != 'R') { 3081 r = create_journal(ic, &ti->error); 3082 if (r) 3083 goto bad; 3084 } 3085 3086 if (should_write_sb) { 3087 int r; 3088 3089 init_journal(ic, 0, ic->journal_sections, 0); 3090 r = dm_integrity_failed(ic); 3091 if (unlikely(r)) { 3092 ti->error = "Error initializing journal"; 3093 goto bad; 3094 } 3095 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); 3096 if (r) { 3097 ti->error = "Error initializing superblock"; 3098 goto bad; 3099 } 3100 ic->just_formatted = true; 3101 } 3102 3103 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors); 3104 if (r) 3105 goto bad; 3106 3107 if (!ic->internal_hash) 3108 dm_integrity_set(ti, ic); 3109 3110 ti->num_flush_bios = 1; 3111 ti->flush_supported = true; 3112 3113 return 0; 3114 bad: 3115 dm_integrity_dtr(ti); 3116 return r; 3117 } 3118 3119 static void dm_integrity_dtr(struct dm_target *ti) 3120 { 3121 struct dm_integrity_c *ic = ti->private; 3122 3123 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); 3124 3125 if (ic->metadata_wq) 3126 destroy_workqueue(ic->metadata_wq); 3127 if (ic->wait_wq) 3128 destroy_workqueue(ic->wait_wq); 3129 if (ic->commit_wq) 3130 destroy_workqueue(ic->commit_wq); 3131 if (ic->writer_wq) 3132 destroy_workqueue(ic->writer_wq); 3133 if (ic->bufio) 3134 dm_bufio_client_destroy(ic->bufio); 3135 mempool_destroy(ic->journal_io_mempool); 3136 if (ic->io) 3137 dm_io_client_destroy(ic->io); 3138 if (ic->dev) 3139 dm_put_device(ti, ic->dev); 3140 dm_integrity_free_page_list(ic, ic->journal); 3141 dm_integrity_free_page_list(ic, ic->journal_io); 3142 dm_integrity_free_page_list(ic, ic->journal_xor); 3143 if (ic->journal_scatterlist) 3144 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist); 3145 if (ic->journal_io_scatterlist) 3146 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist); 3147 if (ic->sk_requests) { 3148 unsigned i; 3149 3150 for (i = 0; i < ic->journal_sections; i++) { 3151 struct skcipher_request *req = ic->sk_requests[i]; 3152 if (req) { 3153 kzfree(req->iv); 3154 skcipher_request_free(req); 3155 } 3156 } 3157 kvfree(ic->sk_requests); 3158 } 3159 kvfree(ic->journal_tree); 3160 if (ic->sb) 3161 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT); 3162 3163 if (ic->internal_hash) 3164 crypto_free_shash(ic->internal_hash); 3165 free_alg(&ic->internal_hash_alg); 3166 3167 if (ic->journal_crypt) 3168 crypto_free_skcipher(ic->journal_crypt); 3169 free_alg(&ic->journal_crypt_alg); 3170 3171 if (ic->journal_mac) 3172 crypto_free_shash(ic->journal_mac); 3173 free_alg(&ic->journal_mac_alg); 3174 3175 kfree(ic); 3176 } 3177 3178 static struct target_type integrity_target = { 3179 .name = "integrity", 3180 .version = {1, 0, 0}, 3181 .module = THIS_MODULE, 3182 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, 3183 .ctr = dm_integrity_ctr, 3184 .dtr = dm_integrity_dtr, 3185 .map = dm_integrity_map, 3186 .postsuspend = dm_integrity_postsuspend, 3187 .resume = dm_integrity_resume, 3188 .status = dm_integrity_status, 3189 .iterate_devices = dm_integrity_iterate_devices, 3190 .io_hints = dm_integrity_io_hints, 3191 }; 3192 3193 int __init dm_integrity_init(void) 3194 { 3195 int r; 3196 3197 journal_io_cache = kmem_cache_create("integrity_journal_io", 3198 sizeof(struct journal_io), 0, 0, NULL); 3199 if (!journal_io_cache) { 3200 DMERR("can't allocate journal io cache"); 3201 return -ENOMEM; 3202 } 3203 3204 r = dm_register_target(&integrity_target); 3205 3206 if (r < 0) 3207 DMERR("register failed %d", r); 3208 3209 return r; 3210 } 3211 3212 void dm_integrity_exit(void) 3213 { 3214 dm_unregister_target(&integrity_target); 3215 kmem_cache_destroy(journal_io_cache); 3216 } 3217 3218 module_init(dm_integrity_init); 3219 module_exit(dm_integrity_exit); 3220 3221 MODULE_AUTHOR("Milan Broz"); 3222 MODULE_AUTHOR("Mikulas Patocka"); 3223 MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension"); 3224 MODULE_LICENSE("GPL"); 3225