1 /* 2 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2016-2017 Milan Broz 4 * Copyright (C) 2016-2017 Mikulas Patocka 5 * 6 * This file is released under the GPL. 7 */ 8 9 #include <linux/module.h> 10 #include <linux/device-mapper.h> 11 #include <linux/dm-io.h> 12 #include <linux/vmalloc.h> 13 #include <linux/sort.h> 14 #include <linux/rbtree.h> 15 #include <linux/delay.h> 16 #include <linux/random.h> 17 #include <crypto/hash.h> 18 #include <crypto/skcipher.h> 19 #include <linux/async_tx.h> 20 #include "dm-bufio.h" 21 22 #define DM_MSG_PREFIX "integrity" 23 24 #define DEFAULT_INTERLEAVE_SECTORS 32768 25 #define DEFAULT_JOURNAL_SIZE_FACTOR 7 26 #define DEFAULT_BUFFER_SECTORS 128 27 #define DEFAULT_JOURNAL_WATERMARK 50 28 #define DEFAULT_SYNC_MSEC 10000 29 #define DEFAULT_MAX_JOURNAL_SECTORS 131072 30 #define MIN_LOG2_INTERLEAVE_SECTORS 3 31 #define MAX_LOG2_INTERLEAVE_SECTORS 31 32 #define METADATA_WORKQUEUE_MAX_ACTIVE 16 33 34 /* 35 * Warning - DEBUG_PRINT prints security-sensitive data to the log, 36 * so it should not be enabled in the official kernel 37 */ 38 //#define DEBUG_PRINT 39 //#define INTERNAL_VERIFY 40 41 /* 42 * On disk structures 43 */ 44 45 #define SB_MAGIC "integrt" 46 #define SB_VERSION 1 47 #define SB_SECTORS 8 48 #define MAX_SECTORS_PER_BLOCK 8 49 50 struct superblock { 51 __u8 magic[8]; 52 __u8 version; 53 __u8 log2_interleave_sectors; 54 __u16 integrity_tag_size; 55 __u32 journal_sections; 56 __u64 provided_data_sectors; /* userspace uses this value */ 57 __u32 flags; 58 __u8 log2_sectors_per_block; 59 }; 60 61 #define SB_FLAG_HAVE_JOURNAL_MAC 0x1 62 63 #define JOURNAL_ENTRY_ROUNDUP 8 64 65 typedef __u64 commit_id_t; 66 #define JOURNAL_MAC_PER_SECTOR 8 67 68 struct journal_entry { 69 union { 70 struct { 71 __u32 sector_lo; 72 __u32 sector_hi; 73 } s; 74 __u64 sector; 75 } u; 76 commit_id_t last_bytes[0]; 77 /* __u8 tag[0]; */ 78 }; 79 80 #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) 81 82 #if BITS_PER_LONG == 64 83 #define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0) 84 #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) 85 #elif defined(CONFIG_LBDAF) 86 #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0) 87 #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) 88 #else 89 #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0) 90 #define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo) 91 #endif 92 #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) 93 #define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0) 94 #define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2)) 95 #define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0) 96 97 #define JOURNAL_BLOCK_SECTORS 8 98 #define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t)) 99 #define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS) 100 101 struct journal_sector { 102 __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR]; 103 __u8 mac[JOURNAL_MAC_PER_SECTOR]; 104 commit_id_t commit_id; 105 }; 106 107 #define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK])) 108 109 #define METADATA_PADDING_SECTORS 8 110 111 #define N_COMMIT_IDS 4 112 113 static unsigned char prev_commit_seq(unsigned char seq) 114 { 115 return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS; 116 } 117 118 static unsigned char next_commit_seq(unsigned char seq) 119 { 120 return (seq + 1) % N_COMMIT_IDS; 121 } 122 123 /* 124 * In-memory structures 125 */ 126 127 struct journal_node { 128 struct rb_node node; 129 sector_t sector; 130 }; 131 132 struct alg_spec { 133 char *alg_string; 134 char *key_string; 135 __u8 *key; 136 unsigned key_size; 137 }; 138 139 struct dm_integrity_c { 140 struct dm_dev *dev; 141 unsigned tag_size; 142 __s8 log2_tag_size; 143 sector_t start; 144 mempool_t *journal_io_mempool; 145 struct dm_io_client *io; 146 struct dm_bufio_client *bufio; 147 struct workqueue_struct *metadata_wq; 148 struct superblock *sb; 149 unsigned journal_pages; 150 struct page_list *journal; 151 struct page_list *journal_io; 152 struct page_list *journal_xor; 153 154 struct crypto_skcipher *journal_crypt; 155 struct scatterlist **journal_scatterlist; 156 struct scatterlist **journal_io_scatterlist; 157 struct skcipher_request **sk_requests; 158 159 struct crypto_shash *journal_mac; 160 161 struct journal_node *journal_tree; 162 struct rb_root journal_tree_root; 163 164 sector_t provided_data_sectors; 165 166 unsigned short journal_entry_size; 167 unsigned char journal_entries_per_sector; 168 unsigned char journal_section_entries; 169 unsigned short journal_section_sectors; 170 unsigned journal_sections; 171 unsigned journal_entries; 172 sector_t device_sectors; 173 unsigned initial_sectors; 174 unsigned metadata_run; 175 __s8 log2_metadata_run; 176 __u8 log2_buffer_sectors; 177 __u8 sectors_per_block; 178 179 unsigned char mode; 180 bool suspending; 181 182 int failed; 183 184 struct crypto_shash *internal_hash; 185 186 /* these variables are locked with endio_wait.lock */ 187 struct rb_root in_progress; 188 wait_queue_head_t endio_wait; 189 struct workqueue_struct *wait_wq; 190 191 unsigned char commit_seq; 192 commit_id_t commit_ids[N_COMMIT_IDS]; 193 194 unsigned committed_section; 195 unsigned n_committed_sections; 196 197 unsigned uncommitted_section; 198 unsigned n_uncommitted_sections; 199 200 unsigned free_section; 201 unsigned char free_section_entry; 202 unsigned free_sectors; 203 204 unsigned free_sectors_threshold; 205 206 struct workqueue_struct *commit_wq; 207 struct work_struct commit_work; 208 209 struct workqueue_struct *writer_wq; 210 struct work_struct writer_work; 211 212 struct bio_list flush_bio_list; 213 214 unsigned long autocommit_jiffies; 215 struct timer_list autocommit_timer; 216 unsigned autocommit_msec; 217 218 wait_queue_head_t copy_to_journal_wait; 219 220 struct completion crypto_backoff; 221 222 bool journal_uptodate; 223 bool just_formatted; 224 225 struct alg_spec internal_hash_alg; 226 struct alg_spec journal_crypt_alg; 227 struct alg_spec journal_mac_alg; 228 }; 229 230 struct dm_integrity_range { 231 sector_t logical_sector; 232 unsigned n_sectors; 233 struct rb_node node; 234 }; 235 236 struct dm_integrity_io { 237 struct work_struct work; 238 239 struct dm_integrity_c *ic; 240 bool write; 241 bool fua; 242 243 struct dm_integrity_range range; 244 245 sector_t metadata_block; 246 unsigned metadata_offset; 247 248 atomic_t in_flight; 249 int bi_error; 250 251 struct completion *completion; 252 253 struct block_device *orig_bi_bdev; 254 bio_end_io_t *orig_bi_end_io; 255 struct bio_integrity_payload *orig_bi_integrity; 256 struct bvec_iter orig_bi_iter; 257 }; 258 259 struct journal_completion { 260 struct dm_integrity_c *ic; 261 atomic_t in_flight; 262 struct completion comp; 263 }; 264 265 struct journal_io { 266 struct dm_integrity_range range; 267 struct journal_completion *comp; 268 }; 269 270 static struct kmem_cache *journal_io_cache; 271 272 #define JOURNAL_IO_MEMPOOL 32 273 274 #ifdef DEBUG_PRINT 275 #define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__) 276 static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...) 277 { 278 va_list args; 279 va_start(args, msg); 280 vprintk(msg, args); 281 va_end(args); 282 if (len) 283 pr_cont(":"); 284 while (len) { 285 pr_cont(" %02x", *bytes); 286 bytes++; 287 len--; 288 } 289 pr_cont("\n"); 290 } 291 #define DEBUG_bytes(bytes, len, msg, ...) __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__) 292 #else 293 #define DEBUG_print(x, ...) do { } while (0) 294 #define DEBUG_bytes(bytes, len, msg, ...) do { } while (0) 295 #endif 296 297 /* 298 * DM Integrity profile, protection is performed layer above (dm-crypt) 299 */ 300 static struct blk_integrity_profile dm_integrity_profile = { 301 .name = "DM-DIF-EXT-TAG", 302 .generate_fn = NULL, 303 .verify_fn = NULL, 304 }; 305 306 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map); 307 static void integrity_bio_wait(struct work_struct *w); 308 static void dm_integrity_dtr(struct dm_target *ti); 309 310 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) 311 { 312 if (!cmpxchg(&ic->failed, 0, err)) 313 DMERR("Error on %s: %d", msg, err); 314 } 315 316 static int dm_integrity_failed(struct dm_integrity_c *ic) 317 { 318 return ACCESS_ONCE(ic->failed); 319 } 320 321 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, 322 unsigned j, unsigned char seq) 323 { 324 /* 325 * Xor the number with section and sector, so that if a piece of 326 * journal is written at wrong place, it is detected. 327 */ 328 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j); 329 } 330 331 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, 332 sector_t *area, sector_t *offset) 333 { 334 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; 335 336 *area = data_sector >> log2_interleave_sectors; 337 *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1); 338 } 339 340 #define sector_to_block(ic, n) \ 341 do { \ 342 BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \ 343 (n) >>= (ic)->sb->log2_sectors_per_block; \ 344 } while (0) 345 346 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, 347 sector_t offset, unsigned *metadata_offset) 348 { 349 __u64 ms; 350 unsigned mo; 351 352 ms = area << ic->sb->log2_interleave_sectors; 353 if (likely(ic->log2_metadata_run >= 0)) 354 ms += area << ic->log2_metadata_run; 355 else 356 ms += area * ic->metadata_run; 357 ms >>= ic->log2_buffer_sectors; 358 359 sector_to_block(ic, offset); 360 361 if (likely(ic->log2_tag_size >= 0)) { 362 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); 363 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); 364 } else { 365 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); 366 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); 367 } 368 *metadata_offset = mo; 369 return ms; 370 } 371 372 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) 373 { 374 sector_t result; 375 376 result = area << ic->sb->log2_interleave_sectors; 377 if (likely(ic->log2_metadata_run >= 0)) 378 result += (area + 1) << ic->log2_metadata_run; 379 else 380 result += (area + 1) * ic->metadata_run; 381 382 result += (sector_t)ic->initial_sectors + offset; 383 return result; 384 } 385 386 static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr) 387 { 388 if (unlikely(*sec_ptr >= ic->journal_sections)) 389 *sec_ptr -= ic->journal_sections; 390 } 391 392 static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags) 393 { 394 struct dm_io_request io_req; 395 struct dm_io_region io_loc; 396 397 io_req.bi_op = op; 398 io_req.bi_op_flags = op_flags; 399 io_req.mem.type = DM_IO_KMEM; 400 io_req.mem.ptr.addr = ic->sb; 401 io_req.notify.fn = NULL; 402 io_req.client = ic->io; 403 io_loc.bdev = ic->dev->bdev; 404 io_loc.sector = ic->start; 405 io_loc.count = SB_SECTORS; 406 407 return dm_io(&io_req, 1, &io_loc, NULL); 408 } 409 410 static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset, 411 bool e, const char *function) 412 { 413 #if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY) 414 unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors; 415 416 if (unlikely(section >= ic->journal_sections) || 417 unlikely(offset >= limit)) { 418 printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n", 419 function, section, offset, ic->journal_sections, limit); 420 BUG(); 421 } 422 #endif 423 } 424 425 static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset, 426 unsigned *pl_index, unsigned *pl_offset) 427 { 428 unsigned sector; 429 430 access_journal_check(ic, section, offset, false, "page_list_location"); 431 432 sector = section * ic->journal_section_sectors + offset; 433 434 *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 435 *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 436 } 437 438 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl, 439 unsigned section, unsigned offset, unsigned *n_sectors) 440 { 441 unsigned pl_index, pl_offset; 442 char *va; 443 444 page_list_location(ic, section, offset, &pl_index, &pl_offset); 445 446 if (n_sectors) 447 *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT; 448 449 va = lowmem_page_address(pl[pl_index].page); 450 451 return (struct journal_sector *)(va + pl_offset); 452 } 453 454 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset) 455 { 456 return access_page_list(ic, ic->journal, section, offset, NULL); 457 } 458 459 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n) 460 { 461 unsigned rel_sector, offset; 462 struct journal_sector *js; 463 464 access_journal_check(ic, section, n, true, "access_journal_entry"); 465 466 rel_sector = n % JOURNAL_BLOCK_SECTORS; 467 offset = n / JOURNAL_BLOCK_SECTORS; 468 469 js = access_journal(ic, section, rel_sector); 470 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size); 471 } 472 473 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n) 474 { 475 n <<= ic->sb->log2_sectors_per_block; 476 477 n += JOURNAL_BLOCK_SECTORS; 478 479 access_journal_check(ic, section, n, false, "access_journal_data"); 480 481 return access_journal(ic, section, n); 482 } 483 484 static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE]) 485 { 486 SHASH_DESC_ON_STACK(desc, ic->journal_mac); 487 int r; 488 unsigned j, size; 489 490 desc->tfm = ic->journal_mac; 491 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 492 493 r = crypto_shash_init(desc); 494 if (unlikely(r)) { 495 dm_integrity_io_error(ic, "crypto_shash_init", r); 496 goto err; 497 } 498 499 for (j = 0; j < ic->journal_section_entries; j++) { 500 struct journal_entry *je = access_journal_entry(ic, section, j); 501 r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector); 502 if (unlikely(r)) { 503 dm_integrity_io_error(ic, "crypto_shash_update", r); 504 goto err; 505 } 506 } 507 508 size = crypto_shash_digestsize(ic->journal_mac); 509 510 if (likely(size <= JOURNAL_MAC_SIZE)) { 511 r = crypto_shash_final(desc, result); 512 if (unlikely(r)) { 513 dm_integrity_io_error(ic, "crypto_shash_final", r); 514 goto err; 515 } 516 memset(result + size, 0, JOURNAL_MAC_SIZE - size); 517 } else { 518 __u8 digest[size]; 519 r = crypto_shash_final(desc, digest); 520 if (unlikely(r)) { 521 dm_integrity_io_error(ic, "crypto_shash_final", r); 522 goto err; 523 } 524 memcpy(result, digest, JOURNAL_MAC_SIZE); 525 } 526 527 return; 528 err: 529 memset(result, 0, JOURNAL_MAC_SIZE); 530 } 531 532 static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr) 533 { 534 __u8 result[JOURNAL_MAC_SIZE]; 535 unsigned j; 536 537 if (!ic->journal_mac) 538 return; 539 540 section_mac(ic, section, result); 541 542 for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) { 543 struct journal_sector *js = access_journal(ic, section, j); 544 545 if (likely(wr)) 546 memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR); 547 else { 548 if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) 549 dm_integrity_io_error(ic, "journal mac", -EILSEQ); 550 } 551 } 552 } 553 554 static void complete_journal_op(void *context) 555 { 556 struct journal_completion *comp = context; 557 BUG_ON(!atomic_read(&comp->in_flight)); 558 if (likely(atomic_dec_and_test(&comp->in_flight))) 559 complete(&comp->comp); 560 } 561 562 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, 563 unsigned n_sections, struct journal_completion *comp) 564 { 565 struct async_submit_ctl submit; 566 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; 567 unsigned pl_index, pl_offset, section_index; 568 struct page_list *source_pl, *target_pl; 569 570 if (likely(encrypt)) { 571 source_pl = ic->journal; 572 target_pl = ic->journal_io; 573 } else { 574 source_pl = ic->journal_io; 575 target_pl = ic->journal; 576 } 577 578 page_list_location(ic, section, 0, &pl_index, &pl_offset); 579 580 atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight); 581 582 init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL); 583 584 section_index = pl_index; 585 586 do { 587 size_t this_step; 588 struct page *src_pages[2]; 589 struct page *dst_page; 590 591 while (unlikely(pl_index == section_index)) { 592 unsigned dummy; 593 if (likely(encrypt)) 594 rw_section_mac(ic, section, true); 595 section++; 596 n_sections--; 597 if (!n_sections) 598 break; 599 page_list_location(ic, section, 0, §ion_index, &dummy); 600 } 601 602 this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset); 603 dst_page = target_pl[pl_index].page; 604 src_pages[0] = source_pl[pl_index].page; 605 src_pages[1] = ic->journal_xor[pl_index].page; 606 607 async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit); 608 609 pl_index++; 610 pl_offset = 0; 611 n_bytes -= this_step; 612 } while (n_bytes); 613 614 BUG_ON(n_sections); 615 616 async_tx_issue_pending_all(); 617 } 618 619 static void complete_journal_encrypt(struct crypto_async_request *req, int err) 620 { 621 struct journal_completion *comp = req->data; 622 if (unlikely(err)) { 623 if (likely(err == -EINPROGRESS)) { 624 complete(&comp->ic->crypto_backoff); 625 return; 626 } 627 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err); 628 } 629 complete_journal_op(comp); 630 } 631 632 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) 633 { 634 int r; 635 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 636 complete_journal_encrypt, comp); 637 if (likely(encrypt)) 638 r = crypto_skcipher_encrypt(req); 639 else 640 r = crypto_skcipher_decrypt(req); 641 if (likely(!r)) 642 return false; 643 if (likely(r == -EINPROGRESS)) 644 return true; 645 if (likely(r == -EBUSY)) { 646 wait_for_completion(&comp->ic->crypto_backoff); 647 reinit_completion(&comp->ic->crypto_backoff); 648 return true; 649 } 650 dm_integrity_io_error(comp->ic, "encrypt", r); 651 return false; 652 } 653 654 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, 655 unsigned n_sections, struct journal_completion *comp) 656 { 657 struct scatterlist **source_sg; 658 struct scatterlist **target_sg; 659 660 atomic_add(2, &comp->in_flight); 661 662 if (likely(encrypt)) { 663 source_sg = ic->journal_scatterlist; 664 target_sg = ic->journal_io_scatterlist; 665 } else { 666 source_sg = ic->journal_io_scatterlist; 667 target_sg = ic->journal_scatterlist; 668 } 669 670 do { 671 struct skcipher_request *req; 672 unsigned ivsize; 673 char *iv; 674 675 if (likely(encrypt)) 676 rw_section_mac(ic, section, true); 677 678 req = ic->sk_requests[section]; 679 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); 680 iv = req->iv; 681 682 memcpy(iv, iv + ivsize, ivsize); 683 684 req->src = source_sg[section]; 685 req->dst = target_sg[section]; 686 687 if (unlikely(do_crypt(encrypt, req, comp))) 688 atomic_inc(&comp->in_flight); 689 690 section++; 691 n_sections--; 692 } while (n_sections); 693 694 atomic_dec(&comp->in_flight); 695 complete_journal_op(comp); 696 } 697 698 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, 699 unsigned n_sections, struct journal_completion *comp) 700 { 701 if (ic->journal_xor) 702 return xor_journal(ic, encrypt, section, n_sections, comp); 703 else 704 return crypt_journal(ic, encrypt, section, n_sections, comp); 705 } 706 707 static void complete_journal_io(unsigned long error, void *context) 708 { 709 struct journal_completion *comp = context; 710 if (unlikely(error != 0)) 711 dm_integrity_io_error(comp->ic, "writing journal", -EIO); 712 complete_journal_op(comp); 713 } 714 715 static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section, 716 unsigned n_sections, struct journal_completion *comp) 717 { 718 struct dm_io_request io_req; 719 struct dm_io_region io_loc; 720 unsigned sector, n_sectors, pl_index, pl_offset; 721 int r; 722 723 if (unlikely(dm_integrity_failed(ic))) { 724 if (comp) 725 complete_journal_io(-1UL, comp); 726 return; 727 } 728 729 sector = section * ic->journal_section_sectors; 730 n_sectors = n_sections * ic->journal_section_sectors; 731 732 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 733 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 734 735 io_req.bi_op = op; 736 io_req.bi_op_flags = op_flags; 737 io_req.mem.type = DM_IO_PAGE_LIST; 738 if (ic->journal_io) 739 io_req.mem.ptr.pl = &ic->journal_io[pl_index]; 740 else 741 io_req.mem.ptr.pl = &ic->journal[pl_index]; 742 io_req.mem.offset = pl_offset; 743 if (likely(comp != NULL)) { 744 io_req.notify.fn = complete_journal_io; 745 io_req.notify.context = comp; 746 } else { 747 io_req.notify.fn = NULL; 748 } 749 io_req.client = ic->io; 750 io_loc.bdev = ic->dev->bdev; 751 io_loc.sector = ic->start + SB_SECTORS + sector; 752 io_loc.count = n_sectors; 753 754 r = dm_io(&io_req, 1, &io_loc, NULL); 755 if (unlikely(r)) { 756 dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r); 757 if (comp) { 758 WARN_ONCE(1, "asynchronous dm_io failed: %d", r); 759 complete_journal_io(-1UL, comp); 760 } 761 } 762 } 763 764 static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections) 765 { 766 struct journal_completion io_comp; 767 struct journal_completion crypt_comp_1; 768 struct journal_completion crypt_comp_2; 769 unsigned i; 770 771 io_comp.ic = ic; 772 io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp); 773 774 if (commit_start + commit_sections <= ic->journal_sections) { 775 io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); 776 if (ic->journal_io) { 777 crypt_comp_1.ic = ic; 778 crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); 779 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 780 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); 781 wait_for_completion_io(&crypt_comp_1.comp); 782 } else { 783 for (i = 0; i < commit_sections; i++) 784 rw_section_mac(ic, commit_start + i, true); 785 } 786 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp); 787 } else { 788 unsigned to_end; 789 io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); 790 to_end = ic->journal_sections - commit_start; 791 if (ic->journal_io) { 792 crypt_comp_1.ic = ic; 793 crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); 794 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 795 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); 796 if (try_wait_for_completion(&crypt_comp_1.comp)) { 797 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); 798 crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); 799 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 800 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); 801 wait_for_completion_io(&crypt_comp_1.comp); 802 } else { 803 crypt_comp_2.ic = ic; 804 crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp); 805 crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); 806 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); 807 wait_for_completion_io(&crypt_comp_1.comp); 808 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); 809 wait_for_completion_io(&crypt_comp_2.comp); 810 } 811 } else { 812 for (i = 0; i < to_end; i++) 813 rw_section_mac(ic, commit_start + i, true); 814 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); 815 for (i = 0; i < commit_sections - to_end; i++) 816 rw_section_mac(ic, i, true); 817 } 818 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp); 819 } 820 821 wait_for_completion_io(&io_comp.comp); 822 } 823 824 static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset, 825 unsigned n_sectors, sector_t target, io_notify_fn fn, void *data) 826 { 827 struct dm_io_request io_req; 828 struct dm_io_region io_loc; 829 int r; 830 unsigned sector, pl_index, pl_offset; 831 832 BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1)); 833 834 if (unlikely(dm_integrity_failed(ic))) { 835 fn(-1UL, data); 836 return; 837 } 838 839 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; 840 841 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 842 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 843 844 io_req.bi_op = REQ_OP_WRITE; 845 io_req.bi_op_flags = 0; 846 io_req.mem.type = DM_IO_PAGE_LIST; 847 io_req.mem.ptr.pl = &ic->journal[pl_index]; 848 io_req.mem.offset = pl_offset; 849 io_req.notify.fn = fn; 850 io_req.notify.context = data; 851 io_req.client = ic->io; 852 io_loc.bdev = ic->dev->bdev; 853 io_loc.sector = ic->start + target; 854 io_loc.count = n_sectors; 855 856 r = dm_io(&io_req, 1, &io_loc, NULL); 857 if (unlikely(r)) { 858 WARN_ONCE(1, "asynchronous dm_io failed: %d", r); 859 fn(-1UL, data); 860 } 861 } 862 863 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) 864 { 865 struct rb_node **n = &ic->in_progress.rb_node; 866 struct rb_node *parent; 867 868 BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1)); 869 870 parent = NULL; 871 872 while (*n) { 873 struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node); 874 875 parent = *n; 876 if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) { 877 n = &range->node.rb_left; 878 } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) { 879 n = &range->node.rb_right; 880 } else { 881 return false; 882 } 883 } 884 885 rb_link_node(&new_range->node, parent, n); 886 rb_insert_color(&new_range->node, &ic->in_progress); 887 888 return true; 889 } 890 891 static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) 892 { 893 rb_erase(&range->node, &ic->in_progress); 894 wake_up_locked(&ic->endio_wait); 895 } 896 897 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range) 898 { 899 unsigned long flags; 900 901 spin_lock_irqsave(&ic->endio_wait.lock, flags); 902 remove_range_unlocked(ic, range); 903 spin_unlock_irqrestore(&ic->endio_wait.lock, flags); 904 } 905 906 static void init_journal_node(struct journal_node *node) 907 { 908 RB_CLEAR_NODE(&node->node); 909 node->sector = (sector_t)-1; 910 } 911 912 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) 913 { 914 struct rb_node **link; 915 struct rb_node *parent; 916 917 node->sector = sector; 918 BUG_ON(!RB_EMPTY_NODE(&node->node)); 919 920 link = &ic->journal_tree_root.rb_node; 921 parent = NULL; 922 923 while (*link) { 924 struct journal_node *j; 925 parent = *link; 926 j = container_of(parent, struct journal_node, node); 927 if (sector < j->sector) 928 link = &j->node.rb_left; 929 else 930 link = &j->node.rb_right; 931 } 932 933 rb_link_node(&node->node, parent, link); 934 rb_insert_color(&node->node, &ic->journal_tree_root); 935 } 936 937 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node) 938 { 939 BUG_ON(RB_EMPTY_NODE(&node->node)); 940 rb_erase(&node->node, &ic->journal_tree_root); 941 init_journal_node(node); 942 } 943 944 #define NOT_FOUND (-1U) 945 946 static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) 947 { 948 struct rb_node *n = ic->journal_tree_root.rb_node; 949 unsigned found = NOT_FOUND; 950 *next_sector = (sector_t)-1; 951 while (n) { 952 struct journal_node *j = container_of(n, struct journal_node, node); 953 if (sector == j->sector) { 954 found = j - ic->journal_tree; 955 } 956 if (sector < j->sector) { 957 *next_sector = j->sector; 958 n = j->node.rb_left; 959 } else { 960 n = j->node.rb_right; 961 } 962 } 963 964 return found; 965 } 966 967 static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector) 968 { 969 struct journal_node *node, *next_node; 970 struct rb_node *next; 971 972 if (unlikely(pos >= ic->journal_entries)) 973 return false; 974 node = &ic->journal_tree[pos]; 975 if (unlikely(RB_EMPTY_NODE(&node->node))) 976 return false; 977 if (unlikely(node->sector != sector)) 978 return false; 979 980 next = rb_next(&node->node); 981 if (unlikely(!next)) 982 return true; 983 984 next_node = container_of(next, struct journal_node, node); 985 return next_node->sector != sector; 986 } 987 988 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node) 989 { 990 struct rb_node *next; 991 struct journal_node *next_node; 992 unsigned next_section; 993 994 BUG_ON(RB_EMPTY_NODE(&node->node)); 995 996 next = rb_next(&node->node); 997 if (unlikely(!next)) 998 return false; 999 1000 next_node = container_of(next, struct journal_node, node); 1001 1002 if (next_node->sector != node->sector) 1003 return false; 1004 1005 next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries; 1006 if (next_section >= ic->committed_section && 1007 next_section < ic->committed_section + ic->n_committed_sections) 1008 return true; 1009 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections) 1010 return true; 1011 1012 return false; 1013 } 1014 1015 #define TAG_READ 0 1016 #define TAG_WRITE 1 1017 #define TAG_CMP 2 1018 1019 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block, 1020 unsigned *metadata_offset, unsigned total_size, int op) 1021 { 1022 do { 1023 unsigned char *data, *dp; 1024 struct dm_buffer *b; 1025 unsigned to_copy; 1026 int r; 1027 1028 r = dm_integrity_failed(ic); 1029 if (unlikely(r)) 1030 return r; 1031 1032 data = dm_bufio_read(ic->bufio, *metadata_block, &b); 1033 if (unlikely(IS_ERR(data))) 1034 return PTR_ERR(data); 1035 1036 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); 1037 dp = data + *metadata_offset; 1038 if (op == TAG_READ) { 1039 memcpy(tag, dp, to_copy); 1040 } else if (op == TAG_WRITE) { 1041 memcpy(dp, tag, to_copy); 1042 dm_bufio_mark_buffer_dirty(b); 1043 } else { 1044 /* e.g.: op == TAG_CMP */ 1045 if (unlikely(memcmp(dp, tag, to_copy))) { 1046 unsigned i; 1047 1048 for (i = 0; i < to_copy; i++) { 1049 if (dp[i] != tag[i]) 1050 break; 1051 total_size--; 1052 } 1053 dm_bufio_release(b); 1054 return total_size; 1055 } 1056 } 1057 dm_bufio_release(b); 1058 1059 tag += to_copy; 1060 *metadata_offset += to_copy; 1061 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { 1062 (*metadata_block)++; 1063 *metadata_offset = 0; 1064 } 1065 total_size -= to_copy; 1066 } while (unlikely(total_size)); 1067 1068 return 0; 1069 } 1070 1071 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic) 1072 { 1073 int r; 1074 r = dm_bufio_write_dirty_buffers(ic->bufio); 1075 if (unlikely(r)) 1076 dm_integrity_io_error(ic, "writing tags", r); 1077 } 1078 1079 static void sleep_on_endio_wait(struct dm_integrity_c *ic) 1080 { 1081 DECLARE_WAITQUEUE(wait, current); 1082 __add_wait_queue(&ic->endio_wait, &wait); 1083 __set_current_state(TASK_UNINTERRUPTIBLE); 1084 spin_unlock_irq(&ic->endio_wait.lock); 1085 io_schedule(); 1086 spin_lock_irq(&ic->endio_wait.lock); 1087 __remove_wait_queue(&ic->endio_wait, &wait); 1088 } 1089 1090 static void autocommit_fn(unsigned long data) 1091 { 1092 struct dm_integrity_c *ic = (struct dm_integrity_c *)data; 1093 1094 if (likely(!dm_integrity_failed(ic))) 1095 queue_work(ic->commit_wq, &ic->commit_work); 1096 } 1097 1098 static void schedule_autocommit(struct dm_integrity_c *ic) 1099 { 1100 if (!timer_pending(&ic->autocommit_timer)) 1101 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies); 1102 } 1103 1104 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1105 { 1106 struct bio *bio; 1107 spin_lock_irq(&ic->endio_wait.lock); 1108 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1109 bio_list_add(&ic->flush_bio_list, bio); 1110 spin_unlock_irq(&ic->endio_wait.lock); 1111 queue_work(ic->commit_wq, &ic->commit_work); 1112 } 1113 1114 static void do_endio(struct dm_integrity_c *ic, struct bio *bio) 1115 { 1116 int r = dm_integrity_failed(ic); 1117 if (unlikely(r) && !bio->bi_error) 1118 bio->bi_error = r; 1119 bio_endio(bio); 1120 } 1121 1122 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1123 { 1124 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1125 1126 if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic))) 1127 submit_flush_bio(ic, dio); 1128 else 1129 do_endio(ic, bio); 1130 } 1131 1132 static void dec_in_flight(struct dm_integrity_io *dio) 1133 { 1134 if (atomic_dec_and_test(&dio->in_flight)) { 1135 struct dm_integrity_c *ic = dio->ic; 1136 struct bio *bio; 1137 1138 remove_range(ic, &dio->range); 1139 1140 if (unlikely(dio->write)) 1141 schedule_autocommit(ic); 1142 1143 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1144 1145 if (unlikely(dio->bi_error) && !bio->bi_error) 1146 bio->bi_error = dio->bi_error; 1147 if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { 1148 dio->range.logical_sector += dio->range.n_sectors; 1149 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); 1150 INIT_WORK(&dio->work, integrity_bio_wait); 1151 queue_work(ic->wait_wq, &dio->work); 1152 return; 1153 } 1154 do_endio_flush(ic, dio); 1155 } 1156 } 1157 1158 static void integrity_end_io(struct bio *bio) 1159 { 1160 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 1161 1162 bio->bi_iter = dio->orig_bi_iter; 1163 bio->bi_bdev = dio->orig_bi_bdev; 1164 if (dio->orig_bi_integrity) { 1165 bio->bi_integrity = dio->orig_bi_integrity; 1166 bio->bi_opf |= REQ_INTEGRITY; 1167 } 1168 bio->bi_end_io = dio->orig_bi_end_io; 1169 1170 if (dio->completion) 1171 complete(dio->completion); 1172 1173 dec_in_flight(dio); 1174 } 1175 1176 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector, 1177 const char *data, char *result) 1178 { 1179 __u64 sector_le = cpu_to_le64(sector); 1180 SHASH_DESC_ON_STACK(req, ic->internal_hash); 1181 int r; 1182 unsigned digest_size; 1183 1184 req->tfm = ic->internal_hash; 1185 req->flags = 0; 1186 1187 r = crypto_shash_init(req); 1188 if (unlikely(r < 0)) { 1189 dm_integrity_io_error(ic, "crypto_shash_init", r); 1190 goto failed; 1191 } 1192 1193 r = crypto_shash_update(req, (const __u8 *)§or_le, sizeof sector_le); 1194 if (unlikely(r < 0)) { 1195 dm_integrity_io_error(ic, "crypto_shash_update", r); 1196 goto failed; 1197 } 1198 1199 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT); 1200 if (unlikely(r < 0)) { 1201 dm_integrity_io_error(ic, "crypto_shash_update", r); 1202 goto failed; 1203 } 1204 1205 r = crypto_shash_final(req, result); 1206 if (unlikely(r < 0)) { 1207 dm_integrity_io_error(ic, "crypto_shash_final", r); 1208 goto failed; 1209 } 1210 1211 digest_size = crypto_shash_digestsize(ic->internal_hash); 1212 if (unlikely(digest_size < ic->tag_size)) 1213 memset(result + digest_size, 0, ic->tag_size - digest_size); 1214 1215 return; 1216 1217 failed: 1218 /* this shouldn't happen anyway, the hash functions have no reason to fail */ 1219 get_random_bytes(result, ic->tag_size); 1220 } 1221 1222 static void integrity_metadata(struct work_struct *w) 1223 { 1224 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 1225 struct dm_integrity_c *ic = dio->ic; 1226 1227 int r; 1228 1229 if (ic->internal_hash) { 1230 struct bvec_iter iter; 1231 struct bio_vec bv; 1232 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); 1233 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1234 char *checksums; 1235 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; 1236 char checksums_onstack[ic->tag_size + extra_space]; 1237 unsigned sectors_to_process = dio->range.n_sectors; 1238 sector_t sector = dio->range.logical_sector; 1239 1240 if (unlikely(ic->mode == 'R')) 1241 goto skip_io; 1242 1243 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space, 1244 GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN); 1245 if (!checksums) 1246 checksums = checksums_onstack; 1247 1248 __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) { 1249 unsigned pos; 1250 char *mem, *checksums_ptr; 1251 1252 again: 1253 mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset; 1254 pos = 0; 1255 checksums_ptr = checksums; 1256 do { 1257 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr); 1258 checksums_ptr += ic->tag_size; 1259 sectors_to_process -= ic->sectors_per_block; 1260 pos += ic->sectors_per_block << SECTOR_SHIFT; 1261 sector += ic->sectors_per_block; 1262 } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack); 1263 kunmap_atomic(mem); 1264 1265 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, 1266 checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE); 1267 if (unlikely(r)) { 1268 if (r > 0) { 1269 DMERR("Checksum failed at sector 0x%llx", 1270 (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); 1271 r = -EILSEQ; 1272 } 1273 if (likely(checksums != checksums_onstack)) 1274 kfree(checksums); 1275 goto error; 1276 } 1277 1278 if (!sectors_to_process) 1279 break; 1280 1281 if (unlikely(pos < bv.bv_len)) { 1282 bv.bv_offset += pos; 1283 bv.bv_len -= pos; 1284 goto again; 1285 } 1286 } 1287 1288 if (likely(checksums != checksums_onstack)) 1289 kfree(checksums); 1290 } else { 1291 struct bio_integrity_payload *bip = dio->orig_bi_integrity; 1292 1293 if (bip) { 1294 struct bio_vec biv; 1295 struct bvec_iter iter; 1296 unsigned data_to_process = dio->range.n_sectors; 1297 sector_to_block(ic, data_to_process); 1298 data_to_process *= ic->tag_size; 1299 1300 bip_for_each_vec(biv, bip, iter) { 1301 unsigned char *tag; 1302 unsigned this_len; 1303 1304 BUG_ON(PageHighMem(biv.bv_page)); 1305 tag = lowmem_page_address(biv.bv_page) + biv.bv_offset; 1306 this_len = min(biv.bv_len, data_to_process); 1307 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, 1308 this_len, !dio->write ? TAG_READ : TAG_WRITE); 1309 if (unlikely(r)) 1310 goto error; 1311 data_to_process -= this_len; 1312 if (!data_to_process) 1313 break; 1314 } 1315 } 1316 } 1317 skip_io: 1318 dec_in_flight(dio); 1319 return; 1320 error: 1321 dio->bi_error = r; 1322 dec_in_flight(dio); 1323 } 1324 1325 static int dm_integrity_map(struct dm_target *ti, struct bio *bio) 1326 { 1327 struct dm_integrity_c *ic = ti->private; 1328 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 1329 struct bio_integrity_payload *bip; 1330 1331 sector_t area, offset; 1332 1333 dio->ic = ic; 1334 dio->bi_error = 0; 1335 1336 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 1337 submit_flush_bio(ic, dio); 1338 return DM_MAPIO_SUBMITTED; 1339 } 1340 1341 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); 1342 dio->write = bio_op(bio) == REQ_OP_WRITE; 1343 dio->fua = dio->write && bio->bi_opf & REQ_FUA; 1344 if (unlikely(dio->fua)) { 1345 /* 1346 * Don't pass down the FUA flag because we have to flush 1347 * disk cache anyway. 1348 */ 1349 bio->bi_opf &= ~REQ_FUA; 1350 } 1351 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { 1352 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx", 1353 (unsigned long long)dio->range.logical_sector, bio_sectors(bio), 1354 (unsigned long long)ic->provided_data_sectors); 1355 return -EIO; 1356 } 1357 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) { 1358 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x", 1359 ic->sectors_per_block, 1360 (unsigned long long)dio->range.logical_sector, bio_sectors(bio)); 1361 return -EIO; 1362 } 1363 1364 if (ic->sectors_per_block > 1) { 1365 struct bvec_iter iter; 1366 struct bio_vec bv; 1367 bio_for_each_segment(bv, bio, iter) { 1368 if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { 1369 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary", 1370 bv.bv_offset, bv.bv_len, ic->sectors_per_block); 1371 return -EIO; 1372 } 1373 } 1374 } 1375 1376 bip = bio_integrity(bio); 1377 if (!ic->internal_hash) { 1378 if (bip) { 1379 unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block; 1380 if (ic->log2_tag_size >= 0) 1381 wanted_tag_size <<= ic->log2_tag_size; 1382 else 1383 wanted_tag_size *= ic->tag_size; 1384 if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) { 1385 DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size); 1386 return -EIO; 1387 } 1388 } 1389 } else { 1390 if (unlikely(bip != NULL)) { 1391 DMERR("Unexpected integrity data when using internal hash"); 1392 return -EIO; 1393 } 1394 } 1395 1396 if (unlikely(ic->mode == 'R') && unlikely(dio->write)) 1397 return -EIO; 1398 1399 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 1400 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); 1401 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset); 1402 1403 dm_integrity_map_continue(dio, true); 1404 return DM_MAPIO_SUBMITTED; 1405 } 1406 1407 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, 1408 unsigned journal_section, unsigned journal_entry) 1409 { 1410 struct dm_integrity_c *ic = dio->ic; 1411 sector_t logical_sector; 1412 unsigned n_sectors; 1413 1414 logical_sector = dio->range.logical_sector; 1415 n_sectors = dio->range.n_sectors; 1416 do { 1417 struct bio_vec bv = bio_iovec(bio); 1418 char *mem; 1419 1420 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors)) 1421 bv.bv_len = n_sectors << SECTOR_SHIFT; 1422 n_sectors -= bv.bv_len >> SECTOR_SHIFT; 1423 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len); 1424 retry_kmap: 1425 mem = kmap_atomic(bv.bv_page); 1426 if (likely(dio->write)) 1427 flush_dcache_page(bv.bv_page); 1428 1429 do { 1430 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry); 1431 1432 if (unlikely(!dio->write)) { 1433 struct journal_sector *js; 1434 char *mem_ptr; 1435 unsigned s; 1436 1437 if (unlikely(journal_entry_is_inprogress(je))) { 1438 flush_dcache_page(bv.bv_page); 1439 kunmap_atomic(mem); 1440 1441 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); 1442 goto retry_kmap; 1443 } 1444 smp_rmb(); 1445 BUG_ON(journal_entry_get_sector(je) != logical_sector); 1446 js = access_journal_data(ic, journal_section, journal_entry); 1447 mem_ptr = mem + bv.bv_offset; 1448 s = 0; 1449 do { 1450 memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA); 1451 *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s]; 1452 js++; 1453 mem_ptr += 1 << SECTOR_SHIFT; 1454 } while (++s < ic->sectors_per_block); 1455 #ifdef INTERNAL_VERIFY 1456 if (ic->internal_hash) { 1457 char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)]; 1458 1459 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); 1460 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { 1461 DMERR("Checksum failed when reading from journal, at sector 0x%llx", 1462 (unsigned long long)logical_sector); 1463 } 1464 } 1465 #endif 1466 } 1467 1468 if (!ic->internal_hash) { 1469 struct bio_integrity_payload *bip = bio_integrity(bio); 1470 unsigned tag_todo = ic->tag_size; 1471 char *tag_ptr = journal_entry_tag(ic, je); 1472 1473 if (bip) do { 1474 struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter); 1475 unsigned tag_now = min(biv.bv_len, tag_todo); 1476 char *tag_addr; 1477 BUG_ON(PageHighMem(biv.bv_page)); 1478 tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset; 1479 if (likely(dio->write)) 1480 memcpy(tag_ptr, tag_addr, tag_now); 1481 else 1482 memcpy(tag_addr, tag_ptr, tag_now); 1483 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now); 1484 tag_ptr += tag_now; 1485 tag_todo -= tag_now; 1486 } while (unlikely(tag_todo)); else { 1487 if (likely(dio->write)) 1488 memset(tag_ptr, 0, tag_todo); 1489 } 1490 } 1491 1492 if (likely(dio->write)) { 1493 struct journal_sector *js; 1494 unsigned s; 1495 1496 js = access_journal_data(ic, journal_section, journal_entry); 1497 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT); 1498 1499 s = 0; 1500 do { 1501 je->last_bytes[s] = js[s].commit_id; 1502 } while (++s < ic->sectors_per_block); 1503 1504 if (ic->internal_hash) { 1505 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); 1506 if (unlikely(digest_size > ic->tag_size)) { 1507 char checksums_onstack[digest_size]; 1508 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack); 1509 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size); 1510 } else 1511 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je)); 1512 } 1513 1514 journal_entry_set_sector(je, logical_sector); 1515 } 1516 logical_sector += ic->sectors_per_block; 1517 1518 journal_entry++; 1519 if (unlikely(journal_entry == ic->journal_section_entries)) { 1520 journal_entry = 0; 1521 journal_section++; 1522 wraparound_section(ic, &journal_section); 1523 } 1524 1525 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT; 1526 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT); 1527 1528 if (unlikely(!dio->write)) 1529 flush_dcache_page(bv.bv_page); 1530 kunmap_atomic(mem); 1531 } while (n_sectors); 1532 1533 if (likely(dio->write)) { 1534 smp_mb(); 1535 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) 1536 wake_up(&ic->copy_to_journal_wait); 1537 if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { 1538 queue_work(ic->commit_wq, &ic->commit_work); 1539 } else { 1540 schedule_autocommit(ic); 1541 } 1542 } else { 1543 remove_range(ic, &dio->range); 1544 } 1545 1546 if (unlikely(bio->bi_iter.bi_size)) { 1547 sector_t area, offset; 1548 1549 dio->range.logical_sector = logical_sector; 1550 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 1551 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); 1552 return true; 1553 } 1554 1555 return false; 1556 } 1557 1558 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map) 1559 { 1560 struct dm_integrity_c *ic = dio->ic; 1561 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1562 unsigned journal_section, journal_entry; 1563 unsigned journal_read_pos; 1564 struct completion read_comp; 1565 bool need_sync_io = ic->internal_hash && !dio->write; 1566 1567 if (need_sync_io && from_map) { 1568 INIT_WORK(&dio->work, integrity_bio_wait); 1569 queue_work(ic->metadata_wq, &dio->work); 1570 return; 1571 } 1572 1573 lock_retry: 1574 spin_lock_irq(&ic->endio_wait.lock); 1575 retry: 1576 if (unlikely(dm_integrity_failed(ic))) { 1577 spin_unlock_irq(&ic->endio_wait.lock); 1578 do_endio(ic, bio); 1579 return; 1580 } 1581 dio->range.n_sectors = bio_sectors(bio); 1582 journal_read_pos = NOT_FOUND; 1583 if (likely(ic->mode == 'J')) { 1584 if (dio->write) { 1585 unsigned next_entry, i, pos; 1586 unsigned ws, we; 1587 1588 dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors); 1589 if (unlikely(!dio->range.n_sectors)) 1590 goto sleep; 1591 ic->free_sectors -= dio->range.n_sectors; 1592 journal_section = ic->free_section; 1593 journal_entry = ic->free_section_entry; 1594 1595 next_entry = ic->free_section_entry + dio->range.n_sectors; 1596 ic->free_section_entry = next_entry % ic->journal_section_entries; 1597 ic->free_section += next_entry / ic->journal_section_entries; 1598 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; 1599 wraparound_section(ic, &ic->free_section); 1600 1601 pos = journal_section * ic->journal_section_entries + journal_entry; 1602 ws = journal_section; 1603 we = journal_entry; 1604 i = 0; 1605 do { 1606 struct journal_entry *je; 1607 1608 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); 1609 pos++; 1610 if (unlikely(pos >= ic->journal_entries)) 1611 pos = 0; 1612 1613 je = access_journal_entry(ic, ws, we); 1614 BUG_ON(!journal_entry_is_unused(je)); 1615 journal_entry_set_inprogress(je); 1616 we++; 1617 if (unlikely(we == ic->journal_section_entries)) { 1618 we = 0; 1619 ws++; 1620 wraparound_section(ic, &ws); 1621 } 1622 } while ((i += ic->sectors_per_block) < dio->range.n_sectors); 1623 1624 spin_unlock_irq(&ic->endio_wait.lock); 1625 goto journal_read_write; 1626 } else { 1627 sector_t next_sector; 1628 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); 1629 if (likely(journal_read_pos == NOT_FOUND)) { 1630 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector)) 1631 dio->range.n_sectors = next_sector - dio->range.logical_sector; 1632 } else { 1633 unsigned i; 1634 unsigned jp = journal_read_pos + 1; 1635 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { 1636 if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) 1637 break; 1638 } 1639 dio->range.n_sectors = i; 1640 } 1641 } 1642 } 1643 if (unlikely(!add_new_range(ic, &dio->range))) { 1644 /* 1645 * We must not sleep in the request routine because it could 1646 * stall bios on current->bio_list. 1647 * So, we offload the bio to a workqueue if we have to sleep. 1648 */ 1649 sleep: 1650 if (from_map) { 1651 spin_unlock_irq(&ic->endio_wait.lock); 1652 INIT_WORK(&dio->work, integrity_bio_wait); 1653 queue_work(ic->wait_wq, &dio->work); 1654 return; 1655 } else { 1656 sleep_on_endio_wait(ic); 1657 goto retry; 1658 } 1659 } 1660 spin_unlock_irq(&ic->endio_wait.lock); 1661 1662 if (unlikely(journal_read_pos != NOT_FOUND)) { 1663 journal_section = journal_read_pos / ic->journal_section_entries; 1664 journal_entry = journal_read_pos % ic->journal_section_entries; 1665 goto journal_read_write; 1666 } 1667 1668 dio->in_flight = (atomic_t)ATOMIC_INIT(2); 1669 1670 if (need_sync_io) { 1671 read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp); 1672 dio->completion = &read_comp; 1673 } else 1674 dio->completion = NULL; 1675 1676 dio->orig_bi_iter = bio->bi_iter; 1677 1678 dio->orig_bi_bdev = bio->bi_bdev; 1679 bio->bi_bdev = ic->dev->bdev; 1680 1681 dio->orig_bi_integrity = bio_integrity(bio); 1682 bio->bi_integrity = NULL; 1683 bio->bi_opf &= ~REQ_INTEGRITY; 1684 1685 dio->orig_bi_end_io = bio->bi_end_io; 1686 bio->bi_end_io = integrity_end_io; 1687 1688 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; 1689 bio->bi_iter.bi_sector += ic->start; 1690 generic_make_request(bio); 1691 1692 if (need_sync_io) { 1693 wait_for_completion_io(&read_comp); 1694 integrity_metadata(&dio->work); 1695 } else { 1696 INIT_WORK(&dio->work, integrity_metadata); 1697 queue_work(ic->metadata_wq, &dio->work); 1698 } 1699 1700 return; 1701 1702 journal_read_write: 1703 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry))) 1704 goto lock_retry; 1705 1706 do_endio_flush(ic, dio); 1707 } 1708 1709 1710 static void integrity_bio_wait(struct work_struct *w) 1711 { 1712 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); 1713 1714 dm_integrity_map_continue(dio, false); 1715 } 1716 1717 static void pad_uncommitted(struct dm_integrity_c *ic) 1718 { 1719 if (ic->free_section_entry) { 1720 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry; 1721 ic->free_section_entry = 0; 1722 ic->free_section++; 1723 wraparound_section(ic, &ic->free_section); 1724 ic->n_uncommitted_sections++; 1725 } 1726 } 1727 1728 static void integrity_commit(struct work_struct *w) 1729 { 1730 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work); 1731 unsigned commit_start, commit_sections; 1732 unsigned i, j, n; 1733 struct bio *flushes; 1734 1735 del_timer(&ic->autocommit_timer); 1736 1737 spin_lock_irq(&ic->endio_wait.lock); 1738 flushes = bio_list_get(&ic->flush_bio_list); 1739 if (unlikely(ic->mode != 'J')) { 1740 spin_unlock_irq(&ic->endio_wait.lock); 1741 dm_integrity_flush_buffers(ic); 1742 goto release_flush_bios; 1743 } 1744 1745 pad_uncommitted(ic); 1746 commit_start = ic->uncommitted_section; 1747 commit_sections = ic->n_uncommitted_sections; 1748 spin_unlock_irq(&ic->endio_wait.lock); 1749 1750 if (!commit_sections) 1751 goto release_flush_bios; 1752 1753 i = commit_start; 1754 for (n = 0; n < commit_sections; n++) { 1755 for (j = 0; j < ic->journal_section_entries; j++) { 1756 struct journal_entry *je; 1757 je = access_journal_entry(ic, i, j); 1758 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); 1759 } 1760 for (j = 0; j < ic->journal_section_sectors; j++) { 1761 struct journal_sector *js; 1762 js = access_journal(ic, i, j); 1763 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq); 1764 } 1765 i++; 1766 if (unlikely(i >= ic->journal_sections)) 1767 ic->commit_seq = next_commit_seq(ic->commit_seq); 1768 wraparound_section(ic, &i); 1769 } 1770 smp_rmb(); 1771 1772 write_journal(ic, commit_start, commit_sections); 1773 1774 spin_lock_irq(&ic->endio_wait.lock); 1775 ic->uncommitted_section += commit_sections; 1776 wraparound_section(ic, &ic->uncommitted_section); 1777 ic->n_uncommitted_sections -= commit_sections; 1778 ic->n_committed_sections += commit_sections; 1779 spin_unlock_irq(&ic->endio_wait.lock); 1780 1781 if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) 1782 queue_work(ic->writer_wq, &ic->writer_work); 1783 1784 release_flush_bios: 1785 while (flushes) { 1786 struct bio *next = flushes->bi_next; 1787 flushes->bi_next = NULL; 1788 do_endio(ic, flushes); 1789 flushes = next; 1790 } 1791 } 1792 1793 static void complete_copy_from_journal(unsigned long error, void *context) 1794 { 1795 struct journal_io *io = context; 1796 struct journal_completion *comp = io->comp; 1797 struct dm_integrity_c *ic = comp->ic; 1798 remove_range(ic, &io->range); 1799 mempool_free(io, ic->journal_io_mempool); 1800 if (unlikely(error != 0)) 1801 dm_integrity_io_error(ic, "copying from journal", -EIO); 1802 complete_journal_op(comp); 1803 } 1804 1805 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js, 1806 struct journal_entry *je) 1807 { 1808 unsigned s = 0; 1809 do { 1810 js->commit_id = je->last_bytes[s]; 1811 js++; 1812 } while (++s < ic->sectors_per_block); 1813 } 1814 1815 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, 1816 unsigned write_sections, bool from_replay) 1817 { 1818 unsigned i, j, n; 1819 struct journal_completion comp; 1820 1821 comp.ic = ic; 1822 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 1823 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); 1824 1825 i = write_start; 1826 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { 1827 #ifndef INTERNAL_VERIFY 1828 if (unlikely(from_replay)) 1829 #endif 1830 rw_section_mac(ic, i, false); 1831 for (j = 0; j < ic->journal_section_entries; j++) { 1832 struct journal_entry *je = access_journal_entry(ic, i, j); 1833 sector_t sec, area, offset; 1834 unsigned k, l, next_loop; 1835 sector_t metadata_block; 1836 unsigned metadata_offset; 1837 struct journal_io *io; 1838 1839 if (journal_entry_is_unused(je)) 1840 continue; 1841 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay); 1842 sec = journal_entry_get_sector(je); 1843 if (unlikely(from_replay)) { 1844 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) { 1845 dm_integrity_io_error(ic, "invalid sector in journal", -EIO); 1846 sec &= ~(sector_t)(ic->sectors_per_block - 1); 1847 } 1848 } 1849 get_area_and_offset(ic, sec, &area, &offset); 1850 restore_last_bytes(ic, access_journal_data(ic, i, j), je); 1851 for (k = j + 1; k < ic->journal_section_entries; k++) { 1852 struct journal_entry *je2 = access_journal_entry(ic, i, k); 1853 sector_t sec2, area2, offset2; 1854 if (journal_entry_is_unused(je2)) 1855 break; 1856 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay); 1857 sec2 = journal_entry_get_sector(je2); 1858 get_area_and_offset(ic, sec2, &area2, &offset2); 1859 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block)) 1860 break; 1861 restore_last_bytes(ic, access_journal_data(ic, i, k), je2); 1862 } 1863 next_loop = k - 1; 1864 1865 io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO); 1866 io->comp = ∁ 1867 io->range.logical_sector = sec; 1868 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block; 1869 1870 spin_lock_irq(&ic->endio_wait.lock); 1871 while (unlikely(!add_new_range(ic, &io->range))) 1872 sleep_on_endio_wait(ic); 1873 1874 if (likely(!from_replay)) { 1875 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries]; 1876 1877 /* don't write if there is newer committed sector */ 1878 while (j < k && find_newer_committed_node(ic, §ion_node[j])) { 1879 struct journal_entry *je2 = access_journal_entry(ic, i, j); 1880 1881 journal_entry_set_unused(je2); 1882 remove_journal_node(ic, §ion_node[j]); 1883 j++; 1884 sec += ic->sectors_per_block; 1885 offset += ic->sectors_per_block; 1886 } 1887 while (j < k && find_newer_committed_node(ic, §ion_node[k - 1])) { 1888 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1); 1889 1890 journal_entry_set_unused(je2); 1891 remove_journal_node(ic, §ion_node[k - 1]); 1892 k--; 1893 } 1894 if (j == k) { 1895 remove_range_unlocked(ic, &io->range); 1896 spin_unlock_irq(&ic->endio_wait.lock); 1897 mempool_free(io, ic->journal_io_mempool); 1898 goto skip_io; 1899 } 1900 for (l = j; l < k; l++) { 1901 remove_journal_node(ic, §ion_node[l]); 1902 } 1903 } 1904 spin_unlock_irq(&ic->endio_wait.lock); 1905 1906 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); 1907 for (l = j; l < k; l++) { 1908 int r; 1909 struct journal_entry *je2 = access_journal_entry(ic, i, l); 1910 1911 if ( 1912 #ifndef INTERNAL_VERIFY 1913 unlikely(from_replay) && 1914 #endif 1915 ic->internal_hash) { 1916 char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)]; 1917 1918 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), 1919 (char *)access_journal_data(ic, i, l), test_tag); 1920 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) 1921 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); 1922 } 1923 1924 journal_entry_set_unused(je2); 1925 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset, 1926 ic->tag_size, TAG_WRITE); 1927 if (unlikely(r)) { 1928 dm_integrity_io_error(ic, "reading tags", r); 1929 } 1930 } 1931 1932 atomic_inc(&comp.in_flight); 1933 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block, 1934 (k - j) << ic->sb->log2_sectors_per_block, 1935 get_data_sector(ic, area, offset), 1936 complete_copy_from_journal, io); 1937 skip_io: 1938 j = next_loop; 1939 } 1940 } 1941 1942 dm_bufio_write_dirty_buffers_async(ic->bufio); 1943 1944 complete_journal_op(&comp); 1945 wait_for_completion_io(&comp.comp); 1946 1947 dm_integrity_flush_buffers(ic); 1948 } 1949 1950 static void integrity_writer(struct work_struct *w) 1951 { 1952 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work); 1953 unsigned write_start, write_sections; 1954 1955 unsigned prev_free_sectors; 1956 1957 /* the following test is not needed, but it tests the replay code */ 1958 if (ACCESS_ONCE(ic->suspending)) 1959 return; 1960 1961 spin_lock_irq(&ic->endio_wait.lock); 1962 write_start = ic->committed_section; 1963 write_sections = ic->n_committed_sections; 1964 spin_unlock_irq(&ic->endio_wait.lock); 1965 1966 if (!write_sections) 1967 return; 1968 1969 do_journal_write(ic, write_start, write_sections, false); 1970 1971 spin_lock_irq(&ic->endio_wait.lock); 1972 1973 ic->committed_section += write_sections; 1974 wraparound_section(ic, &ic->committed_section); 1975 ic->n_committed_sections -= write_sections; 1976 1977 prev_free_sectors = ic->free_sectors; 1978 ic->free_sectors += write_sections * ic->journal_section_entries; 1979 if (unlikely(!prev_free_sectors)) 1980 wake_up_locked(&ic->endio_wait); 1981 1982 spin_unlock_irq(&ic->endio_wait.lock); 1983 } 1984 1985 static void init_journal(struct dm_integrity_c *ic, unsigned start_section, 1986 unsigned n_sections, unsigned char commit_seq) 1987 { 1988 unsigned i, j, n; 1989 1990 if (!n_sections) 1991 return; 1992 1993 for (n = 0; n < n_sections; n++) { 1994 i = start_section + n; 1995 wraparound_section(ic, &i); 1996 for (j = 0; j < ic->journal_section_sectors; j++) { 1997 struct journal_sector *js = access_journal(ic, i, j); 1998 memset(&js->entries, 0, JOURNAL_SECTOR_DATA); 1999 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq); 2000 } 2001 for (j = 0; j < ic->journal_section_entries; j++) { 2002 struct journal_entry *je = access_journal_entry(ic, i, j); 2003 journal_entry_set_unused(je); 2004 } 2005 } 2006 2007 write_journal(ic, start_section, n_sections); 2008 } 2009 2010 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id) 2011 { 2012 unsigned char k; 2013 for (k = 0; k < N_COMMIT_IDS; k++) { 2014 if (dm_integrity_commit_id(ic, i, j, k) == id) 2015 return k; 2016 } 2017 dm_integrity_io_error(ic, "journal commit id", -EIO); 2018 return -EIO; 2019 } 2020 2021 static void replay_journal(struct dm_integrity_c *ic) 2022 { 2023 unsigned i, j; 2024 bool used_commit_ids[N_COMMIT_IDS]; 2025 unsigned max_commit_id_sections[N_COMMIT_IDS]; 2026 unsigned write_start, write_sections; 2027 unsigned continue_section; 2028 bool journal_empty; 2029 unsigned char unused, last_used, want_commit_seq; 2030 2031 if (ic->mode == 'R') 2032 return; 2033 2034 if (ic->journal_uptodate) 2035 return; 2036 2037 last_used = 0; 2038 write_start = 0; 2039 2040 if (!ic->just_formatted) { 2041 DEBUG_print("reading journal\n"); 2042 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL); 2043 if (ic->journal_io) 2044 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); 2045 if (ic->journal_io) { 2046 struct journal_completion crypt_comp; 2047 crypt_comp.ic = ic; 2048 crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp); 2049 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0); 2050 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); 2051 wait_for_completion(&crypt_comp.comp); 2052 } 2053 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal"); 2054 } 2055 2056 if (dm_integrity_failed(ic)) 2057 goto clear_journal; 2058 2059 journal_empty = true; 2060 memset(used_commit_ids, 0, sizeof used_commit_ids); 2061 memset(max_commit_id_sections, 0, sizeof max_commit_id_sections); 2062 for (i = 0; i < ic->journal_sections; i++) { 2063 for (j = 0; j < ic->journal_section_sectors; j++) { 2064 int k; 2065 struct journal_sector *js = access_journal(ic, i, j); 2066 k = find_commit_seq(ic, i, j, js->commit_id); 2067 if (k < 0) 2068 goto clear_journal; 2069 used_commit_ids[k] = true; 2070 max_commit_id_sections[k] = i; 2071 } 2072 if (journal_empty) { 2073 for (j = 0; j < ic->journal_section_entries; j++) { 2074 struct journal_entry *je = access_journal_entry(ic, i, j); 2075 if (!journal_entry_is_unused(je)) { 2076 journal_empty = false; 2077 break; 2078 } 2079 } 2080 } 2081 } 2082 2083 if (!used_commit_ids[N_COMMIT_IDS - 1]) { 2084 unused = N_COMMIT_IDS - 1; 2085 while (unused && !used_commit_ids[unused - 1]) 2086 unused--; 2087 } else { 2088 for (unused = 0; unused < N_COMMIT_IDS; unused++) 2089 if (!used_commit_ids[unused]) 2090 break; 2091 if (unused == N_COMMIT_IDS) { 2092 dm_integrity_io_error(ic, "journal commit ids", -EIO); 2093 goto clear_journal; 2094 } 2095 } 2096 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n", 2097 unused, used_commit_ids[0], used_commit_ids[1], 2098 used_commit_ids[2], used_commit_ids[3]); 2099 2100 last_used = prev_commit_seq(unused); 2101 want_commit_seq = prev_commit_seq(last_used); 2102 2103 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)]) 2104 journal_empty = true; 2105 2106 write_start = max_commit_id_sections[last_used] + 1; 2107 if (unlikely(write_start >= ic->journal_sections)) 2108 want_commit_seq = next_commit_seq(want_commit_seq); 2109 wraparound_section(ic, &write_start); 2110 2111 i = write_start; 2112 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) { 2113 for (j = 0; j < ic->journal_section_sectors; j++) { 2114 struct journal_sector *js = access_journal(ic, i, j); 2115 2116 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) { 2117 /* 2118 * This could be caused by crash during writing. 2119 * We won't replay the inconsistent part of the 2120 * journal. 2121 */ 2122 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n", 2123 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq); 2124 goto brk; 2125 } 2126 } 2127 i++; 2128 if (unlikely(i >= ic->journal_sections)) 2129 want_commit_seq = next_commit_seq(want_commit_seq); 2130 wraparound_section(ic, &i); 2131 } 2132 brk: 2133 2134 if (!journal_empty) { 2135 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n", 2136 write_sections, write_start, want_commit_seq); 2137 do_journal_write(ic, write_start, write_sections, true); 2138 } 2139 2140 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) { 2141 continue_section = write_start; 2142 ic->commit_seq = want_commit_seq; 2143 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq); 2144 } else { 2145 unsigned s; 2146 unsigned char erase_seq; 2147 clear_journal: 2148 DEBUG_print("clearing journal\n"); 2149 2150 erase_seq = prev_commit_seq(prev_commit_seq(last_used)); 2151 s = write_start; 2152 init_journal(ic, s, 1, erase_seq); 2153 s++; 2154 wraparound_section(ic, &s); 2155 if (ic->journal_sections >= 2) { 2156 init_journal(ic, s, ic->journal_sections - 2, erase_seq); 2157 s += ic->journal_sections - 2; 2158 wraparound_section(ic, &s); 2159 init_journal(ic, s, 1, erase_seq); 2160 } 2161 2162 continue_section = 0; 2163 ic->commit_seq = next_commit_seq(erase_seq); 2164 } 2165 2166 ic->committed_section = continue_section; 2167 ic->n_committed_sections = 0; 2168 2169 ic->uncommitted_section = continue_section; 2170 ic->n_uncommitted_sections = 0; 2171 2172 ic->free_section = continue_section; 2173 ic->free_section_entry = 0; 2174 ic->free_sectors = ic->journal_entries; 2175 2176 ic->journal_tree_root = RB_ROOT; 2177 for (i = 0; i < ic->journal_entries; i++) 2178 init_journal_node(&ic->journal_tree[i]); 2179 } 2180 2181 static void dm_integrity_postsuspend(struct dm_target *ti) 2182 { 2183 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; 2184 2185 del_timer_sync(&ic->autocommit_timer); 2186 2187 ic->suspending = true; 2188 2189 queue_work(ic->commit_wq, &ic->commit_work); 2190 drain_workqueue(ic->commit_wq); 2191 2192 if (ic->mode == 'J') { 2193 drain_workqueue(ic->writer_wq); 2194 dm_integrity_flush_buffers(ic); 2195 } 2196 2197 ic->suspending = false; 2198 2199 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); 2200 2201 ic->journal_uptodate = true; 2202 } 2203 2204 static void dm_integrity_resume(struct dm_target *ti) 2205 { 2206 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; 2207 2208 replay_journal(ic); 2209 } 2210 2211 static void dm_integrity_status(struct dm_target *ti, status_type_t type, 2212 unsigned status_flags, char *result, unsigned maxlen) 2213 { 2214 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; 2215 unsigned arg_count; 2216 size_t sz = 0; 2217 2218 switch (type) { 2219 case STATUSTYPE_INFO: 2220 result[0] = '\0'; 2221 break; 2222 2223 case STATUSTYPE_TABLE: { 2224 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; 2225 watermark_percentage += ic->journal_entries / 2; 2226 do_div(watermark_percentage, ic->journal_entries); 2227 arg_count = 5; 2228 arg_count += ic->sectors_per_block != 1; 2229 arg_count += !!ic->internal_hash_alg.alg_string; 2230 arg_count += !!ic->journal_crypt_alg.alg_string; 2231 arg_count += !!ic->journal_mac_alg.alg_string; 2232 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start, 2233 ic->tag_size, ic->mode, arg_count); 2234 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); 2235 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); 2236 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); 2237 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage); 2238 DMEMIT(" commit_time:%u", ic->autocommit_msec); 2239 if (ic->sectors_per_block != 1) 2240 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); 2241 2242 #define EMIT_ALG(a, n) \ 2243 do { \ 2244 if (ic->a.alg_string) { \ 2245 DMEMIT(" %s:%s", n, ic->a.alg_string); \ 2246 if (ic->a.key_string) \ 2247 DMEMIT(":%s", ic->a.key_string);\ 2248 } \ 2249 } while (0) 2250 EMIT_ALG(internal_hash_alg, "internal_hash"); 2251 EMIT_ALG(journal_crypt_alg, "journal_crypt"); 2252 EMIT_ALG(journal_mac_alg, "journal_mac"); 2253 break; 2254 } 2255 } 2256 } 2257 2258 static int dm_integrity_iterate_devices(struct dm_target *ti, 2259 iterate_devices_callout_fn fn, void *data) 2260 { 2261 struct dm_integrity_c *ic = ti->private; 2262 2263 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); 2264 } 2265 2266 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits) 2267 { 2268 struct dm_integrity_c *ic = ti->private; 2269 2270 if (ic->sectors_per_block > 1) { 2271 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; 2272 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; 2273 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); 2274 } 2275 } 2276 2277 static void calculate_journal_section_size(struct dm_integrity_c *ic) 2278 { 2279 unsigned sector_space = JOURNAL_SECTOR_DATA; 2280 2281 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections); 2282 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size, 2283 JOURNAL_ENTRY_ROUNDUP); 2284 2285 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) 2286 sector_space -= JOURNAL_MAC_PER_SECTOR; 2287 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size; 2288 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS; 2289 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS; 2290 ic->journal_entries = ic->journal_section_entries * ic->journal_sections; 2291 } 2292 2293 static int calculate_device_limits(struct dm_integrity_c *ic) 2294 { 2295 __u64 initial_sectors; 2296 sector_t last_sector, last_area, last_offset; 2297 2298 calculate_journal_section_size(ic); 2299 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections; 2300 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->device_sectors || initial_sectors > UINT_MAX) 2301 return -EINVAL; 2302 ic->initial_sectors = initial_sectors; 2303 2304 ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block), 2305 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT; 2306 if (!(ic->metadata_run & (ic->metadata_run - 1))) 2307 ic->log2_metadata_run = __ffs(ic->metadata_run); 2308 else 2309 ic->log2_metadata_run = -1; 2310 2311 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset); 2312 last_sector = get_data_sector(ic, last_area, last_offset); 2313 2314 if (ic->start + last_sector < last_sector || ic->start + last_sector >= ic->device_sectors) 2315 return -EINVAL; 2316 2317 return 0; 2318 } 2319 2320 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors) 2321 { 2322 unsigned journal_sections; 2323 int test_bit; 2324 2325 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT); 2326 memcpy(ic->sb->magic, SB_MAGIC, 8); 2327 ic->sb->version = SB_VERSION; 2328 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size); 2329 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block); 2330 if (ic->journal_mac_alg.alg_string) 2331 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC); 2332 2333 calculate_journal_section_size(ic); 2334 journal_sections = journal_sectors / ic->journal_section_sectors; 2335 if (!journal_sections) 2336 journal_sections = 1; 2337 ic->sb->journal_sections = cpu_to_le32(journal_sections); 2338 2339 if (!interleave_sectors) 2340 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS; 2341 ic->sb->log2_interleave_sectors = __fls(interleave_sectors); 2342 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); 2343 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); 2344 2345 ic->provided_data_sectors = 0; 2346 for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) { 2347 __u64 prev_data_sectors = ic->provided_data_sectors; 2348 2349 ic->provided_data_sectors |= (sector_t)1 << test_bit; 2350 if (calculate_device_limits(ic)) 2351 ic->provided_data_sectors = prev_data_sectors; 2352 } 2353 2354 if (!ic->provided_data_sectors) 2355 return -EINVAL; 2356 2357 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); 2358 2359 return 0; 2360 } 2361 2362 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) 2363 { 2364 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table)); 2365 struct blk_integrity bi; 2366 2367 memset(&bi, 0, sizeof(bi)); 2368 bi.profile = &dm_integrity_profile; 2369 bi.tuple_size = ic->tag_size; 2370 bi.tag_size = bi.tuple_size; 2371 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; 2372 2373 blk_integrity_register(disk, &bi); 2374 blk_queue_max_integrity_segments(disk->queue, UINT_MAX); 2375 } 2376 2377 /* FIXME: use new kvmalloc */ 2378 static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp) 2379 { 2380 void *ptr = NULL; 2381 2382 if (size <= PAGE_SIZE) 2383 ptr = kmalloc(size, GFP_KERNEL | gfp); 2384 if (!ptr && size <= KMALLOC_MAX_SIZE) 2385 ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp); 2386 if (!ptr) 2387 ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL); 2388 2389 return ptr; 2390 } 2391 2392 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) 2393 { 2394 unsigned i; 2395 2396 if (!pl) 2397 return; 2398 for (i = 0; i < ic->journal_pages; i++) 2399 if (pl[i].page) 2400 __free_page(pl[i].page); 2401 kvfree(pl); 2402 } 2403 2404 static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic) 2405 { 2406 size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list); 2407 struct page_list *pl; 2408 unsigned i; 2409 2410 pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO); 2411 if (!pl) 2412 return NULL; 2413 2414 for (i = 0; i < ic->journal_pages; i++) { 2415 pl[i].page = alloc_page(GFP_KERNEL); 2416 if (!pl[i].page) { 2417 dm_integrity_free_page_list(ic, pl); 2418 return NULL; 2419 } 2420 if (i) 2421 pl[i - 1].next = &pl[i]; 2422 } 2423 2424 return pl; 2425 } 2426 2427 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl) 2428 { 2429 unsigned i; 2430 for (i = 0; i < ic->journal_sections; i++) 2431 kvfree(sl[i]); 2432 kfree(sl); 2433 } 2434 2435 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl) 2436 { 2437 struct scatterlist **sl; 2438 unsigned i; 2439 2440 sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO); 2441 if (!sl) 2442 return NULL; 2443 2444 for (i = 0; i < ic->journal_sections; i++) { 2445 struct scatterlist *s; 2446 unsigned start_index, start_offset; 2447 unsigned end_index, end_offset; 2448 unsigned n_pages; 2449 unsigned idx; 2450 2451 page_list_location(ic, i, 0, &start_index, &start_offset); 2452 page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset); 2453 2454 n_pages = (end_index - start_index + 1); 2455 2456 s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0); 2457 if (!s) { 2458 dm_integrity_free_journal_scatterlist(ic, sl); 2459 return NULL; 2460 } 2461 2462 sg_init_table(s, n_pages); 2463 for (idx = start_index; idx <= end_index; idx++) { 2464 char *va = lowmem_page_address(pl[idx].page); 2465 unsigned start = 0, end = PAGE_SIZE; 2466 if (idx == start_index) 2467 start = start_offset; 2468 if (idx == end_index) 2469 end = end_offset + (1 << SECTOR_SHIFT); 2470 sg_set_buf(&s[idx - start_index], va + start, end - start); 2471 } 2472 2473 sl[i] = s; 2474 } 2475 2476 return sl; 2477 } 2478 2479 static void free_alg(struct alg_spec *a) 2480 { 2481 kzfree(a->alg_string); 2482 kzfree(a->key); 2483 memset(a, 0, sizeof *a); 2484 } 2485 2486 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval) 2487 { 2488 char *k; 2489 2490 free_alg(a); 2491 2492 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL); 2493 if (!a->alg_string) 2494 goto nomem; 2495 2496 k = strchr(a->alg_string, ':'); 2497 if (k) { 2498 *k = 0; 2499 a->key_string = k + 1; 2500 if (strlen(a->key_string) & 1) 2501 goto inval; 2502 2503 a->key_size = strlen(a->key_string) / 2; 2504 a->key = kmalloc(a->key_size, GFP_KERNEL); 2505 if (!a->key) 2506 goto nomem; 2507 if (hex2bin(a->key, a->key_string, a->key_size)) 2508 goto inval; 2509 } 2510 2511 return 0; 2512 inval: 2513 *error = error_inval; 2514 return -EINVAL; 2515 nomem: 2516 *error = "Out of memory for an argument"; 2517 return -ENOMEM; 2518 } 2519 2520 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error, 2521 char *error_alg, char *error_key) 2522 { 2523 int r; 2524 2525 if (a->alg_string) { 2526 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ASYNC); 2527 if (IS_ERR(*hash)) { 2528 *error = error_alg; 2529 r = PTR_ERR(*hash); 2530 *hash = NULL; 2531 return r; 2532 } 2533 2534 if (a->key) { 2535 r = crypto_shash_setkey(*hash, a->key, a->key_size); 2536 if (r) { 2537 *error = error_key; 2538 return r; 2539 } 2540 } 2541 } 2542 2543 return 0; 2544 } 2545 2546 static int create_journal(struct dm_integrity_c *ic, char **error) 2547 { 2548 int r = 0; 2549 unsigned i; 2550 __u64 journal_pages, journal_desc_size, journal_tree_size; 2551 unsigned char *crypt_data = NULL; 2552 2553 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); 2554 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); 2555 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL); 2556 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL); 2557 2558 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, 2559 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT); 2560 journal_desc_size = journal_pages * sizeof(struct page_list); 2561 if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) { 2562 *error = "Journal doesn't fit into memory"; 2563 r = -ENOMEM; 2564 goto bad; 2565 } 2566 ic->journal_pages = journal_pages; 2567 2568 ic->journal = dm_integrity_alloc_page_list(ic); 2569 if (!ic->journal) { 2570 *error = "Could not allocate memory for journal"; 2571 r = -ENOMEM; 2572 goto bad; 2573 } 2574 if (ic->journal_crypt_alg.alg_string) { 2575 unsigned ivsize, blocksize; 2576 struct journal_completion comp; 2577 2578 comp.ic = ic; 2579 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0); 2580 if (IS_ERR(ic->journal_crypt)) { 2581 *error = "Invalid journal cipher"; 2582 r = PTR_ERR(ic->journal_crypt); 2583 ic->journal_crypt = NULL; 2584 goto bad; 2585 } 2586 ivsize = crypto_skcipher_ivsize(ic->journal_crypt); 2587 blocksize = crypto_skcipher_blocksize(ic->journal_crypt); 2588 2589 if (ic->journal_crypt_alg.key) { 2590 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key, 2591 ic->journal_crypt_alg.key_size); 2592 if (r) { 2593 *error = "Error setting encryption key"; 2594 goto bad; 2595 } 2596 } 2597 DEBUG_print("cipher %s, block size %u iv size %u\n", 2598 ic->journal_crypt_alg.alg_string, blocksize, ivsize); 2599 2600 ic->journal_io = dm_integrity_alloc_page_list(ic); 2601 if (!ic->journal_io) { 2602 *error = "Could not allocate memory for journal io"; 2603 r = -ENOMEM; 2604 goto bad; 2605 } 2606 2607 if (blocksize == 1) { 2608 struct scatterlist *sg; 2609 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); 2610 unsigned char iv[ivsize]; 2611 skcipher_request_set_tfm(req, ic->journal_crypt); 2612 2613 ic->journal_xor = dm_integrity_alloc_page_list(ic); 2614 if (!ic->journal_xor) { 2615 *error = "Could not allocate memory for journal xor"; 2616 r = -ENOMEM; 2617 goto bad; 2618 } 2619 2620 sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0); 2621 if (!sg) { 2622 *error = "Unable to allocate sg list"; 2623 r = -ENOMEM; 2624 goto bad; 2625 } 2626 sg_init_table(sg, ic->journal_pages + 1); 2627 for (i = 0; i < ic->journal_pages; i++) { 2628 char *va = lowmem_page_address(ic->journal_xor[i].page); 2629 clear_page(va); 2630 sg_set_buf(&sg[i], va, PAGE_SIZE); 2631 } 2632 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); 2633 memset(iv, 0x00, ivsize); 2634 2635 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); 2636 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); 2637 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2638 if (do_crypt(true, req, &comp)) 2639 wait_for_completion(&comp.comp); 2640 kvfree(sg); 2641 r = dm_integrity_failed(ic); 2642 if (r) { 2643 *error = "Unable to encrypt journal"; 2644 goto bad; 2645 } 2646 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data"); 2647 2648 crypto_free_skcipher(ic->journal_crypt); 2649 ic->journal_crypt = NULL; 2650 } else { 2651 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); 2652 unsigned char iv[ivsize]; 2653 unsigned crypt_len = roundup(ivsize, blocksize); 2654 2655 crypt_data = kmalloc(crypt_len, GFP_KERNEL); 2656 if (!crypt_data) { 2657 *error = "Unable to allocate crypt data"; 2658 r = -ENOMEM; 2659 goto bad; 2660 } 2661 2662 skcipher_request_set_tfm(req, ic->journal_crypt); 2663 2664 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); 2665 if (!ic->journal_scatterlist) { 2666 *error = "Unable to allocate sg list"; 2667 r = -ENOMEM; 2668 goto bad; 2669 } 2670 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io); 2671 if (!ic->journal_io_scatterlist) { 2672 *error = "Unable to allocate sg list"; 2673 r = -ENOMEM; 2674 goto bad; 2675 } 2676 ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO); 2677 if (!ic->sk_requests) { 2678 *error = "Unable to allocate sk requests"; 2679 r = -ENOMEM; 2680 goto bad; 2681 } 2682 for (i = 0; i < ic->journal_sections; i++) { 2683 struct scatterlist sg; 2684 struct skcipher_request *section_req; 2685 __u32 section_le = cpu_to_le32(i); 2686 2687 memset(iv, 0x00, ivsize); 2688 memset(crypt_data, 0x00, crypt_len); 2689 memcpy(crypt_data, §ion_le, min((size_t)crypt_len, sizeof(section_le))); 2690 2691 sg_init_one(&sg, crypt_data, crypt_len); 2692 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); 2693 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); 2694 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2695 if (do_crypt(true, req, &comp)) 2696 wait_for_completion(&comp.comp); 2697 2698 r = dm_integrity_failed(ic); 2699 if (r) { 2700 *error = "Unable to generate iv"; 2701 goto bad; 2702 } 2703 2704 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); 2705 if (!section_req) { 2706 *error = "Unable to allocate crypt request"; 2707 r = -ENOMEM; 2708 goto bad; 2709 } 2710 section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL); 2711 if (!section_req->iv) { 2712 skcipher_request_free(section_req); 2713 *error = "Unable to allocate iv"; 2714 r = -ENOMEM; 2715 goto bad; 2716 } 2717 memcpy(section_req->iv + ivsize, crypt_data, ivsize); 2718 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT; 2719 ic->sk_requests[i] = section_req; 2720 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i); 2721 } 2722 } 2723 } 2724 2725 for (i = 0; i < N_COMMIT_IDS; i++) { 2726 unsigned j; 2727 retest_commit_id: 2728 for (j = 0; j < i; j++) { 2729 if (ic->commit_ids[j] == ic->commit_ids[i]) { 2730 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1); 2731 goto retest_commit_id; 2732 } 2733 } 2734 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]); 2735 } 2736 2737 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node); 2738 if (journal_tree_size > ULONG_MAX) { 2739 *error = "Journal doesn't fit into memory"; 2740 r = -ENOMEM; 2741 goto bad; 2742 } 2743 ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0); 2744 if (!ic->journal_tree) { 2745 *error = "Could not allocate memory for journal tree"; 2746 r = -ENOMEM; 2747 } 2748 bad: 2749 kfree(crypt_data); 2750 return r; 2751 } 2752 2753 /* 2754 * Construct a integrity mapping 2755 * 2756 * Arguments: 2757 * device 2758 * offset from the start of the device 2759 * tag size 2760 * D - direct writes, J - journal writes, R - recovery mode 2761 * number of optional arguments 2762 * optional arguments: 2763 * journal_sectors 2764 * interleave_sectors 2765 * buffer_sectors 2766 * journal_watermark 2767 * commit_time 2768 * internal_hash 2769 * journal_crypt 2770 * journal_mac 2771 * block_size 2772 */ 2773 static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) 2774 { 2775 struct dm_integrity_c *ic; 2776 char dummy; 2777 int r; 2778 unsigned extra_args; 2779 struct dm_arg_set as; 2780 static struct dm_arg _args[] = { 2781 {0, 9, "Invalid number of feature args"}, 2782 }; 2783 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; 2784 bool should_write_sb; 2785 __u64 threshold; 2786 unsigned long long start; 2787 2788 #define DIRECT_ARGUMENTS 4 2789 2790 if (argc <= DIRECT_ARGUMENTS) { 2791 ti->error = "Invalid argument count"; 2792 return -EINVAL; 2793 } 2794 2795 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL); 2796 if (!ic) { 2797 ti->error = "Cannot allocate integrity context"; 2798 return -ENOMEM; 2799 } 2800 ti->private = ic; 2801 ti->per_io_data_size = sizeof(struct dm_integrity_io); 2802 2803 ic->in_progress = RB_ROOT; 2804 init_waitqueue_head(&ic->endio_wait); 2805 bio_list_init(&ic->flush_bio_list); 2806 init_waitqueue_head(&ic->copy_to_journal_wait); 2807 init_completion(&ic->crypto_backoff); 2808 2809 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); 2810 if (r) { 2811 ti->error = "Device lookup failed"; 2812 goto bad; 2813 } 2814 2815 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { 2816 ti->error = "Invalid starting offset"; 2817 r = -EINVAL; 2818 goto bad; 2819 } 2820 ic->start = start; 2821 2822 if (strcmp(argv[2], "-")) { 2823 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) { 2824 ti->error = "Invalid tag size"; 2825 r = -EINVAL; 2826 goto bad; 2827 } 2828 } 2829 2830 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) 2831 ic->mode = argv[3][0]; 2832 else { 2833 ti->error = "Invalid mode (expecting J, D, R)"; 2834 r = -EINVAL; 2835 goto bad; 2836 } 2837 2838 ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT; 2839 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS, 2840 ic->device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR); 2841 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS; 2842 buffer_sectors = DEFAULT_BUFFER_SECTORS; 2843 journal_watermark = DEFAULT_JOURNAL_WATERMARK; 2844 sync_msec = DEFAULT_SYNC_MSEC; 2845 ic->sectors_per_block = 1; 2846 2847 as.argc = argc - DIRECT_ARGUMENTS; 2848 as.argv = argv + DIRECT_ARGUMENTS; 2849 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error); 2850 if (r) 2851 goto bad; 2852 2853 while (extra_args--) { 2854 const char *opt_string; 2855 unsigned val; 2856 opt_string = dm_shift_arg(&as); 2857 if (!opt_string) { 2858 r = -EINVAL; 2859 ti->error = "Not enough feature arguments"; 2860 goto bad; 2861 } 2862 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1) 2863 journal_sectors = val; 2864 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1) 2865 interleave_sectors = val; 2866 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1) 2867 buffer_sectors = val; 2868 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100) 2869 journal_watermark = val; 2870 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) 2871 sync_msec = val; 2872 else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) { 2873 if (val < 1 << SECTOR_SHIFT || 2874 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT || 2875 (val & (val -1))) { 2876 r = -EINVAL; 2877 ti->error = "Invalid block_size argument"; 2878 goto bad; 2879 } 2880 ic->sectors_per_block = val >> SECTOR_SHIFT; 2881 } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { 2882 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, 2883 "Invalid internal_hash argument"); 2884 if (r) 2885 goto bad; 2886 } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { 2887 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, 2888 "Invalid journal_crypt argument"); 2889 if (r) 2890 goto bad; 2891 } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { 2892 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, 2893 "Invalid journal_mac argument"); 2894 if (r) 2895 goto bad; 2896 } else { 2897 r = -EINVAL; 2898 ti->error = "Invalid argument"; 2899 goto bad; 2900 } 2901 } 2902 2903 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error, 2904 "Invalid internal hash", "Error setting internal hash key"); 2905 if (r) 2906 goto bad; 2907 2908 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error, 2909 "Invalid journal mac", "Error setting journal mac key"); 2910 if (r) 2911 goto bad; 2912 2913 if (!ic->tag_size) { 2914 if (!ic->internal_hash) { 2915 ti->error = "Unknown tag size"; 2916 r = -EINVAL; 2917 goto bad; 2918 } 2919 ic->tag_size = crypto_shash_digestsize(ic->internal_hash); 2920 } 2921 if (ic->tag_size > MAX_TAG_SIZE) { 2922 ti->error = "Too big tag size"; 2923 r = -EINVAL; 2924 goto bad; 2925 } 2926 if (!(ic->tag_size & (ic->tag_size - 1))) 2927 ic->log2_tag_size = __ffs(ic->tag_size); 2928 else 2929 ic->log2_tag_size = -1; 2930 2931 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec); 2932 ic->autocommit_msec = sync_msec; 2933 setup_timer(&ic->autocommit_timer, autocommit_fn, (unsigned long)ic); 2934 2935 ic->io = dm_io_client_create(); 2936 if (IS_ERR(ic->io)) { 2937 r = PTR_ERR(ic->io); 2938 ic->io = NULL; 2939 ti->error = "Cannot allocate dm io"; 2940 goto bad; 2941 } 2942 2943 ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache); 2944 if (!ic->journal_io_mempool) { 2945 r = -ENOMEM; 2946 ti->error = "Cannot allocate mempool"; 2947 goto bad; 2948 } 2949 2950 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", 2951 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE); 2952 if (!ic->metadata_wq) { 2953 ti->error = "Cannot allocate workqueue"; 2954 r = -ENOMEM; 2955 goto bad; 2956 } 2957 2958 /* 2959 * If this workqueue were percpu, it would cause bio reordering 2960 * and reduced performance. 2961 */ 2962 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 2963 if (!ic->wait_wq) { 2964 ti->error = "Cannot allocate workqueue"; 2965 r = -ENOMEM; 2966 goto bad; 2967 } 2968 2969 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); 2970 if (!ic->commit_wq) { 2971 ti->error = "Cannot allocate workqueue"; 2972 r = -ENOMEM; 2973 goto bad; 2974 } 2975 INIT_WORK(&ic->commit_work, integrity_commit); 2976 2977 if (ic->mode == 'J') { 2978 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1); 2979 if (!ic->writer_wq) { 2980 ti->error = "Cannot allocate workqueue"; 2981 r = -ENOMEM; 2982 goto bad; 2983 } 2984 INIT_WORK(&ic->writer_work, integrity_writer); 2985 } 2986 2987 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL); 2988 if (!ic->sb) { 2989 r = -ENOMEM; 2990 ti->error = "Cannot allocate superblock area"; 2991 goto bad; 2992 } 2993 2994 r = sync_rw_sb(ic, REQ_OP_READ, 0); 2995 if (r) { 2996 ti->error = "Error reading superblock"; 2997 goto bad; 2998 } 2999 should_write_sb = false; 3000 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) { 3001 if (ic->mode != 'R') { 3002 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) { 3003 r = -EINVAL; 3004 ti->error = "The device is not initialized"; 3005 goto bad; 3006 } 3007 } 3008 3009 r = initialize_superblock(ic, journal_sectors, interleave_sectors); 3010 if (r) { 3011 ti->error = "Could not initialize superblock"; 3012 goto bad; 3013 } 3014 if (ic->mode != 'R') 3015 should_write_sb = true; 3016 } 3017 3018 if (ic->sb->version != SB_VERSION) { 3019 r = -EINVAL; 3020 ti->error = "Unknown version"; 3021 goto bad; 3022 } 3023 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) { 3024 r = -EINVAL; 3025 ti->error = "Tag size doesn't match the information in superblock"; 3026 goto bad; 3027 } 3028 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) { 3029 r = -EINVAL; 3030 ti->error = "Block size doesn't match the information in superblock"; 3031 goto bad; 3032 } 3033 /* make sure that ti->max_io_len doesn't overflow */ 3034 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || 3035 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { 3036 r = -EINVAL; 3037 ti->error = "Invalid interleave_sectors in the superblock"; 3038 goto bad; 3039 } 3040 ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors); 3041 if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) { 3042 /* test for overflow */ 3043 r = -EINVAL; 3044 ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors"; 3045 goto bad; 3046 } 3047 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) { 3048 r = -EINVAL; 3049 ti->error = "Journal mac mismatch"; 3050 goto bad; 3051 } 3052 r = calculate_device_limits(ic); 3053 if (r) { 3054 ti->error = "The device is too small"; 3055 goto bad; 3056 } 3057 3058 if (!buffer_sectors) 3059 buffer_sectors = 1; 3060 ic->log2_buffer_sectors = min3((int)__fls(buffer_sectors), (int)__ffs(ic->metadata_run), 31 - SECTOR_SHIFT); 3061 3062 threshold = (__u64)ic->journal_entries * (100 - journal_watermark); 3063 threshold += 50; 3064 do_div(threshold, 100); 3065 ic->free_sectors_threshold = threshold; 3066 3067 DEBUG_print("initialized:\n"); 3068 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size)); 3069 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size); 3070 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector); 3071 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries); 3072 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors); 3073 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections)); 3074 DEBUG_print(" journal_entries %u\n", ic->journal_entries); 3075 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); 3076 DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors); 3077 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); 3078 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); 3079 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); 3080 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors, 3081 (unsigned long long)ic->provided_data_sectors); 3082 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); 3083 3084 ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 3085 1, 0, NULL, NULL); 3086 if (IS_ERR(ic->bufio)) { 3087 r = PTR_ERR(ic->bufio); 3088 ti->error = "Cannot initialize dm-bufio"; 3089 ic->bufio = NULL; 3090 goto bad; 3091 } 3092 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); 3093 3094 if (ic->mode != 'R') { 3095 r = create_journal(ic, &ti->error); 3096 if (r) 3097 goto bad; 3098 } 3099 3100 if (should_write_sb) { 3101 int r; 3102 3103 init_journal(ic, 0, ic->journal_sections, 0); 3104 r = dm_integrity_failed(ic); 3105 if (unlikely(r)) { 3106 ti->error = "Error initializing journal"; 3107 goto bad; 3108 } 3109 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); 3110 if (r) { 3111 ti->error = "Error initializing superblock"; 3112 goto bad; 3113 } 3114 ic->just_formatted = true; 3115 } 3116 3117 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors); 3118 if (r) 3119 goto bad; 3120 3121 if (!ic->internal_hash) 3122 dm_integrity_set(ti, ic); 3123 3124 ti->num_flush_bios = 1; 3125 ti->flush_supported = true; 3126 3127 return 0; 3128 bad: 3129 dm_integrity_dtr(ti); 3130 return r; 3131 } 3132 3133 static void dm_integrity_dtr(struct dm_target *ti) 3134 { 3135 struct dm_integrity_c *ic = ti->private; 3136 3137 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); 3138 3139 if (ic->metadata_wq) 3140 destroy_workqueue(ic->metadata_wq); 3141 if (ic->wait_wq) 3142 destroy_workqueue(ic->wait_wq); 3143 if (ic->commit_wq) 3144 destroy_workqueue(ic->commit_wq); 3145 if (ic->writer_wq) 3146 destroy_workqueue(ic->writer_wq); 3147 if (ic->bufio) 3148 dm_bufio_client_destroy(ic->bufio); 3149 mempool_destroy(ic->journal_io_mempool); 3150 if (ic->io) 3151 dm_io_client_destroy(ic->io); 3152 if (ic->dev) 3153 dm_put_device(ti, ic->dev); 3154 dm_integrity_free_page_list(ic, ic->journal); 3155 dm_integrity_free_page_list(ic, ic->journal_io); 3156 dm_integrity_free_page_list(ic, ic->journal_xor); 3157 if (ic->journal_scatterlist) 3158 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist); 3159 if (ic->journal_io_scatterlist) 3160 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist); 3161 if (ic->sk_requests) { 3162 unsigned i; 3163 3164 for (i = 0; i < ic->journal_sections; i++) { 3165 struct skcipher_request *req = ic->sk_requests[i]; 3166 if (req) { 3167 kzfree(req->iv); 3168 skcipher_request_free(req); 3169 } 3170 } 3171 kvfree(ic->sk_requests); 3172 } 3173 kvfree(ic->journal_tree); 3174 if (ic->sb) 3175 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT); 3176 3177 if (ic->internal_hash) 3178 crypto_free_shash(ic->internal_hash); 3179 free_alg(&ic->internal_hash_alg); 3180 3181 if (ic->journal_crypt) 3182 crypto_free_skcipher(ic->journal_crypt); 3183 free_alg(&ic->journal_crypt_alg); 3184 3185 if (ic->journal_mac) 3186 crypto_free_shash(ic->journal_mac); 3187 free_alg(&ic->journal_mac_alg); 3188 3189 kfree(ic); 3190 } 3191 3192 static struct target_type integrity_target = { 3193 .name = "integrity", 3194 .version = {1, 0, 0}, 3195 .module = THIS_MODULE, 3196 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, 3197 .ctr = dm_integrity_ctr, 3198 .dtr = dm_integrity_dtr, 3199 .map = dm_integrity_map, 3200 .postsuspend = dm_integrity_postsuspend, 3201 .resume = dm_integrity_resume, 3202 .status = dm_integrity_status, 3203 .iterate_devices = dm_integrity_iterate_devices, 3204 .io_hints = dm_integrity_io_hints, 3205 }; 3206 3207 int __init dm_integrity_init(void) 3208 { 3209 int r; 3210 3211 journal_io_cache = kmem_cache_create("integrity_journal_io", 3212 sizeof(struct journal_io), 0, 0, NULL); 3213 if (!journal_io_cache) { 3214 DMERR("can't allocate journal io cache"); 3215 return -ENOMEM; 3216 } 3217 3218 r = dm_register_target(&integrity_target); 3219 3220 if (r < 0) 3221 DMERR("register failed %d", r); 3222 3223 return r; 3224 } 3225 3226 void dm_integrity_exit(void) 3227 { 3228 dm_unregister_target(&integrity_target); 3229 kmem_cache_destroy(journal_io_cache); 3230 } 3231 3232 module_init(dm_integrity_init); 3233 module_exit(dm_integrity_exit); 3234 3235 MODULE_AUTHOR("Milan Broz"); 3236 MODULE_AUTHOR("Mikulas Patocka"); 3237 MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension"); 3238 MODULE_LICENSE("GPL"); 3239