1 /* 2 * Copyright (C) 2003 Jana Saout <jana@saout.de> 3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> 4 * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved. 5 * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com> 6 * 7 * This file is released under the GPL. 8 */ 9 10 #include <linux/completion.h> 11 #include <linux/err.h> 12 #include <linux/module.h> 13 #include <linux/init.h> 14 #include <linux/kernel.h> 15 #include <linux/key.h> 16 #include <linux/bio.h> 17 #include <linux/blkdev.h> 18 #include <linux/mempool.h> 19 #include <linux/slab.h> 20 #include <linux/crypto.h> 21 #include <linux/workqueue.h> 22 #include <linux/kthread.h> 23 #include <linux/backing-dev.h> 24 #include <linux/atomic.h> 25 #include <linux/scatterlist.h> 26 #include <linux/rbtree.h> 27 #include <linux/ctype.h> 28 #include <asm/page.h> 29 #include <asm/unaligned.h> 30 #include <crypto/hash.h> 31 #include <crypto/md5.h> 32 #include <crypto/algapi.h> 33 #include <crypto/skcipher.h> 34 #include <crypto/aead.h> 35 #include <crypto/authenc.h> 36 #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */ 37 #include <keys/user-type.h> 38 39 #include <linux/device-mapper.h> 40 41 #define DM_MSG_PREFIX "crypt" 42 43 /* 44 * context holding the current state of a multi-part conversion 45 */ 46 struct convert_context { 47 struct completion restart; 48 struct bio *bio_in; 49 struct bio *bio_out; 50 struct bvec_iter iter_in; 51 struct bvec_iter iter_out; 52 u64 cc_sector; 53 atomic_t cc_pending; 54 union { 55 struct skcipher_request *req; 56 struct aead_request *req_aead; 57 } r; 58 59 }; 60 61 /* 62 * per bio private data 63 */ 64 struct dm_crypt_io { 65 struct crypt_config *cc; 66 struct bio *base_bio; 67 u8 *integrity_metadata; 68 bool integrity_metadata_from_pool; 69 struct work_struct work; 70 71 struct convert_context ctx; 72 73 atomic_t io_pending; 74 blk_status_t error; 75 sector_t sector; 76 77 struct rb_node rb_node; 78 } CRYPTO_MINALIGN_ATTR; 79 80 struct dm_crypt_request { 81 struct convert_context *ctx; 82 struct scatterlist sg_in[4]; 83 struct scatterlist sg_out[4]; 84 u64 iv_sector; 85 }; 86 87 struct crypt_config; 88 89 struct crypt_iv_operations { 90 int (*ctr)(struct crypt_config *cc, struct dm_target *ti, 91 const char *opts); 92 void (*dtr)(struct crypt_config *cc); 93 int (*init)(struct crypt_config *cc); 94 int (*wipe)(struct crypt_config *cc); 95 int (*generator)(struct crypt_config *cc, u8 *iv, 96 struct dm_crypt_request *dmreq); 97 int (*post)(struct crypt_config *cc, u8 *iv, 98 struct dm_crypt_request *dmreq); 99 }; 100 101 struct iv_benbi_private { 102 int shift; 103 }; 104 105 #define LMK_SEED_SIZE 64 /* hash + 0 */ 106 struct iv_lmk_private { 107 struct crypto_shash *hash_tfm; 108 u8 *seed; 109 }; 110 111 #define TCW_WHITENING_SIZE 16 112 struct iv_tcw_private { 113 struct crypto_shash *crc32_tfm; 114 u8 *iv_seed; 115 u8 *whitening; 116 }; 117 118 #define ELEPHANT_MAX_KEY_SIZE 32 119 struct iv_elephant_private { 120 struct crypto_skcipher *tfm; 121 }; 122 123 /* 124 * Crypt: maps a linear range of a block device 125 * and encrypts / decrypts at the same time. 
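 *
 * An illustrative mapping (a sketch; the key is only a placeholder and the
 * device and sizes are arbitrary): a 1 GiB region of /dev/sdb1 encrypted
 * with aes-xts-plain64 corresponds to a dmsetup table line of the form
 *
 *   0 2097152 crypt aes-xts-plain64 <hex-encoded key> 0 /dev/sdb1 0
 *
 * i.e. start and length in 512-byte sectors, target name, cipher
 * specification, key, iv_offset, backing device and device offset.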
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
             DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };

enum cipher_flags {
        CRYPT_MODE_INTEGRITY_AEAD,      /* Use authenticated mode for cipher */
        CRYPT_IV_LARGE_SECTORS,         /* Calculate IV from sector_size, not 512B sectors */
        CRYPT_ENCRYPT_PREPROCESS,       /* Must preprocess data for encryption (elephant) */
};

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
        struct dm_dev *dev;
        sector_t start;

        struct percpu_counter n_allocated_pages;

        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;

        spinlock_t write_thread_lock;
        struct task_struct *write_thread;
        struct rb_root write_tree;

        char *cipher_string;
        char *cipher_auth;
        char *key_string;

        const struct crypt_iv_operations *iv_gen_ops;
        union {
                struct iv_benbi_private benbi;
                struct iv_lmk_private lmk;
                struct iv_tcw_private tcw;
                struct iv_elephant_private elephant;
        } iv_gen_private;
        u64 iv_offset;
        unsigned int iv_size;
        unsigned short int sector_size;
        unsigned char sector_shift;

        union {
                struct crypto_skcipher **tfms;
                struct crypto_aead **tfms_aead;
        } cipher_tfm;
        unsigned tfms_count;
        unsigned long cipher_flags;

        /*
         * Layout of each crypto request:
         *
         *   struct skcipher_request
         *      context
         *      padding
         *   struct dm_crypt_request
         *      padding
         *   IV
         *
         * The padding is added so that dm_crypt_request and the IV are
         * correctly aligned.
         */
        unsigned int dmreq_start;

        unsigned int per_bio_data_size;

        unsigned long flags;
        unsigned int key_size;
        unsigned int key_parts;      /* independent parts in key buffer */
        unsigned int key_extra_size; /* additional keys length */
        unsigned int key_mac_size;   /* MAC key size for authenc(...) */

        unsigned int integrity_tag_size;
        unsigned int integrity_iv_size;
        unsigned int on_disk_tag_size;

        /*
         * pool for per bio private data, crypto requests,
         * encryption requests/buffer pages and integrity tags
         */
        unsigned tag_pool_max_sectors;
        mempool_t tag_pool;
        mempool_t req_pool;
        mempool_t page_pool;

        struct bio_set bs;
        struct mutex bio_alloc_lock;

        u8 *authenc_key; /* space for keys in authenc() format (if used) */
        u8 key[0];
};

#define MIN_IOS         64
#define MAX_TAG_SIZE    480
#define POOL_ENTRY_SIZE 512

static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned dm_crypt_clients_n = 0;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT         2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT   (BIO_MAX_PAGES * 16)

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
                                             struct scatterlist *sg);

/*
 * Use this to access cipher attributes that are independent of the key.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
        return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
        return cc->cipher_tfm.tfms_aead[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
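 *
 *        A worked example (illustrative): for sector number 0x102 and a
 *        16-byte IV, plain produces the bytes 02 01 00 00 followed by
 *        twelve zero bytes, since only the low 32 bits of the sector
 *        number are stored little-endian and the rest of the IV is
 *        zero-padded.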
251 * 252 * plain64: the initial vector is the 64-bit little-endian version of the sector 253 * number, padded with zeros if necessary. 254 * 255 * plain64be: the initial vector is the 64-bit big-endian version of the sector 256 * number, padded with zeros if necessary. 257 * 258 * essiv: "encrypted sector|salt initial vector", the sector number is 259 * encrypted with the bulk cipher using a salt as key. The salt 260 * should be derived from the bulk cipher's key via hashing. 261 * 262 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 263 * (needed for LRW-32-AES and possible other narrow block modes) 264 * 265 * null: the initial vector is always zero. Provides compatibility with 266 * obsolete loop_fish2 devices. Do not use for new devices. 267 * 268 * lmk: Compatible implementation of the block chaining mode used 269 * by the Loop-AES block device encryption system 270 * designed by Jari Ruusu. See http://loop-aes.sourceforge.net/ 271 * It operates on full 512 byte sectors and uses CBC 272 * with an IV derived from the sector number, the data and 273 * optionally extra IV seed. 274 * This means that after decryption the first block 275 * of sector must be tweaked according to decrypted data. 276 * Loop-AES can use three encryption schemes: 277 * version 1: is plain aes-cbc mode 278 * version 2: uses 64 multikey scheme with lmk IV generator 279 * version 3: the same as version 2 with additional IV seed 280 * (it uses 65 keys, last key is used as IV seed) 281 * 282 * tcw: Compatible implementation of the block chaining mode used 283 * by the TrueCrypt device encryption system (prior to version 4.1). 284 * For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat 285 * It operates on full 512 byte sectors and uses CBC 286 * with an IV derived from initial key and the sector number. 287 * In addition, whitening value is applied on every sector, whitening 288 * is calculated from initial key, sector number and mixed using CRC32. 289 * Note that this encryption scheme is vulnerable to watermarking attacks 290 * and should be used for old compatible containers access only. 291 * 292 * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode) 293 * The IV is encrypted little-endian byte-offset (with the same key 294 * and cipher as the volume). 295 * 296 * elephant: The extended version of eboiv with additional Elephant diffuser 297 * used with Bitlocker CBC mode. 
298 * This mode was used in older Windows systems 299 * http://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf 300 */ 301 302 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, 303 struct dm_crypt_request *dmreq) 304 { 305 memset(iv, 0, cc->iv_size); 306 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); 307 308 return 0; 309 } 310 311 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, 312 struct dm_crypt_request *dmreq) 313 { 314 memset(iv, 0, cc->iv_size); 315 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); 316 317 return 0; 318 } 319 320 static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv, 321 struct dm_crypt_request *dmreq) 322 { 323 memset(iv, 0, cc->iv_size); 324 /* iv_size is at least of size u64; usually it is 16 bytes */ 325 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector); 326 327 return 0; 328 } 329 330 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, 331 struct dm_crypt_request *dmreq) 332 { 333 /* 334 * ESSIV encryption of the IV is now handled by the crypto API, 335 * so just pass the plain sector number here. 336 */ 337 memset(iv, 0, cc->iv_size); 338 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); 339 340 return 0; 341 } 342 343 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, 344 const char *opts) 345 { 346 unsigned bs; 347 int log; 348 349 if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) 350 bs = crypto_aead_blocksize(any_tfm_aead(cc)); 351 else 352 bs = crypto_skcipher_blocksize(any_tfm(cc)); 353 log = ilog2(bs); 354 355 /* we need to calculate how far we must shift the sector count 356 * to get the cipher block count, we use this shift in _gen */ 357 358 if (1 << log != bs) { 359 ti->error = "cypher blocksize is not a power of 2"; 360 return -EINVAL; 361 } 362 363 if (log > 9) { 364 ti->error = "cypher blocksize is > 512"; 365 return -EINVAL; 366 } 367 368 cc->iv_gen_private.benbi.shift = 9 - log; 369 370 return 0; 371 } 372 373 static void crypt_iv_benbi_dtr(struct crypt_config *cc) 374 { 375 } 376 377 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, 378 struct dm_crypt_request *dmreq) 379 { 380 __be64 val; 381 382 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ 383 384 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1); 385 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); 386 387 return 0; 388 } 389 390 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, 391 struct dm_crypt_request *dmreq) 392 { 393 memset(iv, 0, cc->iv_size); 394 395 return 0; 396 } 397 398 static void crypt_iv_lmk_dtr(struct crypt_config *cc) 399 { 400 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 401 402 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) 403 crypto_free_shash(lmk->hash_tfm); 404 lmk->hash_tfm = NULL; 405 406 kzfree(lmk->seed); 407 lmk->seed = NULL; 408 } 409 410 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, 411 const char *opts) 412 { 413 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 414 415 if (cc->sector_size != (1 << SECTOR_SHIFT)) { 416 ti->error = "Unsupported sector size for LMK"; 417 return -EINVAL; 418 } 419 420 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); 421 if (IS_ERR(lmk->hash_tfm)) { 422 ti->error = "Error initializing LMK hash"; 423 return PTR_ERR(lmk->hash_tfm); 424 } 425 426 /* No seed in LMK version 2 */ 427 if (cc->key_parts == cc->tfms_count) { 428 
lmk->seed = NULL; 429 return 0; 430 } 431 432 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); 433 if (!lmk->seed) { 434 crypt_iv_lmk_dtr(cc); 435 ti->error = "Error kmallocing seed storage in LMK"; 436 return -ENOMEM; 437 } 438 439 return 0; 440 } 441 442 static int crypt_iv_lmk_init(struct crypt_config *cc) 443 { 444 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 445 int subkey_size = cc->key_size / cc->key_parts; 446 447 /* LMK seed is on the position of LMK_KEYS + 1 key */ 448 if (lmk->seed) 449 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), 450 crypto_shash_digestsize(lmk->hash_tfm)); 451 452 return 0; 453 } 454 455 static int crypt_iv_lmk_wipe(struct crypt_config *cc) 456 { 457 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 458 459 if (lmk->seed) 460 memset(lmk->seed, 0, LMK_SEED_SIZE); 461 462 return 0; 463 } 464 465 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, 466 struct dm_crypt_request *dmreq, 467 u8 *data) 468 { 469 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 470 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm); 471 struct md5_state md5state; 472 __le32 buf[4]; 473 int i, r; 474 475 desc->tfm = lmk->hash_tfm; 476 477 r = crypto_shash_init(desc); 478 if (r) 479 return r; 480 481 if (lmk->seed) { 482 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE); 483 if (r) 484 return r; 485 } 486 487 /* Sector is always 512B, block size 16, add data of blocks 1-31 */ 488 r = crypto_shash_update(desc, data + 16, 16 * 31); 489 if (r) 490 return r; 491 492 /* Sector is cropped to 56 bits here */ 493 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); 494 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); 495 buf[2] = cpu_to_le32(4024); 496 buf[3] = 0; 497 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf)); 498 if (r) 499 return r; 500 501 /* No MD5 padding here */ 502 r = crypto_shash_export(desc, &md5state); 503 if (r) 504 return r; 505 506 for (i = 0; i < MD5_HASH_WORDS; i++) 507 __cpu_to_le32s(&md5state.hash[i]); 508 memcpy(iv, &md5state.hash, cc->iv_size); 509 510 return 0; 511 } 512 513 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, 514 struct dm_crypt_request *dmreq) 515 { 516 struct scatterlist *sg; 517 u8 *src; 518 int r = 0; 519 520 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { 521 sg = crypt_get_sg_data(cc, dmreq->sg_in); 522 src = kmap_atomic(sg_page(sg)); 523 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); 524 kunmap_atomic(src); 525 } else 526 memset(iv, 0, cc->iv_size); 527 528 return r; 529 } 530 531 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, 532 struct dm_crypt_request *dmreq) 533 { 534 struct scatterlist *sg; 535 u8 *dst; 536 int r; 537 538 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) 539 return 0; 540 541 sg = crypt_get_sg_data(cc, dmreq->sg_out); 542 dst = kmap_atomic(sg_page(sg)); 543 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); 544 545 /* Tweak the first block of plaintext sector */ 546 if (!r) 547 crypto_xor(dst + sg->offset, iv, cc->iv_size); 548 549 kunmap_atomic(dst); 550 return r; 551 } 552 553 static void crypt_iv_tcw_dtr(struct crypt_config *cc) 554 { 555 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; 556 557 kzfree(tcw->iv_seed); 558 tcw->iv_seed = NULL; 559 kzfree(tcw->whitening); 560 tcw->whitening = NULL; 561 562 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm)) 563 crypto_free_shash(tcw->crc32_tfm); 564 tcw->crc32_tfm = NULL; 565 } 566 567 static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, 568 const 
char *opts)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        if (cc->sector_size != (1 << SECTOR_SHIFT)) {
                ti->error = "Unsupported sector size for TCW";
                return -EINVAL;
        }

        if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
                ti->error = "Wrong key size for TCW";
                return -EINVAL;
        }

        tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
        if (IS_ERR(tcw->crc32_tfm)) {
                ti->error = "Error initializing CRC32 in TCW";
                return PTR_ERR(tcw->crc32_tfm);
        }

        tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
        tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
        if (!tcw->iv_seed || !tcw->whitening) {
                crypt_iv_tcw_dtr(cc);
                ti->error = "Error allocating seed storage in TCW";
                return -ENOMEM;
        }

        return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

        memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
        memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
               TCW_WHITENING_SIZE);

        return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        memset(tcw->iv_seed, 0, cc->iv_size);
        memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

        return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
                                  struct dm_crypt_request *dmreq,
                                  u8 *data)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        __le64 sector = cpu_to_le64(dmreq->iv_sector);
        u8 buf[TCW_WHITENING_SIZE];
        SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
        int i, r;

        /* xor whitening with sector number */
        crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
        crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);

        /* calculate crc32 for every 32bit part and xor it */
        desc->tfm = tcw->crc32_tfm;
        for (i = 0; i < 4; i++) {
                r = crypto_shash_init(desc);
                if (r)
                        goto out;
                r = crypto_shash_update(desc, &buf[i * 4], 4);
                if (r)
                        goto out;
                r = crypto_shash_final(desc, &buf[i * 4]);
                if (r)
                        goto out;
        }
        crypto_xor(&buf[0], &buf[12], 4);
        crypto_xor(&buf[4], &buf[8], 4);

        /* apply whitening (8 bytes) to whole sector */
        for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
                crypto_xor(data + i * 8, buf, 8);
out:
        memzero_explicit(buf, sizeof(buf));
        return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
                            struct dm_crypt_request *dmreq)
{
        struct scatterlist *sg;
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        __le64 sector = cpu_to_le64(dmreq->iv_sector);
        u8 *src;
        int r = 0;

        /* Remove whitening from ciphertext */
        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
                sg = crypt_get_sg_data(cc, dmreq->sg_in);
                src = kmap_atomic(sg_page(sg));
                r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
                kunmap_atomic(src);
        }

        /* Calculate IV */
        crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
        if (cc->iv_size > 8)
                crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
                               cc->iv_size - 8);

        return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
                             struct dm_crypt_request *dmreq)
{
        struct scatterlist *sg;
        u8 *dst;
        int r;

        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
                return 0;

/* Apply whitening on ciphertext */ 696 sg = crypt_get_sg_data(cc, dmreq->sg_out); 697 dst = kmap_atomic(sg_page(sg)); 698 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset); 699 kunmap_atomic(dst); 700 701 return r; 702 } 703 704 static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv, 705 struct dm_crypt_request *dmreq) 706 { 707 /* Used only for writes, there must be an additional space to store IV */ 708 get_random_bytes(iv, cc->iv_size); 709 return 0; 710 } 711 712 static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti, 713 const char *opts) 714 { 715 if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) { 716 ti->error = "AEAD transforms not supported for EBOIV"; 717 return -EINVAL; 718 } 719 720 if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) { 721 ti->error = "Block size of EBOIV cipher does " 722 "not match IV size of block cipher"; 723 return -EINVAL; 724 } 725 726 return 0; 727 } 728 729 static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, 730 struct dm_crypt_request *dmreq) 731 { 732 u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64)); 733 struct skcipher_request *req; 734 struct scatterlist src, dst; 735 struct crypto_wait wait; 736 int err; 737 738 req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); 739 if (!req) 740 return -ENOMEM; 741 742 memset(buf, 0, cc->iv_size); 743 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size); 744 745 sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size); 746 sg_init_one(&dst, iv, cc->iv_size); 747 skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf); 748 skcipher_request_set_callback(req, 0, crypto_req_done, &wait); 749 err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); 750 skcipher_request_free(req); 751 752 return err; 753 } 754 755 static void crypt_iv_elephant_dtr(struct crypt_config *cc) 756 { 757 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; 758 759 crypto_free_skcipher(elephant->tfm); 760 elephant->tfm = NULL; 761 } 762 763 static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti, 764 const char *opts) 765 { 766 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; 767 int r; 768 769 elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); 770 if (IS_ERR(elephant->tfm)) { 771 r = PTR_ERR(elephant->tfm); 772 elephant->tfm = NULL; 773 return r; 774 } 775 776 r = crypt_iv_eboiv_ctr(cc, ti, NULL); 777 if (r) 778 crypt_iv_elephant_dtr(cc); 779 return r; 780 } 781 782 static void diffuser_disk_to_cpu(u32 *d, size_t n) 783 { 784 #ifndef __LITTLE_ENDIAN 785 int i; 786 787 for (i = 0; i < n; i++) 788 d[i] = le32_to_cpu((__le32)d[i]); 789 #endif 790 } 791 792 static void diffuser_cpu_to_disk(__le32 *d, size_t n) 793 { 794 #ifndef __LITTLE_ENDIAN 795 int i; 796 797 for (i = 0; i < n; i++) 798 d[i] = cpu_to_le32((u32)d[i]); 799 #endif 800 } 801 802 static void diffuser_a_decrypt(u32 *d, size_t n) 803 { 804 int i, i1, i2, i3; 805 806 for (i = 0; i < 5; i++) { 807 i1 = 0; 808 i2 = n - 2; 809 i3 = n - 5; 810 811 while (i1 < (n - 1)) { 812 d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23); 813 i1++; i2++; i3++; 814 815 if (i3 >= n) 816 i3 -= n; 817 818 d[i1] += d[i2] ^ d[i3]; 819 i1++; i2++; i3++; 820 821 if (i2 >= n) 822 i2 -= n; 823 824 d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19); 825 i1++; i2++; i3++; 826 827 d[i1] += d[i2] ^ d[i3]; 828 i1++; i2++; i3++; 829 } 830 } 831 } 832 833 static void diffuser_a_encrypt(u32 *d, size_t n) 834 { 835 int i, i1, i2, i3; 836 837 for (i = 0; i < 5; i++) { 
838 i1 = n - 1; 839 i2 = n - 2 - 1; 840 i3 = n - 5 - 1; 841 842 while (i1 > 0) { 843 d[i1] -= d[i2] ^ d[i3]; 844 i1--; i2--; i3--; 845 846 d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19); 847 i1--; i2--; i3--; 848 849 if (i2 < 0) 850 i2 += n; 851 852 d[i1] -= d[i2] ^ d[i3]; 853 i1--; i2--; i3--; 854 855 if (i3 < 0) 856 i3 += n; 857 858 d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23); 859 i1--; i2--; i3--; 860 } 861 } 862 } 863 864 static void diffuser_b_decrypt(u32 *d, size_t n) 865 { 866 int i, i1, i2, i3; 867 868 for (i = 0; i < 3; i++) { 869 i1 = 0; 870 i2 = 2; 871 i3 = 5; 872 873 while (i1 < (n - 1)) { 874 d[i1] += d[i2] ^ d[i3]; 875 i1++; i2++; i3++; 876 877 d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22); 878 i1++; i2++; i3++; 879 880 if (i2 >= n) 881 i2 -= n; 882 883 d[i1] += d[i2] ^ d[i3]; 884 i1++; i2++; i3++; 885 886 if (i3 >= n) 887 i3 -= n; 888 889 d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7); 890 i1++; i2++; i3++; 891 } 892 } 893 } 894 895 static void diffuser_b_encrypt(u32 *d, size_t n) 896 { 897 int i, i1, i2, i3; 898 899 for (i = 0; i < 3; i++) { 900 i1 = n - 1; 901 i2 = 2 - 1; 902 i3 = 5 - 1; 903 904 while (i1 > 0) { 905 d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7); 906 i1--; i2--; i3--; 907 908 if (i3 < 0) 909 i3 += n; 910 911 d[i1] -= d[i2] ^ d[i3]; 912 i1--; i2--; i3--; 913 914 if (i2 < 0) 915 i2 += n; 916 917 d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22); 918 i1--; i2--; i3--; 919 920 d[i1] -= d[i2] ^ d[i3]; 921 i1--; i2--; i3--; 922 } 923 } 924 } 925 926 static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq) 927 { 928 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; 929 u8 *es, *ks, *data, *data2, *data_offset; 930 struct skcipher_request *req; 931 struct scatterlist *sg, *sg2, src, dst; 932 struct crypto_wait wait; 933 int i, r; 934 935 req = skcipher_request_alloc(elephant->tfm, GFP_NOIO); 936 es = kzalloc(16, GFP_NOIO); /* Key for AES */ 937 ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */ 938 939 if (!req || !es || !ks) { 940 r = -ENOMEM; 941 goto out; 942 } 943 944 *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size); 945 946 /* E(Ks, e(s)) */ 947 sg_init_one(&src, es, 16); 948 sg_init_one(&dst, ks, 16); 949 skcipher_request_set_crypt(req, &src, &dst, 16, NULL); 950 skcipher_request_set_callback(req, 0, crypto_req_done, &wait); 951 r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); 952 if (r) 953 goto out; 954 955 /* E(Ks, e'(s)) */ 956 es[15] = 0x80; 957 sg_init_one(&dst, &ks[16], 16); 958 r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); 959 if (r) 960 goto out; 961 962 sg = crypt_get_sg_data(cc, dmreq->sg_out); 963 data = kmap_atomic(sg_page(sg)); 964 data_offset = data + sg->offset; 965 966 /* Cannot modify original bio, copy to sg_out and apply Elephant to it */ 967 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { 968 sg2 = crypt_get_sg_data(cc, dmreq->sg_in); 969 data2 = kmap_atomic(sg_page(sg2)); 970 memcpy(data_offset, data2 + sg2->offset, cc->sector_size); 971 kunmap_atomic(data2); 972 } 973 974 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { 975 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); 976 diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); 977 diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); 978 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); 979 } 980 981 for (i = 0; i < (cc->sector_size / 32); i++) 982 crypto_xor(data_offset + i * 32, ks, 32); 983 984 if 
(bio_data_dir(dmreq->ctx->bio_in) == WRITE) { 985 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); 986 diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); 987 diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); 988 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); 989 } 990 991 kunmap_atomic(data); 992 out: 993 kzfree(ks); 994 kzfree(es); 995 skcipher_request_free(req); 996 return r; 997 } 998 999 static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv, 1000 struct dm_crypt_request *dmreq) 1001 { 1002 int r; 1003 1004 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { 1005 r = crypt_iv_elephant(cc, dmreq); 1006 if (r) 1007 return r; 1008 } 1009 1010 return crypt_iv_eboiv_gen(cc, iv, dmreq); 1011 } 1012 1013 static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv, 1014 struct dm_crypt_request *dmreq) 1015 { 1016 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) 1017 return crypt_iv_elephant(cc, dmreq); 1018 1019 return 0; 1020 } 1021 1022 static int crypt_iv_elephant_init(struct crypt_config *cc) 1023 { 1024 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; 1025 int key_offset = cc->key_size - cc->key_extra_size; 1026 1027 return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size); 1028 } 1029 1030 static int crypt_iv_elephant_wipe(struct crypt_config *cc) 1031 { 1032 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; 1033 u8 key[ELEPHANT_MAX_KEY_SIZE]; 1034 1035 memset(key, 0, cc->key_extra_size); 1036 return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size); 1037 } 1038 1039 static const struct crypt_iv_operations crypt_iv_plain_ops = { 1040 .generator = crypt_iv_plain_gen 1041 }; 1042 1043 static const struct crypt_iv_operations crypt_iv_plain64_ops = { 1044 .generator = crypt_iv_plain64_gen 1045 }; 1046 1047 static const struct crypt_iv_operations crypt_iv_plain64be_ops = { 1048 .generator = crypt_iv_plain64be_gen 1049 }; 1050 1051 static const struct crypt_iv_operations crypt_iv_essiv_ops = { 1052 .generator = crypt_iv_essiv_gen 1053 }; 1054 1055 static const struct crypt_iv_operations crypt_iv_benbi_ops = { 1056 .ctr = crypt_iv_benbi_ctr, 1057 .dtr = crypt_iv_benbi_dtr, 1058 .generator = crypt_iv_benbi_gen 1059 }; 1060 1061 static const struct crypt_iv_operations crypt_iv_null_ops = { 1062 .generator = crypt_iv_null_gen 1063 }; 1064 1065 static const struct crypt_iv_operations crypt_iv_lmk_ops = { 1066 .ctr = crypt_iv_lmk_ctr, 1067 .dtr = crypt_iv_lmk_dtr, 1068 .init = crypt_iv_lmk_init, 1069 .wipe = crypt_iv_lmk_wipe, 1070 .generator = crypt_iv_lmk_gen, 1071 .post = crypt_iv_lmk_post 1072 }; 1073 1074 static const struct crypt_iv_operations crypt_iv_tcw_ops = { 1075 .ctr = crypt_iv_tcw_ctr, 1076 .dtr = crypt_iv_tcw_dtr, 1077 .init = crypt_iv_tcw_init, 1078 .wipe = crypt_iv_tcw_wipe, 1079 .generator = crypt_iv_tcw_gen, 1080 .post = crypt_iv_tcw_post 1081 }; 1082 1083 static struct crypt_iv_operations crypt_iv_random_ops = { 1084 .generator = crypt_iv_random_gen 1085 }; 1086 1087 static struct crypt_iv_operations crypt_iv_eboiv_ops = { 1088 .ctr = crypt_iv_eboiv_ctr, 1089 .generator = crypt_iv_eboiv_gen 1090 }; 1091 1092 static struct crypt_iv_operations crypt_iv_elephant_ops = { 1093 .ctr = crypt_iv_elephant_ctr, 1094 .dtr = crypt_iv_elephant_dtr, 1095 .init = crypt_iv_elephant_init, 1096 .wipe = crypt_iv_elephant_wipe, 1097 .generator = crypt_iv_elephant_gen, 1098 .post = crypt_iv_elephant_post 1099 }; 1100 
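/*
 * Sketch only: a hypothetical helper illustrating how an IV mode name maps
 * to one of the operation tables above.  dm-crypt does this lookup while
 * the target is being constructed; the helper name and the reduced set of
 * modes shown here are illustrative and not part of the driver.
 */
static const struct crypt_iv_operations *example_iv_ops_for_mode(const char *ivmode)
{
        if (!strcmp(ivmode, "plain"))
                return &crypt_iv_plain_ops;
        if (!strcmp(ivmode, "plain64"))
                return &crypt_iv_plain64_ops;
        if (!strcmp(ivmode, "essiv"))
                return &crypt_iv_essiv_ops;
        if (!strcmp(ivmode, "benbi"))
                return &crypt_iv_benbi_ops;
        if (!strcmp(ivmode, "null"))
                return &crypt_iv_null_ops;
        return NULL;
}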
1101 /* 1102 * Integrity extensions 1103 */ 1104 static bool crypt_integrity_aead(struct crypt_config *cc) 1105 { 1106 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); 1107 } 1108 1109 static bool crypt_integrity_hmac(struct crypt_config *cc) 1110 { 1111 return crypt_integrity_aead(cc) && cc->key_mac_size; 1112 } 1113 1114 /* Get sg containing data */ 1115 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, 1116 struct scatterlist *sg) 1117 { 1118 if (unlikely(crypt_integrity_aead(cc))) 1119 return &sg[2]; 1120 1121 return sg; 1122 } 1123 1124 static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) 1125 { 1126 struct bio_integrity_payload *bip; 1127 unsigned int tag_len; 1128 int ret; 1129 1130 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size) 1131 return 0; 1132 1133 bip = bio_integrity_alloc(bio, GFP_NOIO, 1); 1134 if (IS_ERR(bip)) 1135 return PTR_ERR(bip); 1136 1137 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); 1138 1139 bip->bip_iter.bi_size = tag_len; 1140 bip->bip_iter.bi_sector = io->cc->start + io->sector; 1141 1142 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata), 1143 tag_len, offset_in_page(io->integrity_metadata)); 1144 if (unlikely(ret != tag_len)) 1145 return -ENOMEM; 1146 1147 return 0; 1148 } 1149 1150 static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) 1151 { 1152 #ifdef CONFIG_BLK_DEV_INTEGRITY 1153 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk); 1154 struct mapped_device *md = dm_table_get_md(ti->table); 1155 1156 /* From now we require underlying device with our integrity profile */ 1157 if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) { 1158 ti->error = "Integrity profile not supported."; 1159 return -EINVAL; 1160 } 1161 1162 if (bi->tag_size != cc->on_disk_tag_size || 1163 bi->tuple_size != cc->on_disk_tag_size) { 1164 ti->error = "Integrity profile tag size mismatch."; 1165 return -EINVAL; 1166 } 1167 if (1 << bi->interval_exp != cc->sector_size) { 1168 ti->error = "Integrity profile sector size mismatch."; 1169 return -EINVAL; 1170 } 1171 1172 if (crypt_integrity_aead(cc)) { 1173 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size; 1174 DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md), 1175 cc->integrity_tag_size, cc->integrity_iv_size); 1176 1177 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) { 1178 ti->error = "Integrity AEAD auth tag size is not supported."; 1179 return -EINVAL; 1180 } 1181 } else if (cc->integrity_iv_size) 1182 DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md), 1183 cc->integrity_iv_size); 1184 1185 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) { 1186 ti->error = "Not enough space for integrity tag in the profile."; 1187 return -EINVAL; 1188 } 1189 1190 return 0; 1191 #else 1192 ti->error = "Integrity profile not supported."; 1193 return -EINVAL; 1194 #endif 1195 } 1196 1197 static void crypt_convert_init(struct crypt_config *cc, 1198 struct convert_context *ctx, 1199 struct bio *bio_out, struct bio *bio_in, 1200 sector_t sector) 1201 { 1202 ctx->bio_in = bio_in; 1203 ctx->bio_out = bio_out; 1204 if (bio_in) 1205 ctx->iter_in = bio_in->bi_iter; 1206 if (bio_out) 1207 ctx->iter_out = bio_out->bi_iter; 1208 ctx->cc_sector = sector + cc->iv_offset; 1209 init_completion(&ctx->restart); 1210 } 1211 1212 static struct dm_crypt_request *dmreq_of_req(struct 
crypt_config *cc, 1213 void *req) 1214 { 1215 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); 1216 } 1217 1218 static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) 1219 { 1220 return (void *)((char *)dmreq - cc->dmreq_start); 1221 } 1222 1223 static u8 *iv_of_dmreq(struct crypt_config *cc, 1224 struct dm_crypt_request *dmreq) 1225 { 1226 if (crypt_integrity_aead(cc)) 1227 return (u8 *)ALIGN((unsigned long)(dmreq + 1), 1228 crypto_aead_alignmask(any_tfm_aead(cc)) + 1); 1229 else 1230 return (u8 *)ALIGN((unsigned long)(dmreq + 1), 1231 crypto_skcipher_alignmask(any_tfm(cc)) + 1); 1232 } 1233 1234 static u8 *org_iv_of_dmreq(struct crypt_config *cc, 1235 struct dm_crypt_request *dmreq) 1236 { 1237 return iv_of_dmreq(cc, dmreq) + cc->iv_size; 1238 } 1239 1240 static __le64 *org_sector_of_dmreq(struct crypt_config *cc, 1241 struct dm_crypt_request *dmreq) 1242 { 1243 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size; 1244 return (__le64 *) ptr; 1245 } 1246 1247 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc, 1248 struct dm_crypt_request *dmreq) 1249 { 1250 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + 1251 cc->iv_size + sizeof(uint64_t); 1252 return (unsigned int*)ptr; 1253 } 1254 1255 static void *tag_from_dmreq(struct crypt_config *cc, 1256 struct dm_crypt_request *dmreq) 1257 { 1258 struct convert_context *ctx = dmreq->ctx; 1259 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); 1260 1261 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) * 1262 cc->on_disk_tag_size]; 1263 } 1264 1265 static void *iv_tag_from_dmreq(struct crypt_config *cc, 1266 struct dm_crypt_request *dmreq) 1267 { 1268 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size; 1269 } 1270 1271 static int crypt_convert_block_aead(struct crypt_config *cc, 1272 struct convert_context *ctx, 1273 struct aead_request *req, 1274 unsigned int tag_offset) 1275 { 1276 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); 1277 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); 1278 struct dm_crypt_request *dmreq; 1279 u8 *iv, *org_iv, *tag_iv, *tag; 1280 __le64 *sector; 1281 int r = 0; 1282 1283 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size); 1284 1285 /* Reject unexpected unaligned bio. 
*/ 1286 if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) 1287 return -EIO; 1288 1289 dmreq = dmreq_of_req(cc, req); 1290 dmreq->iv_sector = ctx->cc_sector; 1291 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) 1292 dmreq->iv_sector >>= cc->sector_shift; 1293 dmreq->ctx = ctx; 1294 1295 *org_tag_of_dmreq(cc, dmreq) = tag_offset; 1296 1297 sector = org_sector_of_dmreq(cc, dmreq); 1298 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset); 1299 1300 iv = iv_of_dmreq(cc, dmreq); 1301 org_iv = org_iv_of_dmreq(cc, dmreq); 1302 tag = tag_from_dmreq(cc, dmreq); 1303 tag_iv = iv_tag_from_dmreq(cc, dmreq); 1304 1305 /* AEAD request: 1306 * |----- AAD -------|------ DATA -------|-- AUTH TAG --| 1307 * | (authenticated) | (auth+encryption) | | 1308 * | sector_LE | IV | sector in/out | tag in/out | 1309 */ 1310 sg_init_table(dmreq->sg_in, 4); 1311 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t)); 1312 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size); 1313 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset); 1314 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size); 1315 1316 sg_init_table(dmreq->sg_out, 4); 1317 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t)); 1318 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size); 1319 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset); 1320 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size); 1321 1322 if (cc->iv_gen_ops) { 1323 /* For READs use IV stored in integrity metadata */ 1324 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) { 1325 memcpy(org_iv, tag_iv, cc->iv_size); 1326 } else { 1327 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); 1328 if (r < 0) 1329 return r; 1330 /* Store generated IV in integrity metadata */ 1331 if (cc->integrity_iv_size) 1332 memcpy(tag_iv, org_iv, cc->iv_size); 1333 } 1334 /* Working copy of IV, to be modified in crypto API */ 1335 memcpy(iv, org_iv, cc->iv_size); 1336 } 1337 1338 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size); 1339 if (bio_data_dir(ctx->bio_in) == WRITE) { 1340 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, 1341 cc->sector_size, iv); 1342 r = crypto_aead_encrypt(req); 1343 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size) 1344 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0, 1345 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size)); 1346 } else { 1347 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, 1348 cc->sector_size + cc->integrity_tag_size, iv); 1349 r = crypto_aead_decrypt(req); 1350 } 1351 1352 if (r == -EBADMSG) { 1353 char b[BDEVNAME_SIZE]; 1354 DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b), 1355 (unsigned long long)le64_to_cpu(*sector)); 1356 } 1357 1358 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) 1359 r = cc->iv_gen_ops->post(cc, org_iv, dmreq); 1360 1361 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size); 1362 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size); 1363 1364 return r; 1365 } 1366 1367 static int crypt_convert_block_skcipher(struct crypt_config *cc, 1368 struct convert_context *ctx, 1369 struct skcipher_request *req, 1370 unsigned int tag_offset) 1371 { 1372 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); 1373 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); 1374 struct scatterlist *sg_in, *sg_out; 1375 struct dm_crypt_request *dmreq; 1376 u8 *iv, *org_iv, *tag_iv; 1377 __le64 *sector; 1378 
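        /*
         * Illustrative recap of the per-request area used below (a sketch;
         * the offsets are implied by iv_of_dmreq() and friends above, not
         * redefined here):
         *
         *   skcipher/aead request + transform context
         *     ... padding up to cc->dmreq_start ...
         *   struct dm_crypt_request
         *     ... alignment padding ...
         *   iv          working IV passed to the crypto API (cc->iv_size)
         *   org_iv      original/generated IV (cc->iv_size)
         *   sector      __le64 copy of the original sector number
         *   tag_offset  unsigned int index into io->integrity_metadata
         */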
int r = 0; 1379 1380 /* Reject unexpected unaligned bio. */ 1381 if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) 1382 return -EIO; 1383 1384 dmreq = dmreq_of_req(cc, req); 1385 dmreq->iv_sector = ctx->cc_sector; 1386 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) 1387 dmreq->iv_sector >>= cc->sector_shift; 1388 dmreq->ctx = ctx; 1389 1390 *org_tag_of_dmreq(cc, dmreq) = tag_offset; 1391 1392 iv = iv_of_dmreq(cc, dmreq); 1393 org_iv = org_iv_of_dmreq(cc, dmreq); 1394 tag_iv = iv_tag_from_dmreq(cc, dmreq); 1395 1396 sector = org_sector_of_dmreq(cc, dmreq); 1397 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset); 1398 1399 /* For skcipher we use only the first sg item */ 1400 sg_in = &dmreq->sg_in[0]; 1401 sg_out = &dmreq->sg_out[0]; 1402 1403 sg_init_table(sg_in, 1); 1404 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset); 1405 1406 sg_init_table(sg_out, 1); 1407 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset); 1408 1409 if (cc->iv_gen_ops) { 1410 /* For READs use IV stored in integrity metadata */ 1411 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) { 1412 memcpy(org_iv, tag_iv, cc->integrity_iv_size); 1413 } else { 1414 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); 1415 if (r < 0) 1416 return r; 1417 /* Data can be already preprocessed in generator */ 1418 if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags)) 1419 sg_in = sg_out; 1420 /* Store generated IV in integrity metadata */ 1421 if (cc->integrity_iv_size) 1422 memcpy(tag_iv, org_iv, cc->integrity_iv_size); 1423 } 1424 /* Working copy of IV, to be modified in crypto API */ 1425 memcpy(iv, org_iv, cc->iv_size); 1426 } 1427 1428 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv); 1429 1430 if (bio_data_dir(ctx->bio_in) == WRITE) 1431 r = crypto_skcipher_encrypt(req); 1432 else 1433 r = crypto_skcipher_decrypt(req); 1434 1435 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) 1436 r = cc->iv_gen_ops->post(cc, org_iv, dmreq); 1437 1438 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size); 1439 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size); 1440 1441 return r; 1442 } 1443 1444 static void kcryptd_async_done(struct crypto_async_request *async_req, 1445 int error); 1446 1447 static void crypt_alloc_req_skcipher(struct crypt_config *cc, 1448 struct convert_context *ctx) 1449 { 1450 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); 1451 1452 if (!ctx->r.req) 1453 ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO); 1454 1455 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]); 1456 1457 /* 1458 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs 1459 * requests if driver request queue is full. 1460 */ 1461 skcipher_request_set_callback(ctx->r.req, 1462 CRYPTO_TFM_REQ_MAY_BACKLOG, 1463 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); 1464 } 1465 1466 static void crypt_alloc_req_aead(struct crypt_config *cc, 1467 struct convert_context *ctx) 1468 { 1469 if (!ctx->r.req_aead) 1470 ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO); 1471 1472 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]); 1473 1474 /* 1475 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs 1476 * requests if driver request queue is full. 
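         *
         * When a request is backlogged, the encrypt/decrypt call returns
         * -EBUSY and crypt_convert() waits on ctx->restart; that wait is
         * completed from kcryptd_async_done() once the driver signals the
         * backlogged request with -EINPROGRESS.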
1477 */ 1478 aead_request_set_callback(ctx->r.req_aead, 1479 CRYPTO_TFM_REQ_MAY_BACKLOG, 1480 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); 1481 } 1482 1483 static void crypt_alloc_req(struct crypt_config *cc, 1484 struct convert_context *ctx) 1485 { 1486 if (crypt_integrity_aead(cc)) 1487 crypt_alloc_req_aead(cc, ctx); 1488 else 1489 crypt_alloc_req_skcipher(cc, ctx); 1490 } 1491 1492 static void crypt_free_req_skcipher(struct crypt_config *cc, 1493 struct skcipher_request *req, struct bio *base_bio) 1494 { 1495 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); 1496 1497 if ((struct skcipher_request *)(io + 1) != req) 1498 mempool_free(req, &cc->req_pool); 1499 } 1500 1501 static void crypt_free_req_aead(struct crypt_config *cc, 1502 struct aead_request *req, struct bio *base_bio) 1503 { 1504 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); 1505 1506 if ((struct aead_request *)(io + 1) != req) 1507 mempool_free(req, &cc->req_pool); 1508 } 1509 1510 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio) 1511 { 1512 if (crypt_integrity_aead(cc)) 1513 crypt_free_req_aead(cc, req, base_bio); 1514 else 1515 crypt_free_req_skcipher(cc, req, base_bio); 1516 } 1517 1518 /* 1519 * Encrypt / decrypt data from one bio to another one (can be the same one) 1520 */ 1521 static blk_status_t crypt_convert(struct crypt_config *cc, 1522 struct convert_context *ctx) 1523 { 1524 unsigned int tag_offset = 0; 1525 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT; 1526 int r; 1527 1528 atomic_set(&ctx->cc_pending, 1); 1529 1530 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { 1531 1532 crypt_alloc_req(cc, ctx); 1533 atomic_inc(&ctx->cc_pending); 1534 1535 if (crypt_integrity_aead(cc)) 1536 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset); 1537 else 1538 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset); 1539 1540 switch (r) { 1541 /* 1542 * The request was queued by a crypto driver 1543 * but the driver request queue is full, let's wait. 1544 */ 1545 case -EBUSY: 1546 wait_for_completion(&ctx->restart); 1547 reinit_completion(&ctx->restart); 1548 /* fall through */ 1549 /* 1550 * The request is queued and processed asynchronously, 1551 * completion function kcryptd_async_done() will be called. 1552 */ 1553 case -EINPROGRESS: 1554 ctx->r.req = NULL; 1555 ctx->cc_sector += sector_step; 1556 tag_offset++; 1557 continue; 1558 /* 1559 * The request was already processed (synchronously). 1560 */ 1561 case 0: 1562 atomic_dec(&ctx->cc_pending); 1563 ctx->cc_sector += sector_step; 1564 tag_offset++; 1565 cond_resched(); 1566 continue; 1567 /* 1568 * There was a data integrity error. 1569 */ 1570 case -EBADMSG: 1571 atomic_dec(&ctx->cc_pending); 1572 return BLK_STS_PROTECTION; 1573 /* 1574 * There was an error while processing the request. 1575 */ 1576 default: 1577 atomic_dec(&ctx->cc_pending); 1578 return BLK_STS_IOERR; 1579 } 1580 } 1581 1582 return 0; 1583 } 1584 1585 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); 1586 1587 /* 1588 * Generate a new unfragmented bio with the given size 1589 * This should never violate the device limitations (but only because 1590 * max_segment_size is being constrained to PAGE_SIZE). 1591 * 1592 * This function may be called concurrently. If we allocate from the mempool 1593 * concurrently, there is a possibility of deadlock. 
For example, if we have 1594 * mempool of 256 pages, two processes, each wanting 256, pages allocate from 1595 * the mempool concurrently, it may deadlock in a situation where both processes 1596 * have allocated 128 pages and the mempool is exhausted. 1597 * 1598 * In order to avoid this scenario we allocate the pages under a mutex. 1599 * 1600 * In order to not degrade performance with excessive locking, we try 1601 * non-blocking allocations without a mutex first but on failure we fallback 1602 * to blocking allocations with a mutex. 1603 */ 1604 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) 1605 { 1606 struct crypt_config *cc = io->cc; 1607 struct bio *clone; 1608 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1609 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; 1610 unsigned i, len, remaining_size; 1611 struct page *page; 1612 1613 retry: 1614 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) 1615 mutex_lock(&cc->bio_alloc_lock); 1616 1617 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs); 1618 if (!clone) 1619 goto out; 1620 1621 clone_init(io, clone); 1622 1623 remaining_size = size; 1624 1625 for (i = 0; i < nr_iovecs; i++) { 1626 page = mempool_alloc(&cc->page_pool, gfp_mask); 1627 if (!page) { 1628 crypt_free_buffer_pages(cc, clone); 1629 bio_put(clone); 1630 gfp_mask |= __GFP_DIRECT_RECLAIM; 1631 goto retry; 1632 } 1633 1634 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size; 1635 1636 bio_add_page(clone, page, len, 0); 1637 1638 remaining_size -= len; 1639 } 1640 1641 /* Allocate space for integrity tags */ 1642 if (dm_crypt_integrity_io_alloc(io, clone)) { 1643 crypt_free_buffer_pages(cc, clone); 1644 bio_put(clone); 1645 clone = NULL; 1646 } 1647 out: 1648 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) 1649 mutex_unlock(&cc->bio_alloc_lock); 1650 1651 return clone; 1652 } 1653 1654 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) 1655 { 1656 struct bio_vec *bv; 1657 struct bvec_iter_all iter_all; 1658 1659 bio_for_each_segment_all(bv, clone, iter_all) { 1660 BUG_ON(!bv->bv_page); 1661 mempool_free(bv->bv_page, &cc->page_pool); 1662 } 1663 } 1664 1665 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, 1666 struct bio *bio, sector_t sector) 1667 { 1668 io->cc = cc; 1669 io->base_bio = bio; 1670 io->sector = sector; 1671 io->error = 0; 1672 io->ctx.r.req = NULL; 1673 io->integrity_metadata = NULL; 1674 io->integrity_metadata_from_pool = false; 1675 atomic_set(&io->io_pending, 0); 1676 } 1677 1678 static void crypt_inc_pending(struct dm_crypt_io *io) 1679 { 1680 atomic_inc(&io->io_pending); 1681 } 1682 1683 /* 1684 * One of the bios was finished. Check for completion of 1685 * the whole request and correctly clean up the buffer. 1686 */ 1687 static void crypt_dec_pending(struct dm_crypt_io *io) 1688 { 1689 struct crypt_config *cc = io->cc; 1690 struct bio *base_bio = io->base_bio; 1691 blk_status_t error = io->error; 1692 1693 if (!atomic_dec_and_test(&io->io_pending)) 1694 return; 1695 1696 if (io->ctx.r.req) 1697 crypt_free_req(cc, io->ctx.r.req, base_bio); 1698 1699 if (unlikely(io->integrity_metadata_from_pool)) 1700 mempool_free(io->integrity_metadata, &io->cc->tag_pool); 1701 else 1702 kfree(io->integrity_metadata); 1703 1704 base_bio->bi_status = error; 1705 bio_endio(base_bio); 1706 } 1707 1708 /* 1709 * kcryptd/kcryptd_io: 1710 * 1711 * Needed because it would be very unwise to do decryption in an 1712 * interrupt context. 
1713 * 1714 * kcryptd performs the actual encryption or decryption. 1715 * 1716 * kcryptd_io performs the IO submission. 1717 * 1718 * They must be separated as otherwise the final stages could be 1719 * starved by new requests which can block in the first stages due 1720 * to memory allocation. 1721 * 1722 * The work is done per CPU global for all dm-crypt instances. 1723 * They should not depend on each other and do not block. 1724 */ 1725 static void crypt_endio(struct bio *clone) 1726 { 1727 struct dm_crypt_io *io = clone->bi_private; 1728 struct crypt_config *cc = io->cc; 1729 unsigned rw = bio_data_dir(clone); 1730 blk_status_t error; 1731 1732 /* 1733 * free the processed pages 1734 */ 1735 if (rw == WRITE) 1736 crypt_free_buffer_pages(cc, clone); 1737 1738 error = clone->bi_status; 1739 bio_put(clone); 1740 1741 if (rw == READ && !error) { 1742 kcryptd_queue_crypt(io); 1743 return; 1744 } 1745 1746 if (unlikely(error)) 1747 io->error = error; 1748 1749 crypt_dec_pending(io); 1750 } 1751 1752 static void clone_init(struct dm_crypt_io *io, struct bio *clone) 1753 { 1754 struct crypt_config *cc = io->cc; 1755 1756 clone->bi_private = io; 1757 clone->bi_end_io = crypt_endio; 1758 bio_set_dev(clone, cc->dev->bdev); 1759 clone->bi_opf = io->base_bio->bi_opf; 1760 } 1761 1762 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) 1763 { 1764 struct crypt_config *cc = io->cc; 1765 struct bio *clone; 1766 1767 /* 1768 * We need the original biovec array in order to decrypt 1769 * the whole bio data *afterwards* -- thanks to immutable 1770 * biovecs we don't need to worry about the block layer 1771 * modifying the biovec array; so leverage bio_clone_fast(). 1772 */ 1773 clone = bio_clone_fast(io->base_bio, gfp, &cc->bs); 1774 if (!clone) 1775 return 1; 1776 1777 crypt_inc_pending(io); 1778 1779 clone_init(io, clone); 1780 clone->bi_iter.bi_sector = cc->start + io->sector; 1781 1782 if (dm_crypt_integrity_io_alloc(io, clone)) { 1783 crypt_dec_pending(io); 1784 bio_put(clone); 1785 return 1; 1786 } 1787 1788 generic_make_request(clone); 1789 return 0; 1790 } 1791 1792 static void kcryptd_io_read_work(struct work_struct *work) 1793 { 1794 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 1795 1796 crypt_inc_pending(io); 1797 if (kcryptd_io_read(io, GFP_NOIO)) 1798 io->error = BLK_STS_RESOURCE; 1799 crypt_dec_pending(io); 1800 } 1801 1802 static void kcryptd_queue_read(struct dm_crypt_io *io) 1803 { 1804 struct crypt_config *cc = io->cc; 1805 1806 INIT_WORK(&io->work, kcryptd_io_read_work); 1807 queue_work(cc->io_queue, &io->work); 1808 } 1809 1810 static void kcryptd_io_write(struct dm_crypt_io *io) 1811 { 1812 struct bio *clone = io->ctx.bio_out; 1813 1814 generic_make_request(clone); 1815 } 1816 1817 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) 1818 1819 static int dmcrypt_write(void *data) 1820 { 1821 struct crypt_config *cc = data; 1822 struct dm_crypt_io *io; 1823 1824 while (1) { 1825 struct rb_root write_tree; 1826 struct blk_plug plug; 1827 1828 spin_lock_irq(&cc->write_thread_lock); 1829 continue_locked: 1830 1831 if (!RB_EMPTY_ROOT(&cc->write_tree)) 1832 goto pop_from_list; 1833 1834 set_current_state(TASK_INTERRUPTIBLE); 1835 1836 spin_unlock_irq(&cc->write_thread_lock); 1837 1838 if (unlikely(kthread_should_stop())) { 1839 set_current_state(TASK_RUNNING); 1840 break; 1841 } 1842 1843 schedule(); 1844 1845 set_current_state(TASK_RUNNING); 1846 spin_lock_irq(&cc->write_thread_lock); 1847 goto continue_locked; 1848 1849 
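        /*
         * Offloaded writes were inserted into cc->write_tree keyed by
         * sector (see kcryptd_crypt_write_io_submit), so the loop below
         * pops and submits them in sector order, which helps the block
         * layer merge adjacent requests.
         */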
pop_from_list: 1850 write_tree = cc->write_tree; 1851 cc->write_tree = RB_ROOT; 1852 spin_unlock_irq(&cc->write_thread_lock); 1853 1854 BUG_ON(rb_parent(write_tree.rb_node)); 1855 1856 /* 1857 * Note: we cannot walk the tree here with rb_next because 1858 * the structures may be freed when kcryptd_io_write is called. 1859 */ 1860 blk_start_plug(&plug); 1861 do { 1862 io = crypt_io_from_node(rb_first(&write_tree)); 1863 rb_erase(&io->rb_node, &write_tree); 1864 kcryptd_io_write(io); 1865 } while (!RB_EMPTY_ROOT(&write_tree)); 1866 blk_finish_plug(&plug); 1867 } 1868 return 0; 1869 } 1870 1871 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) 1872 { 1873 struct bio *clone = io->ctx.bio_out; 1874 struct crypt_config *cc = io->cc; 1875 unsigned long flags; 1876 sector_t sector; 1877 struct rb_node **rbp, *parent; 1878 1879 if (unlikely(io->error)) { 1880 crypt_free_buffer_pages(cc, clone); 1881 bio_put(clone); 1882 crypt_dec_pending(io); 1883 return; 1884 } 1885 1886 /* crypt_convert should have filled the clone bio */ 1887 BUG_ON(io->ctx.iter_out.bi_size); 1888 1889 clone->bi_iter.bi_sector = cc->start + io->sector; 1890 1891 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) { 1892 generic_make_request(clone); 1893 return; 1894 } 1895 1896 spin_lock_irqsave(&cc->write_thread_lock, flags); 1897 if (RB_EMPTY_ROOT(&cc->write_tree)) 1898 wake_up_process(cc->write_thread); 1899 rbp = &cc->write_tree.rb_node; 1900 parent = NULL; 1901 sector = io->sector; 1902 while (*rbp) { 1903 parent = *rbp; 1904 if (sector < crypt_io_from_node(parent)->sector) 1905 rbp = &(*rbp)->rb_left; 1906 else 1907 rbp = &(*rbp)->rb_right; 1908 } 1909 rb_link_node(&io->rb_node, parent, rbp); 1910 rb_insert_color(&io->rb_node, &cc->write_tree); 1911 spin_unlock_irqrestore(&cc->write_thread_lock, flags); 1912 } 1913 1914 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) 1915 { 1916 struct crypt_config *cc = io->cc; 1917 struct bio *clone; 1918 int crypt_finished; 1919 sector_t sector = io->sector; 1920 blk_status_t r; 1921 1922 /* 1923 * Prevent io from disappearing until this function completes. 
1924 */ 1925 crypt_inc_pending(io); 1926 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); 1927 1928 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); 1929 if (unlikely(!clone)) { 1930 io->error = BLK_STS_IOERR; 1931 goto dec; 1932 } 1933 1934 io->ctx.bio_out = clone; 1935 io->ctx.iter_out = clone->bi_iter; 1936 1937 sector += bio_sectors(clone); 1938 1939 crypt_inc_pending(io); 1940 r = crypt_convert(cc, &io->ctx); 1941 if (r) 1942 io->error = r; 1943 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); 1944 1945 /* Encryption was already finished, submit io now */ 1946 if (crypt_finished) { 1947 kcryptd_crypt_write_io_submit(io, 0); 1948 io->sector = sector; 1949 } 1950 1951 dec: 1952 crypt_dec_pending(io); 1953 } 1954 1955 static void kcryptd_crypt_read_done(struct dm_crypt_io *io) 1956 { 1957 crypt_dec_pending(io); 1958 } 1959 1960 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) 1961 { 1962 struct crypt_config *cc = io->cc; 1963 blk_status_t r; 1964 1965 crypt_inc_pending(io); 1966 1967 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, 1968 io->sector); 1969 1970 r = crypt_convert(cc, &io->ctx); 1971 if (r) 1972 io->error = r; 1973 1974 if (atomic_dec_and_test(&io->ctx.cc_pending)) 1975 kcryptd_crypt_read_done(io); 1976 1977 crypt_dec_pending(io); 1978 } 1979 1980 static void kcryptd_async_done(struct crypto_async_request *async_req, 1981 int error) 1982 { 1983 struct dm_crypt_request *dmreq = async_req->data; 1984 struct convert_context *ctx = dmreq->ctx; 1985 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); 1986 struct crypt_config *cc = io->cc; 1987 1988 /* 1989 * A request from crypto driver backlog is going to be processed now, 1990 * finish the completion and continue in crypt_convert(). 1991 * (Callback will be called for the second time for this request.) 
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);

	if (error == -EBADMSG) {
		char b[BDEVNAME_SIZE];
		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
		io->error = BLK_STS_PROTECTION;
	} else if (error < 0)
		io->error = BLK_STS_IOERR;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

static void crypt_free_tfms_aead(struct crypt_config *cc)
{
	if (!cc->cipher_tfm.tfms_aead)
		return;

	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
		cc->cipher_tfm.tfms_aead[0] = NULL;
	}

	kfree(cc->cipher_tfm.tfms_aead);
	cc->cipher_tfm.tfms_aead = NULL;
}

static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->cipher_tfm.tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
			cc->cipher_tfm.tfms[i] = NULL;
		}

	kfree(cc->cipher_tfm.tfms);
	cc->cipher_tfm.tfms = NULL;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	if (crypt_integrity_aead(cc))
		crypt_free_tfms_aead(cc);
	else
		crypt_free_tfms_skcipher(cc);
}

static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
				      sizeof(struct crypto_skcipher *),
				      GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	/*
	 * dm-crypt performance can vary greatly depending on which crypto
	 * algorithm implementation is used. Help people debug performance
	 * problems by logging the ->cra_driver_name.
	 */
	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
		      crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
{
	int err;

	cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
	if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
		crypt_free_tfms(cc);
		return err;
	}

	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
		      crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	if (crypt_integrity_aead(cc))
		return crypt_alloc_tfms_aead(cc, ciphermode);
	else
		return crypt_alloc_tfms_skcipher(cc, ciphermode);
}

static unsigned crypt_subkey_size(struct crypt_config *cc)
{
	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}

static unsigned crypt_authenckey_size(struct crypt_config *cc)
{
	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}
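/*
 * Sketch of the authenc() key blob built by crypt_copy_authenckey() below
 * (layout follows from the code; offsets are illustrative):
 *
 *   struct rtattr, type CRYPTO_AUTHENC_KEYA_PARAM
 *   struct crypto_authenc_key_param { enckeylen (big endian) }
 *   authentication (HMAC) key, authkeylen bytes
 *   encryption key, enckeylen bytes
 */
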
/*
 * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must be passed in a special packed format.
 * This function converts cc->key into that format.
 */
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned enckeylen, unsigned authkeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;

	rta = (struct rtattr *)p;
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	p += RTA_SPACE(sizeof(*param));
	memcpy(p, key + enckeylen, authkeylen);
	p += authkeylen;
	memcpy(p, key, enckeylen);
}

static int crypt_setkey(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = crypt_subkey_size(cc);

	if (crypt_integrity_hmac(cc)) {
		if (subkey_size < cc->key_mac_size)
			return -EINVAL;

		crypt_copy_authenckey(cc->authenc_key, cc->key,
				      subkey_size - cc->key_mac_size,
				      cc->key_mac_size);
	}

	for (i = 0; i < cc->tfms_count; i++) {
		if (crypt_integrity_hmac(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
				cc->authenc_key, crypt_authenckey_size(cc));
		else if (crypt_integrity_aead(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
					       cc->key + (i * subkey_size),
					       subkey_size);
		else
			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
						   cc->key + (i * subkey_size),
						   subkey_size);
		if (r)
			err = r;
	}

	if (crypt_integrity_hmac(cc))
		memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));

	return err;
}

#ifdef CONFIG_KEYS

static bool contains_whitespace(const char *str)
{
	while (*str)
		if (isspace(*str++))
			return true;
	return false;
}
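/*
 * Worked example (hypothetical key description): a table key field of
 * ":32:logon:my_prefix:my_key" is first advanced past the size by
 * get_key_size(), so crypt_set_key() sees ":logon:my_prefix:my_key" and
 * this function receives "logon:my_prefix:my_key"; it then looks up a
 * 32-byte payload in the logon keyring under "my_prefix:my_key".
 */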
static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	char *new_key_string, *key_desc;
	int ret;
	struct key *key;
	const struct user_key_payload *ukp;

	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */
	if (contains_whitespace(key_string)) {
		DMERR("whitespace chars not allowed in key string");
		return -EINVAL;
	}

	/* look for next ':' separating key_type from key_description */
	key_desc = strpbrk(key_string, ":");
	if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
		return -EINVAL;

	if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
	    strncmp(key_string, "user:", key_desc - key_string + 1))
		return -EINVAL;

	new_key_string = kstrdup(key_string, GFP_KERNEL);
	if (!new_key_string)
		return -ENOMEM;

	key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
			  key_desc + 1, NULL);
	if (IS_ERR(key)) {
		kzfree(new_key_string);
		return PTR_ERR(key);
	}

	down_read(&key->sem);

	ukp = user_key_payload_locked(key);
	if (!ukp) {
		up_read(&key->sem);
		key_put(key);
		kzfree(new_key_string);
		return -EKEYREVOKED;
	}

	if (cc->key_size != ukp->datalen) {
		up_read(&key->sem);
		key_put(key);
		kzfree(new_key_string);
		return -EINVAL;
	}

	memcpy(cc->key, ukp->data, cc->key_size);

	up_read(&key->sem);
	key_put(key);

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	ret = crypt_setkey(cc);

	if (!ret) {
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
		kzfree(cc->key_string);
		cc->key_string = new_key_string;
	} else
		kzfree(new_key_string);

	return ret;
}
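/*
 * Examples (illustrative): for a plain hex key the size is simply half the
 * string length, e.g. a 128-character hex string yields 64; for a keyring
 * reference such as ":32:user:my_key" the explicit "32" is parsed and the
 * string pointer is advanced to ":user:my_key" for later processing.
 */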
static int get_key_size(char **key_string)
{
	char *colon, dummy;
	int ret;

	if (*key_string[0] != ':')
		return strlen(*key_string) >> 1;

	/* look for next ':' in key string */
	colon = strpbrk(*key_string + 1, ":");
	if (!colon)
		return -EINVAL;

	if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
		return -EINVAL;

	*key_string = colon;

	/* remaining key string should be :<logon|user>:<key_desc> */

	return ret;
}

#else

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	return -EINVAL;
}

static int get_key_size(char **key_string)
{
	return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
}

#endif

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
	if (key[0] == ':') {
		r = crypt_set_keyring_key(cc, key + 1);
		goto out;
	}

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	/* wipe references to any kernel keyring key */
	kzfree(cc->key_string);
	cc->key_string = NULL;

	/* Decode key from its hex representation. */
	if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
		goto out;

	r = crypt_setkey(cc);
	if (!r)
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	int r;

	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	get_random_bytes(&cc->key, cc->key_size);

	/* Wipe IV private keys */
	if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
		r = cc->iv_gen_ops->wipe(cc);
		if (r)
			return r;
	}

	kzfree(cc->key_string);
	cc->key_string = NULL;
	r = crypt_setkey(cc);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return r;
}

static void crypt_calculate_pages_per_client(void)
{
	unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;

	if (!dm_crypt_clients_n)
		return;

	pages /= dm_crypt_clients_n;
	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
	dm_crypt_pages_per_client = pages;
}

static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct crypt_config *cc = pool_data;
	struct page *page;

	if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
	    likely(gfp_mask & __GFP_NORETRY))
		return NULL;

	page = alloc_page(gfp_mask);
	if (likely(page != NULL))
		percpu_counter_add(&cc->n_allocated_pages, 1);

	return page;
}

static void crypt_page_free(void *page, void *pool_data)
{
	struct crypt_config *cc = pool_data;

	__free_page(page);
	percpu_counter_sub(&cc->n_allocated_pages, 1);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	bioset_exit(&cc->bs);

	mempool_exit(&cc->page_pool);
	mempool_exit(&cc->req_pool);
	mempool_exit(&cc->tag_pool);

	WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
	percpu_counter_destroy(&cc->n_allocated_pages);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher_string);
	kzfree(cc->key_string);
	kzfree(cc->cipher_auth);
	kzfree(cc->authenc_key);

	mutex_destroy(&cc->bio_alloc_lock);

	/* Must zero key material before freeing */
	kzfree(cc);

	spin_lock(&dm_crypt_clients_lock);
	WARN_ON(!dm_crypt_clients_n);
	dm_crypt_clients_n--;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);
}
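/*
 * Note: some IV modes below consume part of the supplied key material
 * (lmk, tcw and elephant adjust key_parts/key_extra_size), so this runs
 * before crypt_set_key() splits the key into per-tfm subkeys.
 */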
static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
{
	struct crypt_config *cc = ti->private;

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "plain64be") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64be_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "eboiv") == 0)
		cc->iv_gen_ops = &crypt_iv_eboiv_ops;
	else if (strcmp(ivmode, "elephant") == 0) {
		cc->iv_gen_ops = &crypt_iv_elephant_ops;
		cc->key_parts = 2;
		cc->key_extra_size = cc->key_size / 2;
		if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
			return -EINVAL;
		set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
	} else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as the IV seed.
		 * All keys (including the IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else if (strcmp(ivmode, "random") == 0) {
		cc->iv_gen_ops = &crypt_iv_random_ops;
		/* Need storage space in integrity fields. */
		cc->integrity_iv_size = cc->iv_size;
	} else {
		ti->error = "Invalid IV mode";
		return -EINVAL;
	}

	return 0;
}

/*
 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
 * The HMAC is needed to calculate tag size (HMAC digest size).
 * This should probably be done by crypto-api calls (once available...)
 */
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{
	char *start, *end, *mac_alg = NULL;
	struct crypto_ahash *mac;

	if (!strstarts(cipher_api, "authenc("))
		return 0;

	start = strchr(cipher_api, '(');
	end = strchr(cipher_api, ',');
	if (!start || !end || ++start > end)
		return -EINVAL;

	mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;
	strncpy(mac_alg, start, end - start);

	mac = crypto_alloc_ahash(mac_alg, 0, 0);
	kfree(mac_alg);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	cc->key_mac_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
	if (!cc->authenc_key)
		return -ENOMEM;

	return 0;
}
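/*
 * Parsing examples for the "capi:" syntax (spec strings are illustrative):
 * "capi:xts(aes)-plain64" yields cipher_api "xts(aes)" and ivmode "plain64";
 * "capi:cbc(aes)-essiv:sha256" additionally yields ivopts "sha256". The
 * ivopts part is split off before the ivmode because a digest name may
 * itself contain '-'. With an authenc() spec and the integrity option,
 * crypt_ctr_auth_cipher() sizes the HMAC key from the embedded hash name.
 */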
static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
	tmp = &cipher_in[strlen("capi:")];

	/* Separate IV options if present; they can contain another '-' in the hash name */
	*ivopts = strrchr(tmp, ':');
	if (*ivopts) {
		**ivopts = '\0';
		(*ivopts)++;
	}
	/* Parse IV mode */
	*ivmode = strrchr(tmp, '-');
	if (*ivmode) {
		**ivmode = '\0';
		(*ivmode)++;
	}
	/* The rest is crypto API spec */
	cipher_api = tmp;

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return -ENOMEM;
		}
	}

	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			return -EINVAL;
		}
		ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
			       cipher_api, *ivopts);
		if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
			ti->error = "Cannot allocate cipher string";
			return -ENOMEM;
		}
		cipher_api = buf;
	}

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	return 0;
}
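/*
 * Mapping examples for the legacy syntax (illustrative): "aes-xts-plain64"
 * becomes the crypto API spec "xts(aes)" with ivmode "plain64";
 * "aes-cbc-essiv:sha256" becomes "essiv(cbc(aes),sha256)";
 * "aes:64-cbc-lmk" requests 64 keys ("aes" with keycount 64), as used by
 * loop-AES compatible mappings.
 */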
static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	chainmode = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			kfree(cipher_api);
			return -EINVAL;
		}
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
	} else {
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "%s(%s)", chainmode, cipher);
	}
	if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		kfree(cipher_api);
		return ret;
	}
	kfree(cipher_api);

	return 0;
bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Initialize IV */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	/* wipe the kernel key payload copy */
	if (cc->key_string)
		memset(cc->key, 0, cc->key_size * sizeof(u8));

	return ret;
}
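/*
 * Example feature argument block as it appears at the end of a table line
 * (values are illustrative): "3 allow_discards sector_size:4096
 * iv_large_sectors"; the leading count is consumed by dm_read_arg_group()
 * and each keyword is then matched below.
 */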
static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->on_disk_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
				ti->error = "Device size is not multiple of sector_size feature";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}
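/*
 * Example table line (device and key values are illustrative):
 *   dmsetup create cryptdev --table \
 *     "0 2097152 crypt aes-xts-plain64 <128 hex chars> 0 /dev/sdb 0"
 * maps a 1 GiB region of /dev/sdb; the per-target arguments handled by
 * crypt_ctr() start at "aes-xts-plain64".
 */
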
/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	const char *devname = dm_table_device_name(ti->table);
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	spin_lock(&dm_crypt_clients_lock);
	dm_crypt_clients_n++;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
	if (ret < 0)
		goto bad;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	/* ...| IV + padding | original IV | original sec. number | bio tag offset | */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
	if (ret) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);

	ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
	if (ret) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
	if (ret) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
		if (ret) {
			ti->error = "Cannot allocate integrity tags mempool";
			goto bad;
		}

		cc->tag_pool_max_sectors <<= cc->sector_shift;
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
						  1, devname);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd/%s",
						  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus(), devname);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	spin_lock_init(&cc->write_thread_lock);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio_set_dev(bio, cc->dev->bdev);
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/*
	 * Check if bio is too large, split as needed.
	 */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->on_disk_tag_size) {
		unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);

		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}

	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
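/*
 * STATUSTYPE_TABLE output mirrors the constructor arguments, e.g.
 * (illustrative): "aes-xts-plain64 :64:logon:my_prefix:my_key 0 8:16 0
 * 2 sector_size:4096 iv_large_sectors". A hex key is re-emitted as hex,
 * a keyring key as :<key_size>:<key_type>:<key_description>, and the
 * underlying device appears in major:minor form.
 */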
static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else
				for (i = 0; i < cc->key_size; i++)
					DMEMIT("%02x", cc->key[i]);
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (cc->on_disk_tag_size)
				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
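/*
 * Typical use from userspace (device name is illustrative), with the
 * target suspended first:
 *   dmsetup message cryptdev 0 key wipe
 *   dmsetup message cryptdev 0 key set :64:logon:my_prefix:my_key
 */
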
/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
			 char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			/* wipe the kernel key payload copy */
			if (cc->key_string)
				memset(cc->key, 0, cc->key_size * sizeof(u8));
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe"))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

	limits->logical_block_size =
		max_t(unsigned short, limits->logical_block_size, cc->sector_size);
	limits->physical_block_size =
		max_t(unsigned, limits->physical_block_size, cc->sector_size);
	limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 20, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");