/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: http://www.truecrypt.org
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
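
/*
 * Illustration (not part of the code): with a 16-byte IV and sector 5,
 * "plain" produces 05 00 00 00 followed by twelve zero bytes, while
 * "plain64" stores the full 64-bit little-endian sector number in the
 * first eight bytes and zeroes the rest.
 */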

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
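
/*
 * benbi illustration (not used by the code): for a cipher with 16-byte
 * blocks the shift computed in crypt_iv_benbi_ctr() is 9 - 4 = 5, so
 * sector 3 becomes block count (3 << 5) + 1 = 97, stored big-endian in
 * the last 8 bytes of the IV.
 */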

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}
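
/*
 * Sketch of the whitening computation below (descriptive only): the
 * 16-byte whitening seed is XORed with the sector number, each of the
 * four 32-bit words is replaced by its CRC32, the two halves are folded
 * together by XOR and the resulting 8 bytes are XORed over the whole
 * 512-byte sector.
 */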

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memset(buf, 0, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr       = crypt_iv_tcw_ctr,
	.dtr       = crypt_iv_tcw_dtr,
	.init      = crypt_iv_tcw_init,
	.wipe      = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post      = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

static void crypt_free_req(struct crypt_config *cc,
			   struct ablkcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct ablkcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio.  The caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */
		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_iter.bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		crypt_free_req(cc, io->ctx.req, base_bio);
	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
		mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_iter.bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.iter_out = clone->bi_iter;

		remaining -= clone->bi_iter.bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);

		r = crypt_convert(cc, &io->ctx);
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
			crypt_io_init(new_io, io->cc, io->base_bio, sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.iter_in = io->ctx.iter_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}
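
/*
 * Note on the async path (descriptive only): when a request queued with
 * CRYPTO_TFM_REQ_MAY_BACKLOG is later accepted by the crypto driver, the
 * callback above runs with error == -EINPROGRESS; completing ctx->restart
 * lets crypt_convert() (waiting in its -EBUSY case) move on to the next
 * block.
 */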

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}
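
/*
 * Key layout note (illustrative): for a multi-key mode such as
 * "aes:2-cbc-plain" a 64-byte volume key is split into two 32-byte
 * subkeys, one per tfm; modes that carry extra key material (lmk IV
 * seed, tcw IV seed + whitening) keep those trailing key_extra_size
 * bytes out of the block cipher keys set above.
 */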

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;
	cc->key_extra_size = 0;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Version 2 and 3 is recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
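/*
 * For example (illustrative values only), the target arguments could be
 * "aes-cbc-essiv:sha256 <key in hex> 0 /dev/sda1 0", optionally followed
 * by the feature arguments "1 allow_discards".
 */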
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& crypto_ablkcipher_alignmask(any_tfm(cc));
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
	}

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_bio_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
	io->ctx.req = (struct ablkcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_bios)
			DMEMIT(" 1 allow_discards");

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
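
/*
 * Example usage of the message interface (illustrative): while the
 * device is suspended, "dmsetup message <dev> 0 key wipe" or
 * "dmsetup message <dev> 0 key set <hexkey>".
 */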

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 13, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");