/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *    struct ablkcipher_request
	 *       context
	 *       padding
	 *    struct dm_crypt_request
	 *       padding
	 *    IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: http://www.truecrypt.org
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
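
/*
 * Illustrative <cipher> strings (examples only, not an exhaustive list):
 * the IV mode is the last dash-separated component of the table argument,
 * e.g. "aes-cbc-plain", "aes-cbc-essiv:sha256", "aes-cbc-benbi" or, for the
 * Loop-AES multi-key compatibility mode, "aes:64-cbc-lmk".  See
 * crypt_ctr_cipher() below for the exact parsing rules.
 */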

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;
	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}
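
/*
 * The lmk generator above, in short (derived from the code, kept here for
 * reference): the per-sector IV is MD5(seed || sector data blocks 1-31 ||
 * encoded sector number) without final MD5 padding.  On writes the IV is
 * computed from the plaintext before encryption; on reads the generator
 * leaves the IV zero and the post hook recomputes the hash from the
 * decrypted data and XORs it into the first 16 bytes of the sector,
 * undoing the tweak Loop-AES applies to block 0.
 */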

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
	} sdesc;
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	sdesc.desc.tfm = tcw->crc32_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(&sdesc.desc);
		if (r)
			goto out;
		r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memset(buf, 0, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

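	/*
	 * Hand exactly one 512-byte sector to the cipher: sg_in/sg_out each
	 * map a single page fragment and the IV sits directly behind
	 * struct dm_crypt_request (see iv_of_dmreq()).  Writes encrypt,
	 * reads decrypt.
	 */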
	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through*/
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio.  The caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */
		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_iter.bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
					  struct bio *bio, sector_t sector)
{
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		mempool_free(io->ctx.req, cc->req_pool);
	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
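
/*
 * Rough request flow through the two queues (summarised from the code in
 * this file, for orientation only):
 *
 *   READ:  crypt_map -> kcryptd_io_read (clone + submit)
 *            -> crypt_endio -> kcryptd_queue_crypt
 *            -> kcryptd_crypt_read_convert -> crypt_dec_pending -> bio_endio
 *
 *   WRITE: crypt_map -> kcryptd_queue_crypt
 *            -> kcryptd_crypt_write_convert (encrypt into cloned pages)
 *            -> kcryptd_crypt_write_io_submit -> crypt_endio
 *            -> crypt_dec_pending -> bio_endio
 */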

static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_iter.bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.iter_out = clone->bi_iter;

		remaining -= clone->bi_iter.bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);

		r = crypt_convert(cc, &io->ctx);
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->cc, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.iter_in = io->ctx.iter_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

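/*
 * kcryptd_queue_crypt() is the entry point onto the crypt workqueue: it is
 * called from crypt_map() for writes and from crypt_endio() once the read
 * of the ciphertext has completed.
 */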
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;
	cc->key_extra_size = 0;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
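	/*
	 * Worked example of the parsing above (illustrative only):
	 * "aes-cbc-essiv:sha256" splits into cipher "aes", chainmode "cbc",
	 * ivmode "essiv" and ivopts "sha256", and is handed to the crypto
	 * API as "cbc(aes)"; "aes:64-cbc-lmk" additionally sets tfms_count
	 * to 64.
	 */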
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Version 2 and 3 is recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_bios)
			DMEMIT(" 1 allow_discards");

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
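/*
 * Example usage (device name and key purely illustrative):
 *	dmsetup message crypt0 0 key wipe
 *	dmsetup message crypt0 0 key set <hex key>
 * The device must be suspended while the key is manipulated.
 */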
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 13, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");