/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * Duplicated per-CPU state for cipher.
 */
struct crypt_cpu {
	struct ablkcipher_request *req;
	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher *tfms[0];
};

/*
 * The fields in here must be read only after initialization;
 * changing state should be in crypt_cpu.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Duplicated per cpu state. Access through
	 * per_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return __this_cpu_ptr(cc->cpu)->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally an extra IV seed.
 *       This means that after decryption the first block of the sector
 *       must be tweaked according to the decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

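/*
 * Illustrative example for the plain/plain64 generators below (values are
 * made up, not taken from the original source): with a 16-byte IV and
 * sector number 0x0000000102030405, "plain" produces the bytes
 * 05 04 03 02 00 00 ... 00 (only the low 32 bits, little-endian, zero
 * padded), while "plain64" produces 05 04 03 02 01 00 00 00 followed by
 * zero padding.
 */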
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err, cpu;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;

		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
					   crypto_hash_digestsize(essiv->hash_tfm));
		if (err)
			return err;
	}

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int cpu, r, err = 0;

	memset(essiv->salt, 0, salt_size);

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
		if (r)
			err = r;
	}

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	int cpu;
	struct crypt_cpu *cpu_cc;
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	for_each_possible_cpu(cpu) {
		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
		essiv_tfm = cpu_cc->iv_private;

		if (essiv_tfm)
			crypto_free_cipher(essiv_tfm);

		cpu_cc->iv_private = NULL;
	}
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
					    crypto_hash_digestsize(hash_tfm));
		if (IS_ERR(essiv_tfm)) {
			crypt_iv_essiv_dtr(cc);
			return PTR_ERR(essiv_tfm);
		}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

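/*
 * Worked sketch of the essiv scheme above (assuming an aes-cbc-essiv:sha256
 * mapping): the salt is sha256(volume key) and keys a separate cipher
 * instance; the IV for sector N is then E_salt(N as a zero-padded
 * little-endian 64-bit value), so identical plaintext in different sectors
 * still encrypts differently without exposing the sector number directly.
 */
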
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* We need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen. */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is set below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

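/*
 * Illustrative example for the benbi generator above (numbers are
 * assumptions, not from the original source): for a 16-byte-block cipher,
 * log = 4 and shift = 9 - 4 = 5, so sector N covers the 32 narrow blocks
 * numbered (N << 5) + 1 .. (N << 5) + 32; the big-endian count of the
 * first of them is stored in the last 8 bytes of the IV.
 */
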
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	u32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

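/*
 * Informal summary of the lmk generator above: on writes the IV is the raw
 * (unpadded) MD5 state of optional seed || plaintext bytes 16..511 || a
 * 56-bit sector marker, and CBC mixes it into block 0.  On reads the same
 * digest is recomputed from the decrypted data and XORed back into block 0
 * by crypt_iv_lmk_post() to undo that tweak.
 */
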
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr = crypt_iv_essiv_ctr,
	.dtr = crypt_iv_essiv_dtr,
	.init = crypt_iv_essiv_init,
	.wipe = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr = crypt_iv_benbi_ctr,
	.dtr = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr = crypt_iv_lmk_ctr,
	.dtr = crypt_iv_lmk_dtr,
	.init = crypt_iv_lmk_init,
	.wipe = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post = crypt_iv_lmk_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

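/*
 * Worked example of the request layout used by the helpers above and
 * documented in struct crypt_config (numbers are illustrative, not taken
 * from a real build): if the ablkcipher request plus its context needs 76
 * bytes and the context alignment is 8, dmreq_start is rounded up to 80
 * (plus any alignmask slack), struct dm_crypt_request starts at that
 * offset, and iv_of_dmreq() returns the first alignmask-aligned address
 * after it for the IV.
 */
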
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}

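/*
 * Illustrative note on key_index in crypt_alloc_req() above: tfms_count is
 * a power of two, so the mask picks the tfm from the IV sector modulo the
 * key count.  For example, with a 64-key "aes:64-cbc-lmk" mapping and
 * iv_offset 0, sector 0 uses tfms[0], sector 63 uses tfms[63], and sector
 * 64 wraps back to tfms[0].
 */
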
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio.  The caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */
		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);

		r = crypt_convert(cc, &io->ctx);
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

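/*
 * Quick example for the two helpers above (illustrative values): the table
 * string "feedbeef" decodes to the key bytes { 0xfe, 0xed, 0xbe, 0xef },
 * and crypt_encode_key() turns those bytes back into "feedbeef" for the
 * STATUSTYPE_TABLE output; a string with an odd length or non-hex
 * characters is rejected with -EINVAL.
 */
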
static void crypt_free_tfms(struct crypt_config *cc, int cpu)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;

	for (i = 0; i < cc->tfms_count; i++)
		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
			crypto_free_ablkcipher(cpu_cc->tfms[i]);
			cpu_cc->tfms[i] = NULL;
		}
}

static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;
	int err;

	for (i = 0; i < cc->tfms_count; i++) {
		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cpu_cc->tfms[i])) {
			err = PTR_ERR(cpu_cc->tfms[i]);
			crypt_free_tfms(cc, cpu);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int cpu, err = 0, i, r;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < cc->tfms_count; i++) {
			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
						     cc->key + (i * subkey_size), subkey_size);
			if (r)
				err = r;
		}
	}

	return err;
}

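/*
 * Illustrative example for crypt_setkey_allcpus() above (parameters are
 * assumptions, not from the original source): with "aes:2-cbc-plain" and a
 * 64-byte key, tfms_count = 2, so subkey_size = 64 >> 1 = 32; tfms[0] is
 * keyed with bytes 0..31 and tfms[1] with bytes 32..63, and the same split
 * is repeated for every possible CPU.
 */
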
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
			crypt_free_tfms(cc, cpu);
		}

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int cpu, ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
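	/*
	 * Illustrative parse of the code below (example string, not from the
	 * original source): for "aes:64-cbc-essiv:sha256" this yields
	 * cipher = "aes", keycount = "64", chainmode = "cbc",
	 * ivmode = "essiv" and ivopts = "sha256", and the crypto API string
	 * built further down becomes "cbc(aes)".
	 */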
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
				 __alignof__(struct crypt_cpu));
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	for_each_possible_cpu(cpu) {
		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
		if (ret < 0) {
			ti->error = "Error allocating crypto tfm";
			goto bad;
		}
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/* Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as IV seed.
		 */
		if (cc->key_size % cc->key_parts)
			cc->key_parts++;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
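/*
 * Example table line (device and key values are illustrative only):
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 0123456789abcdef0123456789abcdef 0 /dev/sdb 0
 *
 * maps 409600 sectors, encrypting with AES-CBC and ESSIV(sha256), with no
 * IV offset, on /dev/sdb starting at sector 0; an optional trailing
 * "1 allow_discards" enables discard passthrough.
 */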
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_requests = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	ti->discard_zeroes_data_unsupported = 1;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_requests)
			DMEMIT(" 1 allow_discards");

		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
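/*
 * Usage sketch (hypothetical device name): with the target suspended,
 *
 *   dmsetup message cryptvol 0 key wipe
 *   dmsetup message cryptvol 0 key set <new key in hex>
 *
 * wipes and then replaces the volume key; crypt_message() below rejects
 * both operations unless DM_CRYPT_SUSPENDED is set.
 */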
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 11, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");