/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * Duplicated per-CPU state for cipher.
 */
struct crypt_cpu {
	struct ablkcipher_request *req;
	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher *tfms[0];
};

/*
 * The fields in here must be read only after initialization;
 * changing state should be in crypt_cpu.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Duplicated per cpu state. Access through
	 * per_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return __this_cpu_ptr(cc->cpu)->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
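/*
 * Worked example (illustrative): with a 16-byte IV and sector number
 * 0x0102030405060708, the generators below produce
 *
 *   plain:   08 07 06 05 00 00 ... 00  (low 32 bits, little-endian, zero padded)
 *   plain64: 08 07 06 05 04 03 02 01 00 ... 00  (all 64 bits, little-endian)
 *   essiv:   the plain64 block additionally encrypted with a cipher keyed
 *            by hash(volume key), as set up in crypt_iv_essiv_init()
 *   benbi:   big-endian ((sector << (9 - log2(cipher block size))) + 1),
 *            stored in the last 8 bytes of the IV
 */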
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err, cpu;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;

		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
					   crypto_hash_digestsize(essiv->hash_tfm));
		if (err)
			return err;
	}

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int cpu, r, err = 0;

	memset(essiv->salt, 0, salt_size);

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
		if (r)
			err = r;
	}

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	int cpu;
	struct crypt_cpu *cpu_cc;
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	for_each_possible_cpu(cpu) {
		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
		essiv_tfm = cpu_cc->iv_private;

		if (essiv_tfm)
			crypto_free_cipher(essiv_tfm);

		cpu_cc->iv_private = NULL;
	}
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
					    crypto_hash_digestsize(hash_tfm));
		if (IS_ERR(essiv_tfm)) {
			crypt_iv_essiv_dtr(cc);
			return PTR_ERR(essiv_tfm);
		}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	u32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src, KM_USER0);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst, KM_USER0);
	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr = crypt_iv_essiv_ctr,
	.dtr = crypt_iv_essiv_dtr,
	.init = crypt_iv_essiv_init,
	.wipe = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr = crypt_iv_benbi_ctr,
	.dtr = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr = crypt_iv_lmk_ctr,
	.dtr = crypt_iv_lmk_dtr,
	.init = crypt_iv_lmk_init,
	.wipe = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post = crypt_iv_lmk_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

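/*
 * Note (descriptive): crypt_alloc_req() below keeps one crypto request
 * cached per CPU.  It is reused for synchronous completions; when the
 * crypto layer goes asynchronous the cached pointer is dropped and the
 * request is returned to the mempool from kcryptd_async_done().  The tfm
 * used for a sector is tfms[sector & (tfms_count - 1)], which is what
 * spreads sectors over the keys in multi-key (":<keycount>") mode.
 */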
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through*/
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

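/*
 * Write path overview (descriptive note): the plaintext bio is encrypted
 * into freshly allocated pages.  If crypt_alloc_buffer() runs out of pages,
 * the bio is handled as several smaller fragments; each fragment is
 * submitted as soon as its encryption finishes, and completion of the
 * whole request is tracked through io->base_io and the pending counters.
 */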
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static void crypt_free_tfms(struct crypt_config *cc, int cpu)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;

	for (i = 0; i < cc->tfms_count; i++)
		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
			crypto_free_ablkcipher(cpu_cc->tfms[i]);
			cpu_cc->tfms[i] = NULL;
		}
}

static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;
	int err;

	for (i = 0; i < cc->tfms_count; i++) {
		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cpu_cc->tfms[i])) {
			err = PTR_ERR(cpu_cc->tfms[i]);
			crypt_free_tfms(cc, cpu);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int cpu, err = 0, i, r;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < cc->tfms_count; i++) {
			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
						     cc->key + (i * subkey_size), subkey_size);
			if (r)
				err = r;
		}
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
			crypt_free_tfms(cc, cpu);
		}

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int cpu, ret = -EINVAL;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
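	/*
	 * Parsing example (illustrative): "aes:64-xts-plain64" is split below
	 * into cipher = "aes", keycount = "64" (64 independent tfms),
	 * chainmode = "xts", ivmode = "plain64", ivopts = NULL, and the
	 * crypto API string passed to crypt_alloc_tfms() becomes "xts(aes)".
	 */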
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
				 __alignof__(struct crypt_cpu));
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	for_each_possible_cpu(cpu) {
		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
		if (ret < 0) {
			ti->error = "Error allocating crypto tfm";
			goto bad;
		}
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/* Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 */
		if (cc->key_size % cc->key_parts)
			cc->key_parts++;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
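/*
 * Example (illustrative): the table line
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digit key> 0 /dev/sdb 0
 *
 * maps 409600 sectors (200 MiB) to the start of /dev/sdb, encrypted with
 * AES-256 in CBC mode using ESSIV(sha256) IVs and no extra IV offset.
 */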
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	struct dm_arg_set as;
	const char *opt_string;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_requests = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_requests)
			DMEMIT(" 1 allow_discards");

		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
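/*
 * e.g. (illustrative), while the mapped device is suspended:
 *
 *   dmsetup message <mapped-device> 0 key wipe
 *   dmsetup message <mapped-device> 0 key set <new key in hex>
 */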
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name = "crypt",
	.version = {1, 11, 0},
	.module = THIS_MODULE,
	.ctr = crypt_ctr,
	.dtr = crypt_dtr,
	.map = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");