/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Layout of each crypto request:
	 *
	 *    struct ablkcipher_request
	 *       context
	 *       padding
	 *    struct dm_crypt_request
	 *       padding
	 *    IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};
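/*
 * Illustration of the request layout above (a sketch mirroring the pointer
 * arithmetic used by dmreq_of_req() and crypt_convert_block() further down;
 * exact offsets depend on the cipher driver):
 *
 *   req   = allocation from cc->req_pool
 *   dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start)
 *   iv    = (u8 *)ALIGN((unsigned long)(dmreq + 1),
 *                       crypto_ablkcipher_alignmask(cc->tfm) + 1)
 *
 * where cc->dmreq_start is computed in crypt_ctr() from the tfm's request
 * size and alignment constraints.
 */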
#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}
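/*
 * Example of the ESSIV scheme set up above (illustrative; the cipher and
 * digest names are just one common choice): for a mapping created as
 * "aes-cbc-essiv:sha256", crypt_iv_essiv_ctr() computes salt = SHA-256(key)
 * and keys essiv_tfm with it, and crypt_iv_essiv_gen() below then derives
 * the per-sector IV as
 *
 *   IV(sector) = AES_salt(little-endian 64-bit sector number, zero padded)
 */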
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is set below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
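/*
 * Worked example for benbi (a sketch, assuming a 16-byte cipher block as
 * with LRW-32-AES): crypt_iv_benbi_ctr() above computes log = ilog2(16) = 4
 * and benbi_shift = 9 - 4 = 5, i.e. 32 cipher blocks per 512-byte sector.
 * crypt_iv_benbi_gen() then stores be64((sector << 5) + 1) in the last
 * 8 bytes of the IV, so sector 0 yields block count 1, sector 1 yields 33,
 * and so on.
 */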
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(cc->tfm) + 1);

	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	ablkcipher_request_set_tfm(cc->req, cc->tfm);
	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					kcryptd_async_done,
					dmreq_of_req(cc, cc->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}
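/*
 * A note on the return codes handled above (descriptive only, based on the
 * ablkcipher API flags requested in crypt_alloc_req()):
 *
 *   -EBUSY       the request was put on the backlog because the driver's
 *                queue is full; kcryptd_async_done() signals ctx->restart
 *                (by being called with -EINPROGRESS) once the request is
 *                accepted, after which it completes asynchronously.
 *   -EINPROGRESS the request was accepted and will complete asynchronously
 *                via kcryptd_async_done().
 *   0            the request completed synchronously.
 */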
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations.
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
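/*
 * Rough call flow through the two queues (a description of the code below,
 * not additional behaviour):
 *
 * READ:  crypt_map -> kcryptd_queue_io -> kcryptd_io_read ->
 *        generic_make_request; on completion crypt_endio ->
 *        kcryptd_queue_crypt -> kcryptd_crypt_read_convert.
 *
 * WRITE: crypt_map -> kcryptd_queue_crypt -> kcryptd_crypt_write_convert ->
 *        kcryptd_crypt_write_io_submit -> generic_make_request (directly,
 *        or via kcryptd_queue_io/kcryptd_io_write when the conversion
 *        finished asynchronously).
 *
 * On the write path crypt_alloc_buffer() may return a bio smaller than the
 * whole request; kcryptd_crypt_write_convert() then loops over the remaining
 * data, giving each further fragment its own dm_crypt_io chained to the
 * original io via base_io.
 */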
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(WRITE, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_ablkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi",
	 * "null". See the comments at the IV generators above.
	 */
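	/*
	 * For illustration (placeholders, not an exhaustive list): a table
	 * line such as
	 *   "0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb 0"
	 * arrives here with argv[0] = "aes-cbc-essiv:sha256", which the
	 * strsep() calls above split into cipher "aes", chainmode "cbc",
	 * ivmode "essiv" and ivopts "sha256", selecting crypt_iv_essiv_ops
	 * below; "aes-cbc-plain" would select crypt_iv_plain_ops with no
	 * ivopts.
	 */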
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad_req_pool;
	}
	cc->req = NULL;

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->req_pool);
bad_req_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_ablkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	kzfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	if (cc->req)
		mempool_free(cc->req, cc->req_pool);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_ablkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;

	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
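/*
 * Example usage of the message interface above (illustrative only;
 * "cryptvol" is a placeholder device name):
 *
 *   dmsetup suspend cryptvol
 *   dmsetup message cryptvol 0 key wipe
 *   dmsetup message cryptvol 0 key set <new key in hex>
 *   dmsetup resume cryptvol
 *
 * The device must be suspended, as enforced by the DM_CRYPT_SUSPENDED
 * check above.
 */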
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static struct target_type crypt_target = {
	.name        = "crypt",
	.version     = {1, 6, 0},
	.module      = THIS_MODULE,
	.ctr         = crypt_ctr,
	.dtr         = crypt_dtr,
	.map         = crypt_map,
	.status      = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume   = crypt_preresume,
	.resume      = crypt_resume,
	.message     = crypt_message,
	.merge       = crypt_merge,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");