xref: /openbmc/linux/drivers/md/dm-crypt.c (revision 9ac8d3fb)
1 /*
2  * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
3  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
4  * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
5  *
6  * This file is released under the GPL.
7  */
8 
9 #include <linux/completion.h>
10 #include <linux/err.h>
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/bio.h>
15 #include <linux/blkdev.h>
16 #include <linux/mempool.h>
17 #include <linux/slab.h>
18 #include <linux/crypto.h>
19 #include <linux/workqueue.h>
20 #include <linux/backing-dev.h>
21 #include <asm/atomic.h>
22 #include <linux/scatterlist.h>
23 #include <asm/page.h>
24 #include <asm/unaligned.h>
25 
26 #include <linux/device-mapper.h>
27 
28 #define DM_MSG_PREFIX "crypt"
29 #define MESG_STR(x) x, sizeof(x)
30 
31 /*
32  * context holding the current state of a multi-part conversion
33  */
34 struct convert_context {
35 	struct completion restart;
36 	struct bio *bio_in;
37 	struct bio *bio_out;
38 	unsigned int offset_in;
39 	unsigned int offset_out;
40 	unsigned int idx_in;
41 	unsigned int idx_out;
42 	sector_t sector;
43 	atomic_t pending;
44 };
45 
46 /*
47  * per bio private data
48  */
49 struct dm_crypt_io {
50 	struct dm_target *target;
51 	struct bio *base_bio;
52 	struct work_struct work;
53 
54 	struct convert_context ctx;
55 
56 	atomic_t pending;
57 	int error;
58 	sector_t sector;
59 	struct dm_crypt_io *base_io;
60 };
61 
62 struct dm_crypt_request {
63 	struct scatterlist sg_in;
64 	struct scatterlist sg_out;
65 };
66 
67 struct crypt_config;
68 
69 struct crypt_iv_operations {
70 	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
71 		   const char *opts);
72 	void (*dtr)(struct crypt_config *cc);
73 	const char *(*status)(struct crypt_config *cc);
74 	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
75 };
76 
77 /*
78  * Crypt: maps a linear range of a block device
79  * and encrypts / decrypts at the same time.
80  */
81 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
82 struct crypt_config {
83 	struct dm_dev *dev;
84 	sector_t start;
85 
86 	/*
87 	 * pool for per bio private data, crypto requests and
88 	 * encryption requests/buffer pages
89 	 */
90 	mempool_t *io_pool;
91 	mempool_t *req_pool;
92 	mempool_t *page_pool;
93 	struct bio_set *bs;
94 
95 	struct workqueue_struct *io_queue;
96 	struct workqueue_struct *crypt_queue;
97 
98 	/*
99 	 * crypto related data
100 	 */
101 	struct crypt_iv_operations *iv_gen_ops;
102 	char *iv_mode;
103 	union {
104 		struct crypto_cipher *essiv_tfm;
105 		int benbi_shift;
106 	} iv_gen_private;
107 	sector_t iv_offset;
108 	unsigned int iv_size;
109 
110 	/*
111 	 * Layout of each crypto request:
112 	 *
113 	 *   struct ablkcipher_request
114 	 *      context
115 	 *      padding
116 	 *   struct dm_crypt_request
117 	 *      padding
118 	 *   IV
119 	 *
120 	 * The padding is added so that dm_crypt_request and the IV are
121 	 * correctly aligned.
122 	 */
123 	unsigned int dmreq_start;
124 	struct ablkcipher_request *req;
125 
126 	char cipher[CRYPTO_MAX_ALG_NAME];
127 	char chainmode[CRYPTO_MAX_ALG_NAME];
128 	struct crypto_ablkcipher *tfm;
129 	unsigned long flags;
130 	unsigned int key_size;
131 	u8 key[0];
132 };
133 
134 #define MIN_IOS        16
135 #define MIN_POOL_PAGES 32
136 #define MIN_BIO_PAGES  8
137 
138 static struct kmem_cache *_crypt_io_pool;
139 
140 static void clone_init(struct dm_crypt_io *, struct bio *);
141 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
142 
143 /*
144  * Different IV generation algorithms:
145  *
146  * plain: the initial vector is the 32-bit little-endian version of the sector
147  *        number, padded with zeros if necessary.
148  *
149  * essiv: "encrypted sector|salt initial vector", the sector number is
150  *        encrypted with the bulk cipher using a salt as key. The salt
151  *        should be derived from the bulk cipher's key via hashing.
152  *
153  * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
154  *        (needed for LRW-32-AES and possibly other narrow block modes)
155  *
156  * null: the initial vector is always zero.  Provides compatibility with
157  *       obsolete loop_fish2 devices.  Do not use for new devices.
158  *
159  * plumb: unimplemented, see:
160  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
161  */
162 
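/*
 * Worked example (illustrative): with a 16-byte IV and sector 0x11223344,
 * "plain" yields the little-endian bytes 44 33 22 11 followed by twelve
 * zero bytes.
 */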
163 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
164 {
165 	memset(iv, 0, cc->iv_size);
166 	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
167 
168 	return 0;
169 }
170 
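/*
 * Set up ESSIV: hash the volume key with the user-supplied digest to
 * derive a salt, then key a single-block cipher with that salt.  The
 * per-sector IV is the encryption of the sector number under that salt
 * (see crypt_iv_essiv_gen below).
 */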
171 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
172 			      const char *opts)
173 {
174 	struct crypto_cipher *essiv_tfm;
175 	struct crypto_hash *hash_tfm;
176 	struct hash_desc desc;
177 	struct scatterlist sg;
178 	unsigned int saltsize;
179 	u8 *salt;
180 	int err;
181 
182 	if (opts == NULL) {
183 		ti->error = "Digest algorithm missing for ESSIV mode";
184 		return -EINVAL;
185 	}
186 
187 	/* Hash the cipher key with the given hash algorithm */
188 	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
189 	if (IS_ERR(hash_tfm)) {
190 		ti->error = "Error initializing ESSIV hash";
191 		return PTR_ERR(hash_tfm);
192 	}
193 
194 	saltsize = crypto_hash_digestsize(hash_tfm);
195 	salt = kmalloc(saltsize, GFP_KERNEL);
196 	if (salt == NULL) {
197 		ti->error = "Error kmallocing salt storage in ESSIV";
198 		crypto_free_hash(hash_tfm);
199 		return -ENOMEM;
200 	}
201 
202 	sg_init_one(&sg, cc->key, cc->key_size);
203 	desc.tfm = hash_tfm;
204 	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
205 	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
206 	crypto_free_hash(hash_tfm);
207 
208 	if (err) {
209 		ti->error = "Error calculating hash in ESSIV";
210 		kfree(salt);
211 		return err;
212 	}
213 
214 	/* Setup the essiv_tfm with the given salt */
215 	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
216 	if (IS_ERR(essiv_tfm)) {
217 		ti->error = "Error allocating crypto tfm for ESSIV";
218 		kfree(salt);
219 		return PTR_ERR(essiv_tfm);
220 	}
221 	if (crypto_cipher_blocksize(essiv_tfm) !=
222 	    crypto_ablkcipher_ivsize(cc->tfm)) {
223 		ti->error = "Block size of ESSIV cipher does "
224 			    "not match IV size of block cipher";
225 		crypto_free_cipher(essiv_tfm);
226 		kfree(salt);
227 		return -EINVAL;
228 	}
229 	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
230 	if (err) {
231 		ti->error = "Failed to set key for ESSIV cipher";
232 		crypto_free_cipher(essiv_tfm);
233 		kfree(salt);
234 		return err;
235 	}
236 	kfree(salt);
237 
238 	cc->iv_gen_private.essiv_tfm = essiv_tfm;
239 	return 0;
240 }
241 
242 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
243 {
244 	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
245 	cc->iv_gen_private.essiv_tfm = NULL;
246 }
247 
248 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
249 {
250 	memset(iv, 0, cc->iv_size);
251 	*(u64 *)iv = cpu_to_le64(sector);
252 	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
253 	return 0;
254 }
255 
256 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
257 			      const char *opts)
258 {
259 	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
260 	int log = ilog2(bs);
261 
262 	/* we need to calculate how far we must shift the sector count
263 	 * to get the cipher block count; this shift is used in _gen */
264 
265 	if (1 << log != bs) {
266 		ti->error = "cypher blocksize is not a power of 2";
267 		return -EINVAL;
268 	}
269 
270 	if (log > 9) {
271 		ti->error = "cypher blocksize is > 512";
272 		return -EINVAL;
273 	}
274 
275 	cc->iv_gen_private.benbi_shift = 9 - log;
276 
277 	return 0;
278 }
279 
280 static void crypt_iv_benbi_dtr(struct crypt_config *cc)
281 {
282 }
283 
284 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
285 {
286 	__be64 val;
287 
288 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* remaining bytes are set below */
289 
290 	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
291 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
292 
293 	return 0;
294 }
295 
296 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
297 {
298 	memset(iv, 0, cc->iv_size);
299 
300 	return 0;
301 }
302 
303 static struct crypt_iv_operations crypt_iv_plain_ops = {
304 	.generator = crypt_iv_plain_gen
305 };
306 
307 static struct crypt_iv_operations crypt_iv_essiv_ops = {
308 	.ctr       = crypt_iv_essiv_ctr,
309 	.dtr       = crypt_iv_essiv_dtr,
310 	.generator = crypt_iv_essiv_gen
311 };
312 
313 static struct crypt_iv_operations crypt_iv_benbi_ops = {
314 	.ctr	   = crypt_iv_benbi_ctr,
315 	.dtr	   = crypt_iv_benbi_dtr,
316 	.generator = crypt_iv_benbi_gen
317 };
318 
319 static struct crypt_iv_operations crypt_iv_null_ops = {
320 	.generator = crypt_iv_null_gen
321 };
322 
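/*
 * Prepare a conversion context for translating bio_in into bio_out,
 * starting at the given sector (offset by cc->iv_offset for IV purposes).
 */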
323 static void crypt_convert_init(struct crypt_config *cc,
324 			       struct convert_context *ctx,
325 			       struct bio *bio_out, struct bio *bio_in,
326 			       sector_t sector)
327 {
328 	ctx->bio_in = bio_in;
329 	ctx->bio_out = bio_out;
330 	ctx->offset_in = 0;
331 	ctx->offset_out = 0;
332 	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
333 	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
334 	ctx->sector = sector + cc->iv_offset;
335 	init_completion(&ctx->restart);
336 }
337 
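/*
 * Encrypt or decrypt a single 512-byte sector, advancing the offsets
 * and bvec indices in the conversion context as a side effect.
 */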
338 static int crypt_convert_block(struct crypt_config *cc,
339 			       struct convert_context *ctx,
340 			       struct ablkcipher_request *req)
341 {
342 	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
343 	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
344 	struct dm_crypt_request *dmreq;
345 	u8 *iv;
346 	int r = 0;
347 
348 	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
349 	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
350 			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
351 
352 	sg_init_table(&dmreq->sg_in, 1);
353 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
354 		    bv_in->bv_offset + ctx->offset_in);
355 
356 	sg_init_table(&dmreq->sg_out, 1);
357 	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
358 		    bv_out->bv_offset + ctx->offset_out);
359 
360 	ctx->offset_in += 1 << SECTOR_SHIFT;
361 	if (ctx->offset_in >= bv_in->bv_len) {
362 		ctx->offset_in = 0;
363 		ctx->idx_in++;
364 	}
365 
366 	ctx->offset_out += 1 << SECTOR_SHIFT;
367 	if (ctx->offset_out >= bv_out->bv_len) {
368 		ctx->offset_out = 0;
369 		ctx->idx_out++;
370 	}
371 
372 	if (cc->iv_gen_ops) {
373 		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
374 		if (r < 0)
375 			return r;
376 	}
377 
378 	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
379 				     1 << SECTOR_SHIFT, iv);
380 
381 	if (bio_data_dir(ctx->bio_in) == WRITE)
382 		r = crypto_ablkcipher_encrypt(req);
383 	else
384 		r = crypto_ablkcipher_decrypt(req);
385 
386 	return r;
387 }
388 
389 static void kcryptd_async_done(struct crypto_async_request *async_req,
390 			       int error);
391 static void crypt_alloc_req(struct crypt_config *cc,
392 			    struct convert_context *ctx)
393 {
394 	if (!cc->req)
395 		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
396 	ablkcipher_request_set_tfm(cc->req, cc->tfm);
397 	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
398 					     CRYPTO_TFM_REQ_MAY_SLEEP,
399 					     kcryptd_async_done, ctx);
400 }
401 
402 /*
403  * Encrypt / decrypt data from one bio to another one (can be the same one)
404  */
405 static int crypt_convert(struct crypt_config *cc,
406 			 struct convert_context *ctx)
407 {
408 	int r;
409 
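	/*
	 * Start with a pending count of one; the callers of crypt_convert()
	 * drop this base reference to detect overall completion.
	 */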
410 	atomic_set(&ctx->pending, 1);
411 
412 	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
413 	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
414 
415 		crypt_alloc_req(cc, ctx);
416 
417 		atomic_inc(&ctx->pending);
418 
419 		r = crypt_convert_block(cc, ctx, cc->req);
420 
421 		switch (r) {
422 		/* async */
423 		case -EBUSY:
424 			wait_for_completion(&ctx->restart);
425 			INIT_COMPLETION(ctx->restart);
426 			/* fall through */
427 		case -EINPROGRESS:
428 			cc->req = NULL;
429 			ctx->sector++;
430 			continue;
431 
432 		/* sync */
433 		case 0:
434 			atomic_dec(&ctx->pending);
435 			ctx->sector++;
436 			cond_resched();
437 			continue;
438 
439 		/* error */
440 		default:
441 			atomic_dec(&ctx->pending);
442 			return r;
443 		}
444 	}
445 
446 	return 0;
447 }
448 
449 static void dm_crypt_bio_destructor(struct bio *bio)
450 {
451 	struct dm_crypt_io *io = bio->bi_private;
452 	struct crypt_config *cc = io->target->private;
453 
454 	bio_free(bio, cc->bs);
455 }
456 
457 /*
458  * Generate a new unfragmented bio with the given size
459  * This should never violate the device limitations
460  * May return a smaller bio when running out of pages, indicated by
461  * *out_of_pages set to 1.
462  */
463 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
464 				      unsigned *out_of_pages)
465 {
466 	struct crypt_config *cc = io->target->private;
467 	struct bio *clone;
468 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
469 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
470 	unsigned i, len;
471 	struct page *page;
472 
473 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
474 	if (!clone)
475 		return NULL;
476 
477 	clone_init(io, clone);
478 	*out_of_pages = 0;
479 
480 	for (i = 0; i < nr_iovecs; i++) {
481 		page = mempool_alloc(cc->page_pool, gfp_mask);
482 		if (!page) {
483 			*out_of_pages = 1;
484 			break;
485 		}
486 
487 		/*
488 		 * if additional pages cannot be allocated without waiting,
489 		 * return a partially allocated bio, the caller will then try
490 		 * to allocate additional bios while submitting this partial bio
491 		 */
492 		if (i == (MIN_BIO_PAGES - 1))
493 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
494 
495 		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
496 
497 		if (!bio_add_page(clone, page, len, 0)) {
498 			mempool_free(page, cc->page_pool);
499 			break;
500 		}
501 
502 		size -= len;
503 	}
504 
505 	if (!clone->bi_size) {
506 		bio_put(clone);
507 		return NULL;
508 	}
509 
510 	return clone;
511 }
512 
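/* Return all pages of an encryption buffer to the page mempool. */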
513 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
514 {
515 	unsigned int i;
516 	struct bio_vec *bv;
517 
518 	for (i = 0; i < clone->bi_vcnt; i++) {
519 		bv = bio_iovec_idx(clone, i);
520 		BUG_ON(!bv->bv_page);
521 		mempool_free(bv->bv_page, cc->page_pool);
522 		bv->bv_page = NULL;
523 	}
524 }
525 
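/* Allocate and initialise per-bio private data from the io mempool. */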
526 static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
527 					  struct bio *bio, sector_t sector)
528 {
529 	struct crypt_config *cc = ti->private;
530 	struct dm_crypt_io *io;
531 
532 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
533 	io->target = ti;
534 	io->base_bio = bio;
535 	io->sector = sector;
536 	io->error = 0;
537 	io->base_io = NULL;
538 	atomic_set(&io->pending, 0);
539 
540 	return io;
541 }
542 
543 static void crypt_inc_pending(struct dm_crypt_io *io)
544 {
545 	atomic_inc(&io->pending);
546 }
547 
548 /*
549  * One of the bios was finished. Check for completion of
550  * the whole request and correctly clean up the buffer.
551  * If base_io is set, wait for the last fragment to complete.
552  */
553 static void crypt_dec_pending(struct dm_crypt_io *io)
554 {
555 	struct crypt_config *cc = io->target->private;
556 
557 	if (!atomic_dec_and_test(&io->pending))
558 		return;
559 
560 	if (likely(!io->base_io))
561 		bio_endio(io->base_bio, io->error);
562 	else {
563 		if (io->error && !io->base_io->error)
564 			io->base_io->error = io->error;
565 		crypt_dec_pending(io->base_io);
566 	}
567 
568 	mempool_free(io, cc->io_pool);
569 }
570 
571 /*
572  * kcryptd/kcryptd_io:
573  *
574  * Needed because it would be very unwise to do decryption in an
575  * interrupt context.
576  *
577  * kcryptd performs the actual encryption or decryption.
578  *
579  * kcryptd_io performs the IO submission.
580  *
581  * They must be separated as otherwise the final stages could be
582  * starved by new requests which can block in the first stages due
583  * to memory allocation.
584  */
585 static void crypt_endio(struct bio *clone, int error)
586 {
587 	struct dm_crypt_io *io = clone->bi_private;
588 	struct crypt_config *cc = io->target->private;
589 	unsigned rw = bio_data_dir(clone);
590 
591 	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
592 		error = -EIO;
593 
594 	/*
595 	 * free the processed pages
596 	 */
597 	if (rw == WRITE)
598 		crypt_free_buffer_pages(cc, clone);
599 
600 	bio_put(clone);
601 
602 	if (rw == READ && !error) {
603 		kcryptd_queue_crypt(io);
604 		return;
605 	}
606 
607 	if (unlikely(error))
608 		io->error = error;
609 
610 	crypt_dec_pending(io);
611 }
612 
613 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
614 {
615 	struct crypt_config *cc = io->target->private;
616 
617 	clone->bi_private = io;
618 	clone->bi_end_io  = crypt_endio;
619 	clone->bi_bdev    = cc->dev->bdev;
620 	clone->bi_rw      = io->base_bio->bi_rw;
621 	clone->bi_destructor = dm_crypt_bio_destructor;
622 }
623 
624 static void kcryptd_io_read(struct dm_crypt_io *io)
625 {
626 	struct crypt_config *cc = io->target->private;
627 	struct bio *base_bio = io->base_bio;
628 	struct bio *clone;
629 
630 	crypt_inc_pending(io);
631 
632 	/*
633 	 * The block layer might modify the bvec array, so always
634 	 * copy the required bvecs because we need the originals
635 	 * in order to decrypt the whole bio data *afterwards*.
636 	 */
637 	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
638 	if (unlikely(!clone)) {
639 		io->error = -ENOMEM;
640 		crypt_dec_pending(io);
641 		return;
642 	}
643 
644 	clone_init(io, clone);
645 	clone->bi_idx = 0;
646 	clone->bi_vcnt = bio_segments(base_bio);
647 	clone->bi_size = base_bio->bi_size;
648 	clone->bi_sector = cc->start + io->sector;
649 	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
650 	       sizeof(struct bio_vec) * clone->bi_vcnt);
651 
652 	generic_make_request(clone);
653 }
654 
655 static void kcryptd_io_write(struct dm_crypt_io *io)
656 {
657 	struct bio *clone = io->ctx.bio_out;
658 	generic_make_request(clone);
659 }
660 
661 static void kcryptd_io(struct work_struct *work)
662 {
663 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
664 
665 	if (bio_data_dir(io->base_bio) == READ)
666 		kcryptd_io_read(io);
667 	else
668 		kcryptd_io_write(io);
669 }
670 
671 static void kcryptd_queue_io(struct dm_crypt_io *io)
672 {
673 	struct crypt_config *cc = io->target->private;
674 
675 	INIT_WORK(&io->work, kcryptd_io);
676 	queue_work(cc->io_queue, &io->work);
677 }
678 
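/*
 * Submit an encrypted clone bio: directly on the synchronous path, or
 * via the io workqueue when called from the async completion handler.
 */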
679 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
680 					  int error, int async)
681 {
682 	struct bio *clone = io->ctx.bio_out;
683 	struct crypt_config *cc = io->target->private;
684 
685 	if (unlikely(error < 0)) {
686 		crypt_free_buffer_pages(cc, clone);
687 		bio_put(clone);
688 		io->error = -EIO;
689 		crypt_dec_pending(io);
690 		return;
691 	}
692 
693 	/* crypt_convert should have filled the clone bio */
694 	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
695 
696 	clone->bi_sector = cc->start + io->sector;
697 
698 	if (async)
699 		kcryptd_queue_io(io);
700 	else
701 		generic_make_request(clone);
702 }
703 
704 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
705 {
706 	struct crypt_config *cc = io->target->private;
707 	struct bio *clone;
708 	struct dm_crypt_io *new_io;
709 	int crypt_finished;
710 	unsigned out_of_pages = 0;
711 	unsigned remaining = io->base_bio->bi_size;
712 	sector_t sector = io->sector;
713 	int r;
714 
715 	/*
716 	 * Prevent io from disappearing until this function completes.
717 	 */
718 	crypt_inc_pending(io);
719 	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
720 
721 	/*
722 	 * The allocated buffers can be smaller than the whole bio,
723 	 * so repeat the whole process until all the data can be handled.
724 	 */
725 	while (remaining) {
726 		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
727 		if (unlikely(!clone)) {
728 			io->error = -ENOMEM;
729 			break;
730 		}
731 
732 		io->ctx.bio_out = clone;
733 		io->ctx.idx_out = 0;
734 
735 		remaining -= clone->bi_size;
736 		sector += bio_sectors(clone);
737 
738 		crypt_inc_pending(io);
739 		r = crypt_convert(cc, &io->ctx);
740 		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
741 
742 		/* Encryption was already finished, submit io now */
743 		if (crypt_finished) {
744 			kcryptd_crypt_write_io_submit(io, r, 0);
745 
746 			/*
747 			 * If there was an error, do not try next fragments.
748 			 * For async, error is processed in async handler.
749 			 */
750 			if (unlikely(r < 0))
751 				break;
752 
753 			io->sector = sector;
754 		}
755 
756 		/*
757 		 * Out of memory -> run queues
758 		 * But don't wait if split was due to the io size restriction
759 		 */
760 		if (unlikely(out_of_pages))
761 			congestion_wait(WRITE, HZ/100);
762 
763 		/*
764 		 * With async crypto it is unsafe to share the crypto context
765 		 * between fragments, so switch to a new dm_crypt_io structure.
766 		 */
767 		if (unlikely(!crypt_finished && remaining)) {
768 			new_io = crypt_io_alloc(io->target, io->base_bio,
769 						sector);
770 			crypt_inc_pending(new_io);
771 			crypt_convert_init(cc, &new_io->ctx, NULL,
772 					   io->base_bio, sector);
773 			new_io->ctx.idx_in = io->ctx.idx_in;
774 			new_io->ctx.offset_in = io->ctx.offset_in;
775 
776 			/*
777 			 * Fragments after the first use the base_io
778 			 * pending count.
779 			 */
780 			if (!io->base_io)
781 				new_io->base_io = io;
782 			else {
783 				new_io->base_io = io->base_io;
784 				crypt_inc_pending(io->base_io);
785 				crypt_dec_pending(io);
786 			}
787 
788 			io = new_io;
789 		}
790 	}
791 
792 	crypt_dec_pending(io);
793 }
794 
795 static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
796 {
797 	if (unlikely(error < 0))
798 		io->error = -EIO;
799 
800 	crypt_dec_pending(io);
801 }
802 
803 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
804 {
805 	struct crypt_config *cc = io->target->private;
806 	int r = 0;
807 
808 	crypt_inc_pending(io);
809 
810 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
811 			   io->sector);
812 
813 	r = crypt_convert(cc, &io->ctx);
814 
815 	if (atomic_dec_and_test(&io->ctx.pending))
816 		kcryptd_crypt_read_done(io, r);
817 
818 	crypt_dec_pending(io);
819 }
820 
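/*
 * Completion callback for asynchronous cipher requests.  -EINPROGRESS
 * means a previously backlogged request is now being processed, so only
 * the submission loop in crypt_convert() is woken up.
 */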
821 static void kcryptd_async_done(struct crypto_async_request *async_req,
822 			       int error)
823 {
824 	struct convert_context *ctx = async_req->data;
825 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
826 	struct crypt_config *cc = io->target->private;
827 
828 	if (error == -EINPROGRESS) {
829 		complete(&ctx->restart);
830 		return;
831 	}
832 
833 	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
834 
835 	if (!atomic_dec_and_test(&ctx->pending))
836 		return;
837 
838 	if (bio_data_dir(io->base_bio) == READ)
839 		kcryptd_crypt_read_done(io, error);
840 	else
841 		kcryptd_crypt_write_io_submit(io, error, 1);
842 }
843 
844 static void kcryptd_crypt(struct work_struct *work)
845 {
846 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
847 
848 	if (bio_data_dir(io->base_bio) == READ)
849 		kcryptd_crypt_read_convert(io);
850 	else
851 		kcryptd_crypt_write_convert(io);
852 }
853 
854 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
855 {
856 	struct crypt_config *cc = io->target->private;
857 
858 	INIT_WORK(&io->work, kcryptd_crypt);
859 	queue_work(cc->crypt_queue, &io->work);
860 }
861 
862 /*
863  * Decode key from its hex representation
864  */
865 static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
866 {
867 	char buffer[3];
868 	char *endp;
869 	unsigned int i;
870 
871 	buffer[2] = '\0';
872 
873 	for (i = 0; i < size; i++) {
874 		buffer[0] = *hex++;
875 		buffer[1] = *hex++;
876 
877 		key[i] = (u8)simple_strtoul(buffer, &endp, 16);
878 
879 		if (endp != &buffer[2])
880 			return -EINVAL;
881 	}
882 
883 	if (*hex != '\0')
884 		return -EINVAL;
885 
886 	return 0;
887 }
888 
889 /*
890  * Encode key into its hex representation
891  */
892 static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
893 {
894 	unsigned int i;
895 
896 	for (i = 0; i < size; i++) {
897 		sprintf(hex, "%02x", *key);
898 		hex += 2;
899 		key++;
900 	}
901 }
902 
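/* Decode a hex-encoded key into cc->key ("-" denotes an empty key). */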
903 static int crypt_set_key(struct crypt_config *cc, char *key)
904 {
905 	unsigned key_size = strlen(key) >> 1;
906 
907 	if (cc->key_size && cc->key_size != key_size)
908 		return -EINVAL;
909 
910 	cc->key_size = key_size; /* initial settings */
911 
912 	if ((!key_size && strcmp(key, "-")) ||
913 	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
914 		return -EINVAL;
915 
916 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
917 
918 	return 0;
919 }
920 
921 static int crypt_wipe_key(struct crypt_config *cc)
922 {
923 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
924 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
925 	return 0;
926 }
927 
928 /*
929  * Construct an encryption mapping:
930  * <cipher> <key> <iv_offset> <dev_path> <start>
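 * e.g. (illustrative): aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb 0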
931  */
932 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
933 {
934 	struct crypt_config *cc;
935 	struct crypto_ablkcipher *tfm;
936 	char *tmp;
937 	char *cipher;
938 	char *chainmode;
939 	char *ivmode;
940 	char *ivopts;
941 	unsigned int key_size;
942 	unsigned long long tmpll;
943 
944 	if (argc != 5) {
945 		ti->error = "Not enough arguments";
946 		return -EINVAL;
947 	}
948 
949 	tmp = argv[0];
950 	cipher = strsep(&tmp, "-");
951 	chainmode = strsep(&tmp, "-");
952 	ivopts = strsep(&tmp, "-");
953 	ivmode = strsep(&ivopts, ":");
954 
955 	if (tmp)
956 		DMWARN("Unexpected additional cipher options");
957 
958 	key_size = strlen(argv[1]) >> 1;
959 
960  	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
961 	if (cc == NULL) {
962 		ti->error =
963 			"Cannot allocate transparent encryption context";
964 		return -ENOMEM;
965 	}
966 
967  	if (crypt_set_key(cc, argv[1])) {
968 		ti->error = "Error decoding key";
969 		goto bad_cipher;
970 	}
971 
972 	/* Compatibility mode for old dm-crypt cipher strings */
973 	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
974 		chainmode = "cbc";
975 		ivmode = "plain";
976 	}
977 
978 	if (strcmp(chainmode, "ecb") && !ivmode) {
979 		ti->error = "This chaining mode requires an IV mechanism";
980 		goto bad_cipher;
981 	}
982 
983 	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
984 		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
985 		ti->error = "Chain mode + cipher name is too long";
986 		goto bad_cipher;
987 	}
988 
989 	tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
990 	if (IS_ERR(tfm)) {
991 		ti->error = "Error allocating crypto tfm";
992 		goto bad_cipher;
993 	}
994 
995 	strcpy(cc->cipher, cipher);
996 	strcpy(cc->chainmode, chainmode);
997 	cc->tfm = tfm;
998 
999 	/*
1000 	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi", "null".
1001 	 * See the comments above the IV generation code.
1002 	 */
1003 
1004 	if (ivmode == NULL)
1005 		cc->iv_gen_ops = NULL;
1006 	else if (strcmp(ivmode, "plain") == 0)
1007 		cc->iv_gen_ops = &crypt_iv_plain_ops;
1008 	else if (strcmp(ivmode, "essiv") == 0)
1009 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
1010 	else if (strcmp(ivmode, "benbi") == 0)
1011 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
1012 	else if (strcmp(ivmode, "null") == 0)
1013 		cc->iv_gen_ops = &crypt_iv_null_ops;
1014 	else {
1015 		ti->error = "Invalid IV mode";
1016 		goto bad_ivmode;
1017 	}
1018 
1019 	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
1020 	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
1021 		goto bad_ivmode;
1022 
1023 	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
1024 	if (cc->iv_size)
1025 		/* at least a 64 bit sector number should fit in our buffer */
1026 		cc->iv_size = max(cc->iv_size,
1027 				  (unsigned int)(sizeof(u64) / sizeof(u8)));
1028 	else {
1029 		if (cc->iv_gen_ops) {
1030 			DMWARN("Selected cipher does not support IVs");
1031 			if (cc->iv_gen_ops->dtr)
1032 				cc->iv_gen_ops->dtr(cc);
1033 			cc->iv_gen_ops = NULL;
1034 		}
1035 	}
1036 
1037 	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
1038 	if (!cc->io_pool) {
1039 		ti->error = "Cannot allocate crypt io mempool";
1040 		goto bad_slab_pool;
1041 	}
1042 
1043 	cc->dmreq_start = sizeof(struct ablkcipher_request);
1044 	cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
1045 	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
1046 	cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
1047 			   ~(crypto_tfm_ctx_alignment() - 1);
1048 
1049 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
1050 			sizeof(struct dm_crypt_request) + cc->iv_size);
1051 	if (!cc->req_pool) {
1052 		ti->error = "Cannot allocate crypt request mempool";
1053 		goto bad_req_pool;
1054 	}
1055 	cc->req = NULL;
1056 
1057 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
1058 	if (!cc->page_pool) {
1059 		ti->error = "Cannot allocate page mempool";
1060 		goto bad_page_pool;
1061 	}
1062 
1063 	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
1064 	if (!cc->bs) {
1065 		ti->error = "Cannot allocate crypt bioset";
1066 		goto bad_bs;
1067 	}
1068 
1069 	if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
1070 		ti->error = "Error setting key";
1071 		goto bad_device;
1072 	}
1073 
1074 	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
1075 		ti->error = "Invalid iv_offset sector";
1076 		goto bad_device;
1077 	}
1078 	cc->iv_offset = tmpll;
1079 
1080 	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
1081 		ti->error = "Invalid device sector";
1082 		goto bad_device;
1083 	}
1084 	cc->start = tmpll;
1085 
1086 	if (dm_get_device(ti, argv[3], cc->start, ti->len,
1087 			  dm_table_get_mode(ti->table), &cc->dev)) {
1088 		ti->error = "Device lookup failed";
1089 		goto bad_device;
1090 	}
1091 
1092 	if (ivmode && cc->iv_gen_ops) {
1093 		if (ivopts)
1094 			*(ivopts - 1) = ':';
1095 		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
1096 		if (!cc->iv_mode) {
1097 			ti->error = "Error kmallocing iv_mode string";
1098 			goto bad_ivmode_string;
1099 		}
1100 		strcpy(cc->iv_mode, ivmode);
1101 	} else
1102 		cc->iv_mode = NULL;
1103 
1104 	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
1105 	if (!cc->io_queue) {
1106 		ti->error = "Couldn't create kcryptd io queue";
1107 		goto bad_io_queue;
1108 	}
1109 
1110 	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
1111 	if (!cc->crypt_queue) {
1112 		ti->error = "Couldn't create kcryptd queue";
1113 		goto bad_crypt_queue;
1114 	}
1115 
1116 	ti->private = cc;
1117 	return 0;
1118 
1119 bad_crypt_queue:
1120 	destroy_workqueue(cc->io_queue);
1121 bad_io_queue:
1122 	kfree(cc->iv_mode);
1123 bad_ivmode_string:
1124 	dm_put_device(ti, cc->dev);
1125 bad_device:
1126 	bioset_free(cc->bs);
1127 bad_bs:
1128 	mempool_destroy(cc->page_pool);
1129 bad_page_pool:
1130 	mempool_destroy(cc->req_pool);
1131 bad_req_pool:
1132 	mempool_destroy(cc->io_pool);
1133 bad_slab_pool:
1134 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1135 		cc->iv_gen_ops->dtr(cc);
1136 bad_ivmode:
1137 	crypto_free_ablkcipher(tfm);
1138 bad_cipher:
1139 	/* Must zero key material before freeing */
1140 	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
1141 	kfree(cc);
1142 	return -EINVAL;
1143 }
1144 
1145 static void crypt_dtr(struct dm_target *ti)
1146 {
1147 	struct crypt_config *cc = (struct crypt_config *) ti->private;
1148 
1149 	destroy_workqueue(cc->io_queue);
1150 	destroy_workqueue(cc->crypt_queue);
1151 
1152 	if (cc->req)
1153 		mempool_free(cc->req, cc->req_pool);
1154 
1155 	bioset_free(cc->bs);
1156 	mempool_destroy(cc->page_pool);
1157 	mempool_destroy(cc->req_pool);
1158 	mempool_destroy(cc->io_pool);
1159 
1160 	kfree(cc->iv_mode);
1161 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1162 		cc->iv_gen_ops->dtr(cc);
1163 	crypto_free_ablkcipher(cc->tfm);
1164 	dm_put_device(ti, cc->dev);
1165 
1166 	/* Must zero key material before freeing */
1167 	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
1168 	kfree(cc);
1169 }
1170 
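/*
 * Map an incoming bio: reads are submitted to the device first (via the
 * io queue) and decrypted on completion; writes are encrypted first (via
 * the crypt queue) and submitted afterwards.
 */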
1171 static int crypt_map(struct dm_target *ti, struct bio *bio,
1172 		     union map_info *map_context)
1173 {
1174 	struct dm_crypt_io *io;
1175 
1176 	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
1177 
1178 	if (bio_data_dir(io->base_bio) == READ)
1179 		kcryptd_queue_io(io);
1180 	else
1181 		kcryptd_queue_crypt(io);
1182 
1183 	return DM_MAPIO_SUBMITTED;
1184 }
1185 
1186 static int crypt_status(struct dm_target *ti, status_type_t type,
1187 			char *result, unsigned int maxlen)
1188 {
1189 	struct crypt_config *cc = (struct crypt_config *) ti->private;
1190 	unsigned int sz = 0;
1191 
1192 	switch (type) {
1193 	case STATUSTYPE_INFO:
1194 		result[0] = '\0';
1195 		break;
1196 
1197 	case STATUSTYPE_TABLE:
1198 		if (cc->iv_mode)
1199 			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
1200 			       cc->iv_mode);
1201 		else
1202 			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);
1203 
1204 		if (cc->key_size > 0) {
1205 			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
1206 				return -ENOMEM;
1207 
1208 			crypt_encode_key(result + sz, cc->key, cc->key_size);
1209 			sz += cc->key_size << 1;
1210 		} else {
1211 			if (sz >= maxlen)
1212 				return -ENOMEM;
1213 			result[sz++] = '-';
1214 		}
1215 
1216 		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1217 				cc->dev->name, (unsigned long long)cc->start);
1218 		break;
1219 	}
1220 	return 0;
1221 }
1222 
1223 static void crypt_postsuspend(struct dm_target *ti)
1224 {
1225 	struct crypt_config *cc = ti->private;
1226 
1227 	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1228 }
1229 
1230 static int crypt_preresume(struct dm_target *ti)
1231 {
1232 	struct crypt_config *cc = ti->private;
1233 
1234 	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
1235 		DMERR("aborting resume - crypt key is not set.");
1236 		return -EAGAIN;
1237 	}
1238 
1239 	return 0;
1240 }
1241 
1242 static void crypt_resume(struct dm_target *ti)
1243 {
1244 	struct crypt_config *cc = ti->private;
1245 
1246 	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1247 }
1248 
1249 /* Message interface
1250  *	key set <key>
1251  *	key wipe
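 *	e.g. (illustrative): dmsetup message <device> 0 key wipe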
1252  */
1253 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1254 {
1255 	struct crypt_config *cc = ti->private;
1256 
1257 	if (argc < 2)
1258 		goto error;
1259 
1260 	if (!strnicmp(argv[0], MESG_STR("key"))) {
1261 		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
1262 			DMWARN("not suspended during key manipulation.");
1263 			return -EINVAL;
1264 		}
1265 		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
1266 			return crypt_set_key(cc, argv[2]);
1267 		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
1268 			return crypt_wipe_key(cc);
1269 	}
1270 
1271 error:
1272 	DMWARN("unrecognised message received.");
1273 	return -EINVAL;
1274 }
1275 
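/*
 * Forward merge_bvec queries to the underlying device's queue, adjusted
 * for the target's start offset, so bios are not built larger than the
 * backing device allows.
 */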
1276 static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
1277 		       struct bio_vec *biovec, int max_size)
1278 {
1279 	struct crypt_config *cc = ti->private;
1280 	struct request_queue *q = bdev_get_queue(cc->dev->bdev);
1281 
1282 	if (!q->merge_bvec_fn)
1283 		return max_size;
1284 
1285 	bvm->bi_bdev = cc->dev->bdev;
1286 	bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;
1287 
1288 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
1289 }
1290 
1291 static struct target_type crypt_target = {
1292 	.name   = "crypt",
1293 	.version = {1, 6, 0},
1294 	.module = THIS_MODULE,
1295 	.ctr    = crypt_ctr,
1296 	.dtr    = crypt_dtr,
1297 	.map    = crypt_map,
1298 	.status = crypt_status,
1299 	.postsuspend = crypt_postsuspend,
1300 	.preresume = crypt_preresume,
1301 	.resume = crypt_resume,
1302 	.message = crypt_message,
1303 	.merge  = crypt_merge,
1304 };
1305 
1306 static int __init dm_crypt_init(void)
1307 {
1308 	int r;
1309 
1310 	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
1311 	if (!_crypt_io_pool)
1312 		return -ENOMEM;
1313 
1314 	r = dm_register_target(&crypt_target);
1315 	if (r < 0) {
1316 		DMERR("register failed %d", r);
1317 		kmem_cache_destroy(_crypt_io_pool);
1318 	}
1319 
1320 	return r;
1321 }
1322 
1323 static void __exit dm_crypt_exit(void)
1324 {
1325 	int r = dm_unregister_target(&crypt_target);
1326 
1327 	if (r < 0)
1328 		DMERR("unregister failed %d", r);
1329 
1330 	kmem_cache_destroy(_crypt_io_pool);
1331 }
1332 
1333 module_init(dm_crypt_init);
1334 module_exit(dm_crypt_exit);
1335 
1336 MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
1337 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
1338 MODULE_LICENSE("GPL");
1339