xref: /openbmc/linux/drivers/md/dm-crypt.c (revision 6aeadf78)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2003 Jana Saout <jana@saout.de>
4  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
5  * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
6  * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
7  *
8  * This file is released under the GPL.
9  */
10 
11 #include <linux/completion.h>
12 #include <linux/err.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/key.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-integrity.h>
20 #include <linux/mempool.h>
21 #include <linux/slab.h>
22 #include <linux/crypto.h>
23 #include <linux/workqueue.h>
24 #include <linux/kthread.h>
25 #include <linux/backing-dev.h>
26 #include <linux/atomic.h>
27 #include <linux/scatterlist.h>
28 #include <linux/rbtree.h>
29 #include <linux/ctype.h>
30 #include <asm/page.h>
31 #include <asm/unaligned.h>
32 #include <crypto/hash.h>
33 #include <crypto/md5.h>
34 #include <crypto/algapi.h>
35 #include <crypto/skcipher.h>
36 #include <crypto/aead.h>
37 #include <crypto/authenc.h>
38 #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
39 #include <linux/key-type.h>
40 #include <keys/user-type.h>
41 #include <keys/encrypted-type.h>
42 #include <keys/trusted-type.h>
43 
44 #include <linux/device-mapper.h>
45 
46 #include "dm-audit.h"
47 
48 #define DM_MSG_PREFIX "crypt"
49 
50 /*
51  * context holding the current state of a multi-part conversion
52  */
53 struct convert_context {
54 	struct completion restart;
55 	struct bio *bio_in;
56 	struct bio *bio_out;
57 	struct bvec_iter iter_in;
58 	struct bvec_iter iter_out;
59 	u64 cc_sector;
60 	atomic_t cc_pending;
61 	union {
62 		struct skcipher_request *req;
63 		struct aead_request *req_aead;
64 	} r;
65 
66 };
67 
68 /*
69  * per bio private data
70  */
71 struct dm_crypt_io {
72 	struct crypt_config *cc;
73 	struct bio *base_bio;
74 	u8 *integrity_metadata;
75 	bool integrity_metadata_from_pool:1;
76 	bool in_tasklet:1;
77 
78 	struct work_struct work;
79 	struct tasklet_struct tasklet;
80 
81 	struct convert_context ctx;
82 
83 	atomic_t io_pending;
84 	blk_status_t error;
85 	sector_t sector;
86 
87 	struct rb_node rb_node;
88 } CRYPTO_MINALIGN_ATTR;
89 
90 struct dm_crypt_request {
91 	struct convert_context *ctx;
92 	struct scatterlist sg_in[4];
93 	struct scatterlist sg_out[4];
94 	u64 iv_sector;
95 };
96 
97 struct crypt_config;
98 
99 struct crypt_iv_operations {
100 	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
101 		   const char *opts);
102 	void (*dtr)(struct crypt_config *cc);
103 	int (*init)(struct crypt_config *cc);
104 	int (*wipe)(struct crypt_config *cc);
105 	int (*generator)(struct crypt_config *cc, u8 *iv,
106 			 struct dm_crypt_request *dmreq);
107 	int (*post)(struct crypt_config *cc, u8 *iv,
108 		    struct dm_crypt_request *dmreq);
109 };
110 
111 struct iv_benbi_private {
112 	int shift;
113 };
114 
115 #define LMK_SEED_SIZE 64 /* hash + 0 */
116 struct iv_lmk_private {
117 	struct crypto_shash *hash_tfm;
118 	u8 *seed;
119 };
120 
121 #define TCW_WHITENING_SIZE 16
122 struct iv_tcw_private {
123 	struct crypto_shash *crc32_tfm;
124 	u8 *iv_seed;
125 	u8 *whitening;
126 };
127 
128 #define ELEPHANT_MAX_KEY_SIZE 32
129 struct iv_elephant_private {
130 	struct crypto_skcipher *tfm;
131 };
132 
133 /*
134  * Crypt: maps a linear range of a block device
135  * and encrypts / decrypts at the same time.
136  */
137 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
138 	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
139 	     DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
140 	     DM_CRYPT_WRITE_INLINE };
141 
142 enum cipher_flags {
143 	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
144 	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
145 	CRYPT_ENCRYPT_PREPROCESS,	/* Must preprocess data for encryption (elephant) */
146 };
147 
148 /*
149  * The fields in here must only be read (never modified) after initialization.
150  */
151 struct crypt_config {
152 	struct dm_dev *dev;
153 	sector_t start;
154 
155 	struct percpu_counter n_allocated_pages;
156 
157 	struct workqueue_struct *io_queue;
158 	struct workqueue_struct *crypt_queue;
159 
160 	spinlock_t write_thread_lock;
161 	struct task_struct *write_thread;
162 	struct rb_root write_tree;
163 
164 	char *cipher_string;
165 	char *cipher_auth;
166 	char *key_string;
167 
168 	const struct crypt_iv_operations *iv_gen_ops;
169 	union {
170 		struct iv_benbi_private benbi;
171 		struct iv_lmk_private lmk;
172 		struct iv_tcw_private tcw;
173 		struct iv_elephant_private elephant;
174 	} iv_gen_private;
175 	u64 iv_offset;
176 	unsigned int iv_size;
177 	unsigned short sector_size;
178 	unsigned char sector_shift;
179 
180 	union {
181 		struct crypto_skcipher **tfms;
182 		struct crypto_aead **tfms_aead;
183 	} cipher_tfm;
184 	unsigned int tfms_count;
185 	unsigned long cipher_flags;
186 
187 	/*
188 	 * Layout of each crypto request:
189 	 *
190 	 *   struct skcipher_request
191 	 *      context
192 	 *      padding
193 	 *   struct dm_crypt_request
194 	 *      padding
195 	 *   IV
196 	 *
197 	 * The padding is added so that dm_crypt_request and the IV are
198 	 * correctly aligned.
199 	 */
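	/*
	 * (Illustrative note: dmreq_start below is the byte offset of struct
	 * dm_crypt_request past the crypto request and its transform context;
	 * iv_of_dmreq() then aligns past struct dm_crypt_request, using the
	 * tfm's alignmask, to reach the IV.)
	 */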
200 	unsigned int dmreq_start;
201 
202 	unsigned int per_bio_data_size;
203 
204 	unsigned long flags;
205 	unsigned int key_size;
206 	unsigned int key_parts;      /* independent parts in key buffer */
207 	unsigned int key_extra_size; /* additional keys length */
208 	unsigned int key_mac_size;   /* MAC key size for authenc(...) */
209 
210 	unsigned int integrity_tag_size;
211 	unsigned int integrity_iv_size;
212 	unsigned int on_disk_tag_size;
213 
214 	/*
215 	 * pool for per bio private data, crypto requests,
216 	 * encryption requests/buffer pages and integrity tags
217 	 */
218 	unsigned int tag_pool_max_sectors;
219 	mempool_t tag_pool;
220 	mempool_t req_pool;
221 	mempool_t page_pool;
222 
223 	struct bio_set bs;
224 	struct mutex bio_alloc_lock;
225 
226 	u8 *authenc_key; /* space for keys in authenc() format (if used) */
227 	u8 key[];
228 };
229 
230 #define MIN_IOS		64
231 #define MAX_TAG_SIZE	480
232 #define POOL_ENTRY_SIZE	512
233 
234 static DEFINE_SPINLOCK(dm_crypt_clients_lock);
235 static unsigned int dm_crypt_clients_n;
236 static volatile unsigned long dm_crypt_pages_per_client;
237 #define DM_CRYPT_MEMORY_PERCENT			2
238 #define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_VECS * 16)
239 
240 static void crypt_endio(struct bio *clone);
241 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
242 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
243 					     struct scatterlist *sg);
244 
245 static bool crypt_integrity_aead(struct crypt_config *cc);
246 
247 /*
248  * Use this to access cipher attributes that are independent of the key.
249  */
250 static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
251 {
252 	return cc->cipher_tfm.tfms[0];
253 }
254 
255 static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
256 {
257 	return cc->cipher_tfm.tfms_aead[0];
258 }
259 
260 /*
261  * Different IV generation algorithms:
262  *
263  * plain: the initial vector is the 32-bit little-endian version of the sector
264  *        number, padded with zeros if necessary.
265  *
266  * plain64: the initial vector is the 64-bit little-endian version of the sector
267  *        number, padded with zeros if necessary.
268  *
269  * plain64be: the initial vector is the 64-bit big-endian version of the sector
270  *        number, padded with zeros if necessary.
271  *
272  * essiv: "encrypted sector|salt initial vector", the sector number is
273  *        encrypted with the bulk cipher using a salt as key. The salt
274  *        should be derived from the bulk cipher's key via hashing.
275  *
276  * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
277  *        (needed for LRW-32-AES and possibly other narrow block modes)
278  *
279  * null: the initial vector is always zero.  Provides compatibility with
280  *       obsolete loop_fish2 devices.  Do not use for new devices.
281  *
282  * lmk:  Compatible implementation of the block chaining mode used
283  *       by the Loop-AES block device encryption system
284  *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
285  *       It operates on full 512 byte sectors and uses CBC
286  *       with an IV derived from the sector number, the data and
287  *       optionally an extra IV seed.
288  *       This means that after decryption the first block of the
289  *       sector must be tweaked according to the decrypted data.
290  *       Loop-AES can use three encryption schemes:
291  *         version 1: is plain aes-cbc mode
292  *         version 2: uses 64 multikey scheme with lmk IV generator
293  *         version 3: the same as version 2 with additional IV seed
294  *                   (it uses 65 keys, last key is used as IV seed)
295  *
296  * tcw:  Compatible implementation of the block chaining mode used
297  *       by the TrueCrypt device encryption system (prior to version 4.1).
298  *       For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
299  *       It operates on full 512 byte sectors and uses CBC
300  *       with an IV derived from initial key and the sector number.
301 	 *       In addition, a whitening value is applied to every sector; it is
302 	 *       calculated from the initial key and the sector number, mixed using CRC32.
303 	 *       Note that this encryption scheme is vulnerable to watermarking attacks
304 	 *       and should only be used to access old compatible containers.
305  *
306  * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
307 	 *        The IV is the encrypted little-endian byte offset (with the same
308 	 *        key and cipher as the volume).
309  *
310  * elephant: The extended version of eboiv with additional Elephant diffuser
311  *           used with Bitlocker CBC mode.
312  *           This mode was used in older Windows systems
313  *           https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
314  */
315 
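/*
 * Worked example (illustrative): for sector number 0x123456789a and a
 * 16-byte IV, the generators below produce
 *
 *   plain:     9a 78 56 34 00 00 00 00 00 00 00 00 00 00 00 00
 *   plain64:   9a 78 56 34 12 00 00 00 00 00 00 00 00 00 00 00
 *   plain64be: 00 00 00 00 00 00 00 00 00 00 00 12 34 56 78 9a
 *
 * i.e. plain stores only the low 32 bits little-endian at the start of
 * the IV, plain64 the full 64-bit value little-endian, and plain64be the
 * full 64-bit value big-endian at the end of the IV.
 */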
316 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
317 			      struct dm_crypt_request *dmreq)
318 {
319 	memset(iv, 0, cc->iv_size);
320 	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
321 
322 	return 0;
323 }
324 
325 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
326 				struct dm_crypt_request *dmreq)
327 {
328 	memset(iv, 0, cc->iv_size);
329 	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
330 
331 	return 0;
332 }
333 
334 static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
335 				  struct dm_crypt_request *dmreq)
336 {
337 	memset(iv, 0, cc->iv_size);
338 	/* iv_size is at least the size of a u64; usually it is 16 bytes */
339 	*(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
340 
341 	return 0;
342 }
343 
344 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
345 			      struct dm_crypt_request *dmreq)
346 {
347 	/*
348 	 * ESSIV encryption of the IV is now handled by the crypto API,
349 	 * so just pass the plain sector number here.
350 	 */
351 	memset(iv, 0, cc->iv_size);
352 	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
353 
354 	return 0;
355 }
356 
357 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
358 			      const char *opts)
359 {
360 	unsigned int bs;
361 	int log;
362 
363 	if (crypt_integrity_aead(cc))
364 		bs = crypto_aead_blocksize(any_tfm_aead(cc));
365 	else
366 		bs = crypto_skcipher_blocksize(any_tfm(cc));
367 	log = ilog2(bs);
368 
369 	/*
370 	 * We need to calculate how far we must shift the sector count
371 	 * to get the cipher block count; we use this shift in _gen.
372 	 */
373 	if (1 << log != bs) {
374 		ti->error = "cipher blocksize is not a power of 2";
375 		return -EINVAL;
376 	}
377 
378 	if (log > 9) {
379 		ti->error = "cipher blocksize is > 512";
380 		return -EINVAL;
381 	}
382 
383 	cc->iv_gen_private.benbi.shift = 9 - log;
384 
385 	return 0;
386 }
387 
388 static void crypt_iv_benbi_dtr(struct crypt_config *cc)
389 {
390 }
391 
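/*
 * Illustrative example: with aes-cbc-benbi the cipher block size is 16
 * bytes, so log = 4 and shift = 9 - 4 = 5 in the constructor above; a
 * 512-byte sector then covers 1 << 5 = 32 cipher blocks and the generator
 * below stores the big-endian count ((sector << 5) + 1) in the last
 * 8 bytes of the IV.
 */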
392 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
393 			      struct dm_crypt_request *dmreq)
394 {
395 	__be64 val;
396 
397 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* the last u64 is set below */
398 
399 	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
400 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
401 
402 	return 0;
403 }
404 
405 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
406 			     struct dm_crypt_request *dmreq)
407 {
408 	memset(iv, 0, cc->iv_size);
409 
410 	return 0;
411 }
412 
413 static void crypt_iv_lmk_dtr(struct crypt_config *cc)
414 {
415 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
416 
417 	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
418 		crypto_free_shash(lmk->hash_tfm);
419 	lmk->hash_tfm = NULL;
420 
421 	kfree_sensitive(lmk->seed);
422 	lmk->seed = NULL;
423 }
424 
425 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
426 			    const char *opts)
427 {
428 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
429 
430 	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
431 		ti->error = "Unsupported sector size for LMK";
432 		return -EINVAL;
433 	}
434 
435 	lmk->hash_tfm = crypto_alloc_shash("md5", 0,
436 					   CRYPTO_ALG_ALLOCATES_MEMORY);
437 	if (IS_ERR(lmk->hash_tfm)) {
438 		ti->error = "Error initializing LMK hash";
439 		return PTR_ERR(lmk->hash_tfm);
440 	}
441 
442 	/* No seed in LMK version 2 */
443 	if (cc->key_parts == cc->tfms_count) {
444 		lmk->seed = NULL;
445 		return 0;
446 	}
447 
448 	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
449 	if (!lmk->seed) {
450 		crypt_iv_lmk_dtr(cc);
451 		ti->error = "Error allocating seed storage in LMK";
452 		return -ENOMEM;
453 	}
454 
455 	return 0;
456 }
457 
458 static int crypt_iv_lmk_init(struct crypt_config *cc)
459 {
460 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
461 	int subkey_size = cc->key_size / cc->key_parts;
462 
463 	/* LMK seed is stored at the position of the (LMK_KEYS + 1)th key */
464 	if (lmk->seed)
465 		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
466 		       crypto_shash_digestsize(lmk->hash_tfm));
467 
468 	return 0;
469 }
470 
471 static int crypt_iv_lmk_wipe(struct crypt_config *cc)
472 {
473 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
474 
475 	if (lmk->seed)
476 		memset(lmk->seed, 0, LMK_SEED_SIZE);
477 
478 	return 0;
479 }
480 
481 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
482 			    struct dm_crypt_request *dmreq,
483 			    u8 *data)
484 {
485 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
486 	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
487 	struct md5_state md5state;
488 	__le32 buf[4];
489 	int i, r;
490 
491 	desc->tfm = lmk->hash_tfm;
492 
493 	r = crypto_shash_init(desc);
494 	if (r)
495 		return r;
496 
497 	if (lmk->seed) {
498 		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
499 		if (r)
500 			return r;
501 	}
502 
503 	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
504 	r = crypto_shash_update(desc, data + 16, 16 * 31);
505 	if (r)
506 		return r;
507 
508 	/* Sector is cropped to 56 bits here */
509 	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
510 	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
511 	buf[2] = cpu_to_le32(4024);
512 	buf[3] = 0;
513 	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
514 	if (r)
515 		return r;
516 
517 	/* No MD5 padding here */
518 	r = crypto_shash_export(desc, &md5state);
519 	if (r)
520 		return r;
521 
522 	for (i = 0; i < MD5_HASH_WORDS; i++)
523 		__cpu_to_le32s(&md5state.hash[i]);
524 	memcpy(iv, &md5state.hash, cc->iv_size);
525 
526 	return 0;
527 }
528 
529 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
530 			    struct dm_crypt_request *dmreq)
531 {
532 	struct scatterlist *sg;
533 	u8 *src;
534 	int r = 0;
535 
536 	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
537 		sg = crypt_get_sg_data(cc, dmreq->sg_in);
538 		src = kmap_local_page(sg_page(sg));
539 		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
540 		kunmap_local(src);
541 	} else
542 		memset(iv, 0, cc->iv_size);
543 
544 	return r;
545 }
546 
547 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
548 			     struct dm_crypt_request *dmreq)
549 {
550 	struct scatterlist *sg;
551 	u8 *dst;
552 	int r;
553 
554 	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
555 		return 0;
556 
557 	sg = crypt_get_sg_data(cc, dmreq->sg_out);
558 	dst = kmap_local_page(sg_page(sg));
559 	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
560 
561 	/* Tweak the first block of plaintext sector */
562 	if (!r)
563 		crypto_xor(dst + sg->offset, iv, cc->iv_size);
564 
565 	kunmap_local(dst);
566 	return r;
567 }
568 
569 static void crypt_iv_tcw_dtr(struct crypt_config *cc)
570 {
571 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
572 
573 	kfree_sensitive(tcw->iv_seed);
574 	tcw->iv_seed = NULL;
575 	kfree_sensitive(tcw->whitening);
576 	tcw->whitening = NULL;
577 
578 	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
579 		crypto_free_shash(tcw->crc32_tfm);
580 	tcw->crc32_tfm = NULL;
581 }
582 
583 static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
584 			    const char *opts)
585 {
586 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
587 
588 	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
589 		ti->error = "Unsupported sector size for TCW";
590 		return -EINVAL;
591 	}
592 
593 	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
594 		ti->error = "Wrong key size for TCW";
595 		return -EINVAL;
596 	}
597 
598 	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
599 					    CRYPTO_ALG_ALLOCATES_MEMORY);
600 	if (IS_ERR(tcw->crc32_tfm)) {
601 		ti->error = "Error initializing CRC32 in TCW";
602 		return PTR_ERR(tcw->crc32_tfm);
603 	}
604 
605 	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
606 	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
607 	if (!tcw->iv_seed || !tcw->whitening) {
608 		crypt_iv_tcw_dtr(cc);
609 		ti->error = "Error allocating seed storage in TCW";
610 		return -ENOMEM;
611 	}
612 
613 	return 0;
614 }
615 
616 static int crypt_iv_tcw_init(struct crypt_config *cc)
617 {
618 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
619 	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
620 
621 	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
622 	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
623 	       TCW_WHITENING_SIZE);
624 
625 	return 0;
626 }
627 
628 static int crypt_iv_tcw_wipe(struct crypt_config *cc)
629 {
630 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
631 
632 	memset(tcw->iv_seed, 0, cc->iv_size);
633 	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
634 
635 	return 0;
636 }
637 
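/*
 * Summary of the whitening below: the 16-byte whitening seed is XORed
 * with the (repeated) sector number, each 32-bit word is replaced by its
 * CRC32, the four words are folded down to 8 bytes, and that 8-byte value
 * is XORed over the whole 512-byte sector.
 */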
638 static int crypt_iv_tcw_whitening(struct crypt_config *cc,
639 				  struct dm_crypt_request *dmreq,
640 				  u8 *data)
641 {
642 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
643 	__le64 sector = cpu_to_le64(dmreq->iv_sector);
644 	u8 buf[TCW_WHITENING_SIZE];
645 	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
646 	int i, r;
647 
648 	/* xor whitening with sector number */
649 	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
650 	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
651 
652 	/* calculate crc32 for every 32bit part and xor it */
653 	desc->tfm = tcw->crc32_tfm;
654 	for (i = 0; i < 4; i++) {
655 		r = crypto_shash_init(desc);
656 		if (r)
657 			goto out;
658 		r = crypto_shash_update(desc, &buf[i * 4], 4);
659 		if (r)
660 			goto out;
661 		r = crypto_shash_final(desc, &buf[i * 4]);
662 		if (r)
663 			goto out;
664 	}
665 	crypto_xor(&buf[0], &buf[12], 4);
666 	crypto_xor(&buf[4], &buf[8], 4);
667 
668 	/* apply whitening (8 bytes) to whole sector */
669 	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
670 		crypto_xor(data + i * 8, buf, 8);
671 out:
672 	memzero_explicit(buf, sizeof(buf));
673 	return r;
674 }
675 
676 static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
677 			    struct dm_crypt_request *dmreq)
678 {
679 	struct scatterlist *sg;
680 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
681 	__le64 sector = cpu_to_le64(dmreq->iv_sector);
682 	u8 *src;
683 	int r = 0;
684 
685 	/* Remove whitening from ciphertext */
686 	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
687 		sg = crypt_get_sg_data(cc, dmreq->sg_in);
688 		src = kmap_local_page(sg_page(sg));
689 		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
690 		kunmap_local(src);
691 	}
692 
693 	/* Calculate IV */
694 	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
695 	if (cc->iv_size > 8)
696 		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
697 			       cc->iv_size - 8);
698 
699 	return r;
700 }
701 
702 static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
703 			     struct dm_crypt_request *dmreq)
704 {
705 	struct scatterlist *sg;
706 	u8 *dst;
707 	int r;
708 
709 	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
710 		return 0;
711 
712 	/* Apply whitening on ciphertext */
713 	sg = crypt_get_sg_data(cc, dmreq->sg_out);
714 	dst = kmap_local_page(sg_page(sg));
715 	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
716 	kunmap_local(dst);
717 
718 	return r;
719 }
720 
721 static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
722 				struct dm_crypt_request *dmreq)
723 {
724 	/* Used only for writes; there must be additional space to store the IV */
725 	get_random_bytes(iv, cc->iv_size);
726 	return 0;
727 }
728 
729 static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
730 			    const char *opts)
731 {
732 	if (crypt_integrity_aead(cc)) {
733 		ti->error = "AEAD transforms not supported for EBOIV";
734 		return -EINVAL;
735 	}
736 
737 	if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
738 		ti->error = "Block size of EBOIV cipher does not match IV size of block cipher";
739 		return -EINVAL;
740 	}
741 
742 	return 0;
743 }
744 
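/*
 * EBOIV sketch: one cipher block of zeros (from ZERO_PAGE) is encrypted
 * with the data cipher, using the little-endian byte offset of the sector
 * as the IV; for the CBC mode used by Bitlocker this is equivalent to
 * IV = E_K(byte offset) with the volume key.
 */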
745 static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
746 			    struct dm_crypt_request *dmreq)
747 {
748 	u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
749 	struct skcipher_request *req;
750 	struct scatterlist src, dst;
751 	DECLARE_CRYPTO_WAIT(wait);
752 	int err;
753 
754 	req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
755 	if (!req)
756 		return -ENOMEM;
757 
758 	memset(buf, 0, cc->iv_size);
759 	*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
760 
761 	sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
762 	sg_init_one(&dst, iv, cc->iv_size);
763 	skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
764 	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
765 	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
766 	skcipher_request_free(req);
767 
768 	return err;
769 }
770 
771 static void crypt_iv_elephant_dtr(struct crypt_config *cc)
772 {
773 	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
774 
775 	crypto_free_skcipher(elephant->tfm);
776 	elephant->tfm = NULL;
777 }
778 
779 static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
780 			    const char *opts)
781 {
782 	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
783 	int r;
784 
785 	elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
786 					      CRYPTO_ALG_ALLOCATES_MEMORY);
787 	if (IS_ERR(elephant->tfm)) {
788 		r = PTR_ERR(elephant->tfm);
789 		elephant->tfm = NULL;
790 		return r;
791 	}
792 
793 	r = crypt_iv_eboiv_ctr(cc, ti, NULL);
794 	if (r)
795 		crypt_iv_elephant_dtr(cc);
796 	return r;
797 }
798 
799 static void diffuser_disk_to_cpu(u32 *d, size_t n)
800 {
801 #ifndef __LITTLE_ENDIAN
802 	int i;
803 
804 	for (i = 0; i < n; i++)
805 		d[i] = le32_to_cpu((__le32)d[i]);
806 #endif
807 }
808 
809 static void diffuser_cpu_to_disk(__le32 *d, size_t n)
810 {
811 #ifndef __LITTLE_ENDIAN
812 	int i;
813 
814 	for (i = 0; i < n; i++)
815 		d[i] = cpu_to_le32((u32)d[i]);
816 #endif
817 }
818 
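/*
 * Bitlocker Elephant diffusers A and B (see the specification linked in
 * the comment above): the sector is processed as an array of
 * n = sector_size / 4 little-endian 32-bit words, the
 * "x << r | x >> (32 - r)" expressions are 32-bit rotations, diffuser A
 * runs 5 passes and diffuser B runs 3.
 */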
819 static void diffuser_a_decrypt(u32 *d, size_t n)
820 {
821 	int i, i1, i2, i3;
822 
823 	for (i = 0; i < 5; i++) {
824 		i1 = 0;
825 		i2 = n - 2;
826 		i3 = n - 5;
827 
828 		while (i1 < (n - 1)) {
829 			d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
830 			i1++; i2++; i3++;
831 
832 			if (i3 >= n)
833 				i3 -= n;
834 
835 			d[i1] += d[i2] ^ d[i3];
836 			i1++; i2++; i3++;
837 
838 			if (i2 >= n)
839 				i2 -= n;
840 
841 			d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
842 			i1++; i2++; i3++;
843 
844 			d[i1] += d[i2] ^ d[i3];
845 			i1++; i2++; i3++;
846 		}
847 	}
848 }
849 
850 static void diffuser_a_encrypt(u32 *d, size_t n)
851 {
852 	int i, i1, i2, i3;
853 
854 	for (i = 0; i < 5; i++) {
855 		i1 = n - 1;
856 		i2 = n - 2 - 1;
857 		i3 = n - 5 - 1;
858 
859 		while (i1 > 0) {
860 			d[i1] -= d[i2] ^ d[i3];
861 			i1--; i2--; i3--;
862 
863 			d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
864 			i1--; i2--; i3--;
865 
866 			if (i2 < 0)
867 				i2 += n;
868 
869 			d[i1] -= d[i2] ^ d[i3];
870 			i1--; i2--; i3--;
871 
872 			if (i3 < 0)
873 				i3 += n;
874 
875 			d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
876 			i1--; i2--; i3--;
877 		}
878 	}
879 }
880 
881 static void diffuser_b_decrypt(u32 *d, size_t n)
882 {
883 	int i, i1, i2, i3;
884 
885 	for (i = 0; i < 3; i++) {
886 		i1 = 0;
887 		i2 = 2;
888 		i3 = 5;
889 
890 		while (i1 < (n - 1)) {
891 			d[i1] += d[i2] ^ d[i3];
892 			i1++; i2++; i3++;
893 
894 			d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
895 			i1++; i2++; i3++;
896 
897 			if (i2 >= n)
898 				i2 -= n;
899 
900 			d[i1] += d[i2] ^ d[i3];
901 			i1++; i2++; i3++;
902 
903 			if (i3 >= n)
904 				i3 -= n;
905 
906 			d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
907 			i1++; i2++; i3++;
908 		}
909 	}
910 }
911 
912 static void diffuser_b_encrypt(u32 *d, size_t n)
913 {
914 	int i, i1, i2, i3;
915 
916 	for (i = 0; i < 3; i++) {
917 		i1 = n - 1;
918 		i2 = 2 - 1;
919 		i3 = 5 - 1;
920 
921 		while (i1 > 0) {
922 			d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
923 			i1--; i2--; i3--;
924 
925 			if (i3 < 0)
926 				i3 += n;
927 
928 			d[i1] -= d[i2] ^ d[i3];
929 			i1--; i2--; i3--;
930 
931 			if (i2 < 0)
932 				i2 += n;
933 
934 			d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
935 			i1--; i2--; i3--;
936 
937 			d[i1] -= d[i2] ^ d[i3];
938 			i1--; i2--; i3--;
939 		}
940 	}
941 }
942 
943 static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
944 {
945 	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
946 	u8 *es, *ks, *data, *data2, *data_offset;
947 	struct skcipher_request *req;
948 	struct scatterlist *sg, *sg2, src, dst;
949 	DECLARE_CRYPTO_WAIT(wait);
950 	int i, r;
951 
952 	req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
953 	es = kzalloc(16, GFP_NOIO); /* Key for AES */
954 	ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */
955 
956 	if (!req || !es || !ks) {
957 		r = -ENOMEM;
958 		goto out;
959 	}
960 
961 	*(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
962 
963 	/* E(Ks, e(s)) */
964 	sg_init_one(&src, es, 16);
965 	sg_init_one(&dst, ks, 16);
966 	skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
967 	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
968 	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
969 	if (r)
970 		goto out;
971 
972 	/* E(Ks, e'(s)) */
973 	es[15] = 0x80;
974 	sg_init_one(&dst, &ks[16], 16);
975 	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
976 	if (r)
977 		goto out;
978 
979 	sg = crypt_get_sg_data(cc, dmreq->sg_out);
980 	data = kmap_local_page(sg_page(sg));
981 	data_offset = data + sg->offset;
982 
983 	/* Cannot modify original bio, copy to sg_out and apply Elephant to it */
984 	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
985 		sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
986 		data2 = kmap_local_page(sg_page(sg2));
987 		memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
988 		kunmap_local(data2);
989 	}
990 
991 	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
992 		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
993 		diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
994 		diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
995 		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
996 	}
997 
998 	for (i = 0; i < (cc->sector_size / 32); i++)
999 		crypto_xor(data_offset + i * 32, ks, 32);
1000 
1001 	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1002 		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
1003 		diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1004 		diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1005 		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
1006 	}
1007 
1008 	kunmap_local(data);
1009 out:
1010 	kfree_sensitive(ks);
1011 	kfree_sensitive(es);
1012 	skcipher_request_free(req);
1013 	return r;
1014 }
1015 
1016 static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
1017 			    struct dm_crypt_request *dmreq)
1018 {
1019 	int r;
1020 
1021 	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1022 		r = crypt_iv_elephant(cc, dmreq);
1023 		if (r)
1024 			return r;
1025 	}
1026 
1027 	return crypt_iv_eboiv_gen(cc, iv, dmreq);
1028 }
1029 
1030 static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
1031 				  struct dm_crypt_request *dmreq)
1032 {
1033 	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
1034 		return crypt_iv_elephant(cc, dmreq);
1035 
1036 	return 0;
1037 }
1038 
1039 static int crypt_iv_elephant_init(struct crypt_config *cc)
1040 {
1041 	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1042 	int key_offset = cc->key_size - cc->key_extra_size;
1043 
1044 	return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
1045 }
1046 
1047 static int crypt_iv_elephant_wipe(struct crypt_config *cc)
1048 {
1049 	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1050 	u8 key[ELEPHANT_MAX_KEY_SIZE];
1051 
1052 	memset(key, 0, cc->key_extra_size);
1053 	return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
1054 }
1055 
1056 static const struct crypt_iv_operations crypt_iv_plain_ops = {
1057 	.generator = crypt_iv_plain_gen
1058 };
1059 
1060 static const struct crypt_iv_operations crypt_iv_plain64_ops = {
1061 	.generator = crypt_iv_plain64_gen
1062 };
1063 
1064 static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
1065 	.generator = crypt_iv_plain64be_gen
1066 };
1067 
1068 static const struct crypt_iv_operations crypt_iv_essiv_ops = {
1069 	.generator = crypt_iv_essiv_gen
1070 };
1071 
1072 static const struct crypt_iv_operations crypt_iv_benbi_ops = {
1073 	.ctr	   = crypt_iv_benbi_ctr,
1074 	.dtr	   = crypt_iv_benbi_dtr,
1075 	.generator = crypt_iv_benbi_gen
1076 };
1077 
1078 static const struct crypt_iv_operations crypt_iv_null_ops = {
1079 	.generator = crypt_iv_null_gen
1080 };
1081 
1082 static const struct crypt_iv_operations crypt_iv_lmk_ops = {
1083 	.ctr	   = crypt_iv_lmk_ctr,
1084 	.dtr	   = crypt_iv_lmk_dtr,
1085 	.init	   = crypt_iv_lmk_init,
1086 	.wipe	   = crypt_iv_lmk_wipe,
1087 	.generator = crypt_iv_lmk_gen,
1088 	.post	   = crypt_iv_lmk_post
1089 };
1090 
1091 static const struct crypt_iv_operations crypt_iv_tcw_ops = {
1092 	.ctr	   = crypt_iv_tcw_ctr,
1093 	.dtr	   = crypt_iv_tcw_dtr,
1094 	.init	   = crypt_iv_tcw_init,
1095 	.wipe	   = crypt_iv_tcw_wipe,
1096 	.generator = crypt_iv_tcw_gen,
1097 	.post	   = crypt_iv_tcw_post
1098 };
1099 
1100 static const struct crypt_iv_operations crypt_iv_random_ops = {
1101 	.generator = crypt_iv_random_gen
1102 };
1103 
1104 static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
1105 	.ctr	   = crypt_iv_eboiv_ctr,
1106 	.generator = crypt_iv_eboiv_gen
1107 };
1108 
1109 static const struct crypt_iv_operations crypt_iv_elephant_ops = {
1110 	.ctr	   = crypt_iv_elephant_ctr,
1111 	.dtr	   = crypt_iv_elephant_dtr,
1112 	.init	   = crypt_iv_elephant_init,
1113 	.wipe	   = crypt_iv_elephant_wipe,
1114 	.generator = crypt_iv_elephant_gen,
1115 	.post	   = crypt_iv_elephant_post
1116 };
1117 
1118 /*
1119  * Integrity extensions
1120  */
1121 static bool crypt_integrity_aead(struct crypt_config *cc)
1122 {
1123 	return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
1124 }
1125 
1126 static bool crypt_integrity_hmac(struct crypt_config *cc)
1127 {
1128 	return crypt_integrity_aead(cc) && cc->key_mac_size;
1129 }
1130 
1131 /* Get sg containing data */
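/*
 * For AEAD the per-request scatterlists are laid out as
 * [ sector | IV | data | auth tag ] (see crypt_convert_block_aead()),
 * so the data segment is at index 2; plain skcipher uses a single entry.
 */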
1132 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
1133 					     struct scatterlist *sg)
1134 {
1135 	if (unlikely(crypt_integrity_aead(cc)))
1136 		return &sg[2];
1137 
1138 	return sg;
1139 }
1140 
1141 static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
1142 {
1143 	struct bio_integrity_payload *bip;
1144 	unsigned int tag_len;
1145 	int ret;
1146 
1147 	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
1148 		return 0;
1149 
1150 	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
1151 	if (IS_ERR(bip))
1152 		return PTR_ERR(bip);
1153 
1154 	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
1155 
1156 	bip->bip_iter.bi_size = tag_len;
1157 	bip->bip_iter.bi_sector = io->cc->start + io->sector;
1158 
1159 	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
1160 				     tag_len, offset_in_page(io->integrity_metadata));
1161 	if (unlikely(ret != tag_len))
1162 		return -ENOMEM;
1163 
1164 	return 0;
1165 }
1166 
1167 static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1168 {
1169 #ifdef CONFIG_BLK_DEV_INTEGRITY
1170 	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
1171 	struct mapped_device *md = dm_table_get_md(ti->table);
1172 
1173 	/* From now on we require an underlying device with our integrity profile */
1174 	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
1175 		ti->error = "Integrity profile not supported.";
1176 		return -EINVAL;
1177 	}
1178 
1179 	if (bi->tag_size != cc->on_disk_tag_size ||
1180 	    bi->tuple_size != cc->on_disk_tag_size) {
1181 		ti->error = "Integrity profile tag size mismatch.";
1182 		return -EINVAL;
1183 	}
1184 	if (1 << bi->interval_exp != cc->sector_size) {
1185 		ti->error = "Integrity profile sector size mismatch.";
1186 		return -EINVAL;
1187 	}
1188 
1189 	if (crypt_integrity_aead(cc)) {
1190 		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
1191 		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
1192 		       cc->integrity_tag_size, cc->integrity_iv_size);
1193 
1194 		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
1195 			ti->error = "Integrity AEAD auth tag size is not supported.";
1196 			return -EINVAL;
1197 		}
1198 	} else if (cc->integrity_iv_size)
1199 		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
1200 		       cc->integrity_iv_size);
1201 
1202 	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
1203 		ti->error = "Not enough space for integrity tag in the profile.";
1204 		return -EINVAL;
1205 	}
1206 
1207 	return 0;
1208 #else
1209 	ti->error = "Integrity profile not supported.";
1210 	return -EINVAL;
1211 #endif
1212 }
1213 
1214 static void crypt_convert_init(struct crypt_config *cc,
1215 			       struct convert_context *ctx,
1216 			       struct bio *bio_out, struct bio *bio_in,
1217 			       sector_t sector)
1218 {
1219 	ctx->bio_in = bio_in;
1220 	ctx->bio_out = bio_out;
1221 	if (bio_in)
1222 		ctx->iter_in = bio_in->bi_iter;
1223 	if (bio_out)
1224 		ctx->iter_out = bio_out->bi_iter;
1225 	ctx->cc_sector = sector + cc->iv_offset;
1226 	init_completion(&ctx->restart);
1227 }
1228 
1229 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
1230 					     void *req)
1231 {
1232 	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
1233 }
1234 
1235 static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1236 {
1237 	return (void *)((char *)dmreq - cc->dmreq_start);
1238 }
1239 
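/*
 * Data appended after struct dm_crypt_request, in order: the working IV,
 * the original IV copy, the original sector number (__le64) and the tag
 * offset (unsigned int). The helpers below return pointers into this area.
 */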
1240 static u8 *iv_of_dmreq(struct crypt_config *cc,
1241 		       struct dm_crypt_request *dmreq)
1242 {
1243 	if (crypt_integrity_aead(cc))
1244 		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1245 			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1246 	else
1247 		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1248 			crypto_skcipher_alignmask(any_tfm(cc)) + 1);
1249 }
1250 
1251 static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1252 		       struct dm_crypt_request *dmreq)
1253 {
1254 	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1255 }
1256 
1257 static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
1258 		       struct dm_crypt_request *dmreq)
1259 {
1260 	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1261 
1262 	return (__le64 *) ptr;
1263 }
1264 
1265 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1266 		       struct dm_crypt_request *dmreq)
1267 {
1268 	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1269 		  cc->iv_size + sizeof(uint64_t);
1270 
1271 	return (unsigned int *)ptr;
1272 }
1273 
1274 static void *tag_from_dmreq(struct crypt_config *cc,
1275 				struct dm_crypt_request *dmreq)
1276 {
1277 	struct convert_context *ctx = dmreq->ctx;
1278 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1279 
1280 	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1281 		cc->on_disk_tag_size];
1282 }
1283 
1284 static void *iv_tag_from_dmreq(struct crypt_config *cc,
1285 			       struct dm_crypt_request *dmreq)
1286 {
1287 	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
1288 }
1289 
1290 static int crypt_convert_block_aead(struct crypt_config *cc,
1291 				     struct convert_context *ctx,
1292 				     struct aead_request *req,
1293 				     unsigned int tag_offset)
1294 {
1295 	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1296 	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1297 	struct dm_crypt_request *dmreq;
1298 	u8 *iv, *org_iv, *tag_iv, *tag;
1299 	__le64 *sector;
1300 	int r = 0;
1301 
1302 	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
1303 
1304 	/* Reject unexpected unaligned bio. */
1305 	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1306 		return -EIO;
1307 
1308 	dmreq = dmreq_of_req(cc, req);
1309 	dmreq->iv_sector = ctx->cc_sector;
1310 	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1311 		dmreq->iv_sector >>= cc->sector_shift;
1312 	dmreq->ctx = ctx;
1313 
1314 	*org_tag_of_dmreq(cc, dmreq) = tag_offset;
1315 
1316 	sector = org_sector_of_dmreq(cc, dmreq);
1317 	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1318 
1319 	iv = iv_of_dmreq(cc, dmreq);
1320 	org_iv = org_iv_of_dmreq(cc, dmreq);
1321 	tag = tag_from_dmreq(cc, dmreq);
1322 	tag_iv = iv_tag_from_dmreq(cc, dmreq);
1323 
1324 	/* AEAD request:
1325 	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
1326 	 *  | (authenticated) | (auth+encryption) |              |
1327 	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
1328 	 */
1329 	sg_init_table(dmreq->sg_in, 4);
1330 	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1331 	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1332 	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1333 	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1334 
1335 	sg_init_table(dmreq->sg_out, 4);
1336 	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1337 	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1338 	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1339 	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1340 
1341 	if (cc->iv_gen_ops) {
1342 		/* For READs use IV stored in integrity metadata */
1343 		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1344 			memcpy(org_iv, tag_iv, cc->iv_size);
1345 		} else {
1346 			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1347 			if (r < 0)
1348 				return r;
1349 			/* Store generated IV in integrity metadata */
1350 			if (cc->integrity_iv_size)
1351 				memcpy(tag_iv, org_iv, cc->iv_size);
1352 		}
1353 		/* Working copy of IV, to be modified in crypto API */
1354 		memcpy(iv, org_iv, cc->iv_size);
1355 	}
1356 
1357 	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1358 	if (bio_data_dir(ctx->bio_in) == WRITE) {
1359 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1360 				       cc->sector_size, iv);
1361 		r = crypto_aead_encrypt(req);
1362 		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1363 			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1364 			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1365 	} else {
1366 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1367 				       cc->sector_size + cc->integrity_tag_size, iv);
1368 		r = crypto_aead_decrypt(req);
1369 	}
1370 
1371 	if (r == -EBADMSG) {
1372 		sector_t s = le64_to_cpu(*sector);
1373 
1374 		DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
1375 			    ctx->bio_in->bi_bdev, s);
1376 		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
1377 				 ctx->bio_in, s, 0);
1378 	}
1379 
1380 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1381 		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1382 
1383 	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1384 	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1385 
1386 	return r;
1387 }
1388 
1389 static int crypt_convert_block_skcipher(struct crypt_config *cc,
1390 					struct convert_context *ctx,
1391 					struct skcipher_request *req,
1392 					unsigned int tag_offset)
1393 {
1394 	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1395 	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1396 	struct scatterlist *sg_in, *sg_out;
1397 	struct dm_crypt_request *dmreq;
1398 	u8 *iv, *org_iv, *tag_iv;
1399 	__le64 *sector;
1400 	int r = 0;
1401 
1402 	/* Reject unexpected unaligned bio. */
1403 	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1404 		return -EIO;
1405 
1406 	dmreq = dmreq_of_req(cc, req);
1407 	dmreq->iv_sector = ctx->cc_sector;
1408 	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1409 		dmreq->iv_sector >>= cc->sector_shift;
1410 	dmreq->ctx = ctx;
1411 
1412 	*org_tag_of_dmreq(cc, dmreq) = tag_offset;
1413 
1414 	iv = iv_of_dmreq(cc, dmreq);
1415 	org_iv = org_iv_of_dmreq(cc, dmreq);
1416 	tag_iv = iv_tag_from_dmreq(cc, dmreq);
1417 
1418 	sector = org_sector_of_dmreq(cc, dmreq);
1419 	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1420 
1421 	/* For skcipher we use only the first sg item */
1422 	sg_in  = &dmreq->sg_in[0];
1423 	sg_out = &dmreq->sg_out[0];
1424 
1425 	sg_init_table(sg_in, 1);
1426 	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1427 
1428 	sg_init_table(sg_out, 1);
1429 	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1430 
1431 	if (cc->iv_gen_ops) {
1432 		/* For READs use IV stored in integrity metadata */
1433 		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1434 			memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1435 		} else {
1436 			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1437 			if (r < 0)
1438 				return r;
1439 			/* Data may already have been preprocessed in the generator */
1440 			if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1441 				sg_in = sg_out;
1442 			/* Store generated IV in integrity metadata */
1443 			if (cc->integrity_iv_size)
1444 				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1445 		}
1446 		/* Working copy of IV, to be modified in crypto API */
1447 		memcpy(iv, org_iv, cc->iv_size);
1448 	}
1449 
1450 	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1451 
1452 	if (bio_data_dir(ctx->bio_in) == WRITE)
1453 		r = crypto_skcipher_encrypt(req);
1454 	else
1455 		r = crypto_skcipher_decrypt(req);
1456 
1457 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1458 		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1459 
1460 	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1461 	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1462 
1463 	return r;
1464 }
1465 
1466 static void kcryptd_async_done(void *async_req, int error);
1467 
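/*
 * For multi-key setups (e.g. the 64-key Loop-AES schemes) the transform
 * is selected by the low bits of the sector number; the masking below
 * assumes tfms_count is a power of two.
 */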
1468 static int crypt_alloc_req_skcipher(struct crypt_config *cc,
1469 				     struct convert_context *ctx)
1470 {
1471 	unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);
1472 
1473 	if (!ctx->r.req) {
1474 		ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1475 		if (!ctx->r.req)
1476 			return -ENOMEM;
1477 	}
1478 
1479 	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
1480 
1481 	/*
1482 	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
1483 	 * requests if the driver's request queue is full.
1484 	 */
1485 	skcipher_request_set_callback(ctx->r.req,
1486 	    CRYPTO_TFM_REQ_MAY_BACKLOG,
1487 	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1488 
1489 	return 0;
1490 }
1491 
1492 static int crypt_alloc_req_aead(struct crypt_config *cc,
1493 				 struct convert_context *ctx)
1494 {
1495 	if (!ctx->r.req_aead) {
1496 		ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1497 		if (!ctx->r.req_aead)
1498 			return -ENOMEM;
1499 	}
1500 
1501 	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1502 
1503 	/*
1504 	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
1505 	 * requests if the driver's request queue is full.
1506 	 */
1507 	aead_request_set_callback(ctx->r.req_aead,
1508 	    CRYPTO_TFM_REQ_MAY_BACKLOG,
1509 	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1510 
1511 	return 0;
1512 }
1513 
1514 static int crypt_alloc_req(struct crypt_config *cc,
1515 			    struct convert_context *ctx)
1516 {
1517 	if (crypt_integrity_aead(cc))
1518 		return crypt_alloc_req_aead(cc, ctx);
1519 	else
1520 		return crypt_alloc_req_skcipher(cc, ctx);
1521 }
1522 
1523 static void crypt_free_req_skcipher(struct crypt_config *cc,
1524 				    struct skcipher_request *req, struct bio *base_bio)
1525 {
1526 	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1527 
1528 	if ((struct skcipher_request *)(io + 1) != req)
1529 		mempool_free(req, &cc->req_pool);
1530 }
1531 
1532 static void crypt_free_req_aead(struct crypt_config *cc,
1533 				struct aead_request *req, struct bio *base_bio)
1534 {
1535 	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1536 
1537 	if ((struct aead_request *)(io + 1) != req)
1538 		mempool_free(req, &cc->req_pool);
1539 }
1540 
1541 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1542 {
1543 	if (crypt_integrity_aead(cc))
1544 		crypt_free_req_aead(cc, req, base_bio);
1545 	else
1546 		crypt_free_req_skcipher(cc, req, base_bio);
1547 }
1548 
1549 /*
1550  * Encrypt / decrypt data from one bio to another one (can be the same one)
1551  */
1552 static blk_status_t crypt_convert(struct crypt_config *cc,
1553 			 struct convert_context *ctx, bool atomic, bool reset_pending)
1554 {
1555 	unsigned int tag_offset = 0;
1556 	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1557 	int r;
1558 
1559 	/*
1560 	 * if reset_pending is set we are dealing with the bio for the first time,
1561 	 * else we're continuing to work on the previous bio, so don't mess with
1562 	 * the cc_pending counter
1563 	 */
1564 	if (reset_pending)
1565 		atomic_set(&ctx->cc_pending, 1);
1566 
1567 	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
1568 
1569 		r = crypt_alloc_req(cc, ctx);
1570 		if (r) {
1571 			complete(&ctx->restart);
1572 			return BLK_STS_DEV_RESOURCE;
1573 		}
1574 
1575 		atomic_inc(&ctx->cc_pending);
1576 
1577 		if (crypt_integrity_aead(cc))
1578 			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1579 		else
1580 			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
1581 
1582 		switch (r) {
1583 		/*
1584 		 * The request was queued by a crypto driver
1585 		 * but the driver request queue is full, let's wait.
1586 		 */
1587 		case -EBUSY:
1588 			if (in_interrupt()) {
1589 				if (try_wait_for_completion(&ctx->restart)) {
1590 					/*
1591 					 * we don't have to block to wait for completion,
1592 					 * so proceed
1593 					 */
1594 				} else {
1595 					/*
1596 					 * we can't wait for completion without blocking
1597 					 * exit and continue processing in a workqueue
1598 					 */
1599 					ctx->r.req = NULL;
1600 					ctx->cc_sector += sector_step;
1601 					tag_offset++;
1602 					return BLK_STS_DEV_RESOURCE;
1603 				}
1604 			} else {
1605 				wait_for_completion(&ctx->restart);
1606 			}
1607 			reinit_completion(&ctx->restart);
1608 			fallthrough;
1609 		/*
1610 		 * The request is queued and processed asynchronously,
1611 		 * completion function kcryptd_async_done() will be called.
1612 		 */
1613 		case -EINPROGRESS:
1614 			ctx->r.req = NULL;
1615 			ctx->cc_sector += sector_step;
1616 			tag_offset++;
1617 			continue;
1618 		/*
1619 		 * The request was already processed (synchronously).
1620 		 */
1621 		case 0:
1622 			atomic_dec(&ctx->cc_pending);
1623 			ctx->cc_sector += sector_step;
1624 			tag_offset++;
1625 			if (!atomic)
1626 				cond_resched();
1627 			continue;
1628 		/*
1629 		 * There was a data integrity error.
1630 		 */
1631 		case -EBADMSG:
1632 			atomic_dec(&ctx->cc_pending);
1633 			return BLK_STS_PROTECTION;
1634 		/*
1635 		 * There was an error while processing the request.
1636 		 */
1637 		default:
1638 			atomic_dec(&ctx->cc_pending);
1639 			return BLK_STS_IOERR;
1640 		}
1641 	}
1642 
1643 	return 0;
1644 }
1645 
1646 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1647 
1648 /*
1649  * Generate a new unfragmented bio with the given size
1650  * This should never violate the device limitations (but only because
1651  * max_segment_size is being constrained to PAGE_SIZE).
1652  *
1653  * This function may be called concurrently. If we allocate from the mempool
1654  * concurrently, there is a possibility of deadlock. For example, if we have
1655  * mempool of 256 pages, two processes, each wanting 256, pages allocate from
1656  * the mempool concurrently, it may deadlock in a situation where both processes
1657  * have allocated 128 pages and the mempool is exhausted.
1658  *
1659  * In order to avoid this scenario we allocate the pages under a mutex.
1660  *
1661  * In order to not degrade performance with excessive locking, we try
1662  * non-blocking allocations without a mutex first but on failure we fallback
1663  * to blocking allocations with a mutex.
1664  */
1665 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
1666 {
1667 	struct crypt_config *cc = io->cc;
1668 	struct bio *clone;
1669 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1670 	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
1671 	unsigned int i, len, remaining_size;
1672 	struct page *page;
1673 
1674 retry:
1675 	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1676 		mutex_lock(&cc->bio_alloc_lock);
1677 
1678 	clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
1679 				 GFP_NOIO, &cc->bs);
1680 	clone->bi_private = io;
1681 	clone->bi_end_io = crypt_endio;
1682 
1683 	remaining_size = size;
1684 
1685 	for (i = 0; i < nr_iovecs; i++) {
1686 		page = mempool_alloc(&cc->page_pool, gfp_mask);
1687 		if (!page) {
1688 			crypt_free_buffer_pages(cc, clone);
1689 			bio_put(clone);
1690 			gfp_mask |= __GFP_DIRECT_RECLAIM;
1691 			goto retry;
1692 		}
1693 
1694 		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
1695 
1696 		__bio_add_page(clone, page, len, 0);
1697 		remaining_size -= len;
1698 	}
1699 
1700 	/* Allocate space for integrity tags */
1701 	if (dm_crypt_integrity_io_alloc(io, clone)) {
1702 		crypt_free_buffer_pages(cc, clone);
1703 		bio_put(clone);
1704 		clone = NULL;
1705 	}
1706 
1707 	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1708 		mutex_unlock(&cc->bio_alloc_lock);
1709 
1710 	return clone;
1711 }
1712 
1713 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1714 {
1715 	struct bio_vec *bv;
1716 	struct bvec_iter_all iter_all;
1717 
1718 	bio_for_each_segment_all(bv, clone, iter_all) {
1719 		BUG_ON(!bv->bv_page);
1720 		mempool_free(bv->bv_page, &cc->page_pool);
1721 	}
1722 }
1723 
1724 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1725 			  struct bio *bio, sector_t sector)
1726 {
1727 	io->cc = cc;
1728 	io->base_bio = bio;
1729 	io->sector = sector;
1730 	io->error = 0;
1731 	io->ctx.r.req = NULL;
1732 	io->integrity_metadata = NULL;
1733 	io->integrity_metadata_from_pool = false;
1734 	io->in_tasklet = false;
1735 	atomic_set(&io->io_pending, 0);
1736 }
1737 
1738 static void crypt_inc_pending(struct dm_crypt_io *io)
1739 {
1740 	atomic_inc(&io->io_pending);
1741 }
1742 
1743 static void kcryptd_io_bio_endio(struct work_struct *work)
1744 {
1745 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1746 
1747 	bio_endio(io->base_bio);
1748 }
1749 
1750 /*
1751  * One of the bios was finished. Check for completion of
1752  * the whole request and correctly clean up the buffer.
1753  */
1754 static void crypt_dec_pending(struct dm_crypt_io *io)
1755 {
1756 	struct crypt_config *cc = io->cc;
1757 	struct bio *base_bio = io->base_bio;
1758 	blk_status_t error = io->error;
1759 
1760 	if (!atomic_dec_and_test(&io->io_pending))
1761 		return;
1762 
1763 	if (io->ctx.r.req)
1764 		crypt_free_req(cc, io->ctx.r.req, base_bio);
1765 
1766 	if (unlikely(io->integrity_metadata_from_pool))
1767 		mempool_free(io->integrity_metadata, &io->cc->tag_pool);
1768 	else
1769 		kfree(io->integrity_metadata);
1770 
1771 	base_bio->bi_status = error;
1772 
1773 	/*
1774 	 * If we are running this function from our tasklet,
1775 	 * we can't call bio_endio() here, because it will call
1776 	 * clone_endio() from dm.c, which in turn will
1777 	 * free the current struct dm_crypt_io structure with
1778 	 * our tasklet. In this case we need to delay bio_endio()
1779 	 * execution to after the tasklet is done and dequeued.
1780 	 */
1781 	if (io->in_tasklet) {
1782 		INIT_WORK(&io->work, kcryptd_io_bio_endio);
1783 		queue_work(cc->io_queue, &io->work);
1784 		return;
1785 	}
1786 
1787 	bio_endio(base_bio);
1788 }
1789 
1790 /*
1791  * kcryptd/kcryptd_io:
1792  *
1793  * Needed because it would be very unwise to do decryption in an
1794  * interrupt context.
1795  *
1796  * kcryptd performs the actual encryption or decryption.
1797  *
1798  * kcryptd_io performs the IO submission.
1799  *
1800  * They must be separated as otherwise the final stages could be
1801  * starved by new requests which can block in the first stages due
1802  * to memory allocation.
1803  *
1804  * The work is done per-CPU, globally for all dm-crypt instances.
1805  * They should not depend on each other and do not block.
1806  */
1807 static void crypt_endio(struct bio *clone)
1808 {
1809 	struct dm_crypt_io *io = clone->bi_private;
1810 	struct crypt_config *cc = io->cc;
1811 	unsigned int rw = bio_data_dir(clone);
1812 	blk_status_t error;
1813 
1814 	/*
1815 	 * free the processed pages
1816 	 */
1817 	if (rw == WRITE)
1818 		crypt_free_buffer_pages(cc, clone);
1819 
1820 	error = clone->bi_status;
1821 	bio_put(clone);
1822 
1823 	if (rw == READ && !error) {
1824 		kcryptd_queue_crypt(io);
1825 		return;
1826 	}
1827 
1828 	if (unlikely(error))
1829 		io->error = error;
1830 
1831 	crypt_dec_pending(io);
1832 }
1833 
1834 #define CRYPT_MAP_READ_GFP GFP_NOWAIT
1835 
1836 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1837 {
1838 	struct crypt_config *cc = io->cc;
1839 	struct bio *clone;
1840 
1841 	/*
1842 	 * We need the original biovec array in order to decrypt the whole bio
1843 	 * data *afterwards* -- thanks to immutable biovecs we don't need to
1844 	 * worry about the block layer modifying the biovec array; so leverage
1845 	 * bio_alloc_clone().
1846 	 */
1847 	clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
1848 	if (!clone)
1849 		return 1;
1850 	clone->bi_private = io;
1851 	clone->bi_end_io = crypt_endio;
1852 
1853 	crypt_inc_pending(io);
1854 
1855 	clone->bi_iter.bi_sector = cc->start + io->sector;
1856 
1857 	if (dm_crypt_integrity_io_alloc(io, clone)) {
1858 		crypt_dec_pending(io);
1859 		bio_put(clone);
1860 		return 1;
1861 	}
1862 
1863 	dm_submit_bio_remap(io->base_bio, clone);
1864 	return 0;
1865 }
1866 
1867 static void kcryptd_io_read_work(struct work_struct *work)
1868 {
1869 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1870 
1871 	crypt_inc_pending(io);
1872 	if (kcryptd_io_read(io, GFP_NOIO))
1873 		io->error = BLK_STS_RESOURCE;
1874 	crypt_dec_pending(io);
1875 }
1876 
1877 static void kcryptd_queue_read(struct dm_crypt_io *io)
1878 {
1879 	struct crypt_config *cc = io->cc;
1880 
1881 	INIT_WORK(&io->work, kcryptd_io_read_work);
1882 	queue_work(cc->io_queue, &io->work);
1883 }
1884 
1885 static void kcryptd_io_write(struct dm_crypt_io *io)
1886 {
1887 	struct bio *clone = io->ctx.bio_out;
1888 
1889 	dm_submit_bio_remap(io->base_bio, clone);
1890 }
1891 
1892 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1893 
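/*
 * Dedicated write thread: pops clone bios from the rb-tree (kept sorted by
 * sector in kcryptd_crypt_write_io_submit()) and submits them in ascending
 * sector order under a single blk plug, which tends to keep writes
 * sequential for the underlying device.
 */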
1894 static int dmcrypt_write(void *data)
1895 {
1896 	struct crypt_config *cc = data;
1897 	struct dm_crypt_io *io;
1898 
1899 	while (1) {
1900 		struct rb_root write_tree;
1901 		struct blk_plug plug;
1902 
1903 		spin_lock_irq(&cc->write_thread_lock);
1904 continue_locked:
1905 
1906 		if (!RB_EMPTY_ROOT(&cc->write_tree))
1907 			goto pop_from_list;
1908 
1909 		set_current_state(TASK_INTERRUPTIBLE);
1910 
1911 		spin_unlock_irq(&cc->write_thread_lock);
1912 
1913 		if (unlikely(kthread_should_stop())) {
1914 			set_current_state(TASK_RUNNING);
1915 			break;
1916 		}
1917 
1918 		schedule();
1919 
1920 		set_current_state(TASK_RUNNING);
1921 		spin_lock_irq(&cc->write_thread_lock);
1922 		goto continue_locked;
1923 
1924 pop_from_list:
1925 		write_tree = cc->write_tree;
1926 		cc->write_tree = RB_ROOT;
1927 		spin_unlock_irq(&cc->write_thread_lock);
1928 
1929 		BUG_ON(rb_parent(write_tree.rb_node));
1930 
1931 		/*
1932 		 * Note: we cannot walk the tree here with rb_next because
1933 		 * the structures may be freed when kcryptd_io_write is called.
1934 		 */
1935 		blk_start_plug(&plug);
1936 		do {
1937 			io = crypt_io_from_node(rb_first(&write_tree));
1938 			rb_erase(&io->rb_node, &write_tree);
1939 			kcryptd_io_write(io);
1940 			cond_resched();
1941 		} while (!RB_EMPTY_ROOT(&write_tree));
1942 		blk_finish_plug(&plug);
1943 	}
1944 	return 0;
1945 }
1946 
1947 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1948 {
1949 	struct bio *clone = io->ctx.bio_out;
1950 	struct crypt_config *cc = io->cc;
1951 	unsigned long flags;
1952 	sector_t sector;
1953 	struct rb_node **rbp, *parent;
1954 
1955 	if (unlikely(io->error)) {
1956 		crypt_free_buffer_pages(cc, clone);
1957 		bio_put(clone);
1958 		crypt_dec_pending(io);
1959 		return;
1960 	}
1961 
1962 	/* crypt_convert should have filled the clone bio */
1963 	BUG_ON(io->ctx.iter_out.bi_size);
1964 
1965 	clone->bi_iter.bi_sector = cc->start + io->sector;
1966 
1967 	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
1968 	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
1969 		dm_submit_bio_remap(io->base_bio, clone);
1970 		return;
1971 	}
1972 
1973 	spin_lock_irqsave(&cc->write_thread_lock, flags);
1974 	if (RB_EMPTY_ROOT(&cc->write_tree))
1975 		wake_up_process(cc->write_thread);
1976 	rbp = &cc->write_tree.rb_node;
1977 	parent = NULL;
1978 	sector = io->sector;
1979 	while (*rbp) {
1980 		parent = *rbp;
1981 		if (sector < crypt_io_from_node(parent)->sector)
1982 			rbp = &(*rbp)->rb_left;
1983 		else
1984 			rbp = &(*rbp)->rb_right;
1985 	}
1986 	rb_link_node(&io->rb_node, parent, rbp);
1987 	rb_insert_color(&io->rb_node, &cc->write_tree);
1988 	spin_unlock_irqrestore(&cc->write_thread_lock, flags);
1989 }
1990 
1991 static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
1992 				       struct convert_context *ctx)
1993 
1994 {
1995 	if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
1996 		return false;
1997 
1998 	/*
1999 	 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
2000 	 * constraints so they do not need to be issued inline by
2001 	 * kcryptd_crypt_write_convert().
2002 	 */
2003 	switch (bio_op(ctx->bio_in)) {
2004 	case REQ_OP_WRITE:
2005 	case REQ_OP_WRITE_ZEROES:
2006 		return true;
2007 	default:
2008 		return false;
2009 	}
2010 }
2011 
2012 static void kcryptd_crypt_write_continue(struct work_struct *work)
2013 {
2014 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2015 	struct crypt_config *cc = io->cc;
2016 	struct convert_context *ctx = &io->ctx;
2017 	int crypt_finished;
2018 	sector_t sector = io->sector;
2019 	blk_status_t r;
2020 
2021 	wait_for_completion(&ctx->restart);
2022 	reinit_completion(&ctx->restart);
2023 
2024 	r = crypt_convert(cc, &io->ctx, true, false);
2025 	if (r)
2026 		io->error = r;
2027 	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2028 	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2029 		/* Wait for completion signaled by kcryptd_async_done() */
2030 		wait_for_completion(&ctx->restart);
2031 		crypt_finished = 1;
2032 	}
2033 
2034 	/* Encryption was already finished, submit io now */
2035 	if (crypt_finished) {
2036 		kcryptd_crypt_write_io_submit(io, 0);
2037 		io->sector = sector;
2038 	}
2039 
2040 	crypt_dec_pending(io);
2041 }
2042 
2043 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
2044 {
2045 	struct crypt_config *cc = io->cc;
2046 	struct convert_context *ctx = &io->ctx;
2047 	struct bio *clone;
2048 	int crypt_finished;
2049 	sector_t sector = io->sector;
2050 	blk_status_t r;
2051 
2052 	/*
2053 	 * Prevent io from disappearing until this function completes.
2054 	 */
2055 	crypt_inc_pending(io);
2056 	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
2057 
2058 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
2059 	if (unlikely(!clone)) {
2060 		io->error = BLK_STS_IOERR;
2061 		goto dec;
2062 	}
2063 
2064 	io->ctx.bio_out = clone;
2065 	io->ctx.iter_out = clone->bi_iter;
2066 
2067 	sector += bio_sectors(clone);
2068 
2069 	crypt_inc_pending(io);
2070 	r = crypt_convert(cc, ctx,
2071 			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
2072 	/*
2073 	 * Crypto API backlogged the request, because its queue was full
2074 	 * and we're in softirq context, so continue from a workqueue
2075 	 * (TODO: is it actually possible to be in softirq in the write path?)
2076 	 */
2077 	if (r == BLK_STS_DEV_RESOURCE) {
2078 		INIT_WORK(&io->work, kcryptd_crypt_write_continue);
2079 		queue_work(cc->crypt_queue, &io->work);
2080 		return;
2081 	}
2082 	if (r)
2083 		io->error = r;
2084 	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2085 	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2086 		/* Wait for completion signaled by kcryptd_async_done() */
2087 		wait_for_completion(&ctx->restart);
2088 		crypt_finished = 1;
2089 	}
2090 
2091 	/* Encryption was already finished, submit io now */
2092 	if (crypt_finished) {
2093 		kcryptd_crypt_write_io_submit(io, 0);
2094 		io->sector = sector;
2095 	}
2096 
2097 dec:
2098 	crypt_dec_pending(io);
2099 }
2100 
2101 static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
2102 {
2103 	crypt_dec_pending(io);
2104 }
2105 
2106 static void kcryptd_crypt_read_continue(struct work_struct *work)
2107 {
2108 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2109 	struct crypt_config *cc = io->cc;
2110 	blk_status_t r;
2111 
2112 	wait_for_completion(&io->ctx.restart);
2113 	reinit_completion(&io->ctx.restart);
2114 
2115 	r = crypt_convert(cc, &io->ctx, true, false);
2116 	if (r)
2117 		io->error = r;
2118 
2119 	if (atomic_dec_and_test(&io->ctx.cc_pending))
2120 		kcryptd_crypt_read_done(io);
2121 
2122 	crypt_dec_pending(io);
2123 }
2124 
2125 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
2126 {
2127 	struct crypt_config *cc = io->cc;
2128 	blk_status_t r;
2129 
2130 	crypt_inc_pending(io);
2131 
2132 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
2133 			   io->sector);
2134 
2135 	r = crypt_convert(cc, &io->ctx,
2136 			  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2137 	/*
2138 	 * Crypto API backlogged the request, because its queue was full
2139 	 * and we're in softirq context, so continue from a workqueue
2140 	 */
2141 	if (r == BLK_STS_DEV_RESOURCE) {
2142 		INIT_WORK(&io->work, kcryptd_crypt_read_continue);
2143 		queue_work(cc->crypt_queue, &io->work);
2144 		return;
2145 	}
2146 	if (r)
2147 		io->error = r;
2148 
2149 	if (atomic_dec_and_test(&io->ctx.cc_pending))
2150 		kcryptd_crypt_read_done(io);
2151 
2152 	crypt_dec_pending(io);
2153 }
2154 
2155 static void kcryptd_async_done(void *data, int error)
2156 {
2157 	struct dm_crypt_request *dmreq = data;
2158 	struct convert_context *ctx = dmreq->ctx;
2159 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
2160 	struct crypt_config *cc = io->cc;
2161 
2162 	/*
2163 	 * A request from crypto driver backlog is going to be processed now,
2164 	 * finish the completion and continue in crypt_convert().
2165 	 * (Callback will be called for the second time for this request.)
2166 	 */
2167 	if (error == -EINPROGRESS) {
2168 		complete(&ctx->restart);
2169 		return;
2170 	}
2171 
2172 	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
2173 		error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
2174 
2175 	if (error == -EBADMSG) {
2176 		sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
2177 
2178 		DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
2179 			    ctx->bio_in->bi_bdev, s);
2180 		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
2181 				 ctx->bio_in, s, 0);
2182 		io->error = BLK_STS_PROTECTION;
2183 	} else if (error < 0)
2184 		io->error = BLK_STS_IOERR;
2185 
2186 	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
2187 
2188 	if (!atomic_dec_and_test(&ctx->cc_pending))
2189 		return;
2190 
2191 	/*
2192 	 * The request is fully completed: for inline writes, let
2193 	 * kcryptd_crypt_write_convert() do the IO submission.
2194 	 */
2195 	if (bio_data_dir(io->base_bio) == READ) {
2196 		kcryptd_crypt_read_done(io);
2197 		return;
2198 	}
2199 
2200 	if (kcryptd_crypt_write_inline(cc, ctx)) {
2201 		complete(&ctx->restart);
2202 		return;
2203 	}
2204 
2205 	kcryptd_crypt_write_io_submit(io, 1);
2206 }
2207 
2208 static void kcryptd_crypt(struct work_struct *work)
2209 {
2210 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2211 
2212 	if (bio_data_dir(io->base_bio) == READ)
2213 		kcryptd_crypt_read_convert(io);
2214 	else
2215 		kcryptd_crypt_write_convert(io);
2216 }
2217 
2218 static void kcryptd_crypt_tasklet(unsigned long work)
2219 {
2220 	kcryptd_crypt((struct work_struct *)work);
2221 }
2222 
2223 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
2224 {
2225 	struct crypt_config *cc = io->cc;
2226 
2227 	if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2228 	    (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2229 		/*
2230 		 * in_hardirq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
2231 		 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
2232 		 * it is being executed with irqs disabled.
2233 		 */
2234 		if (in_hardirq() || irqs_disabled()) {
2235 			io->in_tasklet = true;
2236 			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
2237 			tasklet_schedule(&io->tasklet);
2238 			return;
2239 		}
2240 
2241 		kcryptd_crypt(&io->work);
2242 		return;
2243 	}
2244 
2245 	INIT_WORK(&io->work, kcryptd_crypt);
2246 	queue_work(cc->crypt_queue, &io->work);
2247 }
2248 
2249 static void crypt_free_tfms_aead(struct crypt_config *cc)
2250 {
2251 	if (!cc->cipher_tfm.tfms_aead)
2252 		return;
2253 
2254 	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2255 		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
2256 		cc->cipher_tfm.tfms_aead[0] = NULL;
2257 	}
2258 
2259 	kfree(cc->cipher_tfm.tfms_aead);
2260 	cc->cipher_tfm.tfms_aead = NULL;
2261 }
2262 
2263 static void crypt_free_tfms_skcipher(struct crypt_config *cc)
2264 {
2265 	unsigned int i;
2266 
2267 	if (!cc->cipher_tfm.tfms)
2268 		return;
2269 
2270 	for (i = 0; i < cc->tfms_count; i++)
2271 		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
2272 			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
2273 			cc->cipher_tfm.tfms[i] = NULL;
2274 		}
2275 
2276 	kfree(cc->cipher_tfm.tfms);
2277 	cc->cipher_tfm.tfms = NULL;
2278 }
2279 
2280 static void crypt_free_tfms(struct crypt_config *cc)
2281 {
2282 	if (crypt_integrity_aead(cc))
2283 		crypt_free_tfms_aead(cc);
2284 	else
2285 		crypt_free_tfms_skcipher(cc);
2286 }
2287 
2288 static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
2289 {
2290 	unsigned int i;
2291 	int err;
2292 
2293 	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
2294 				      sizeof(struct crypto_skcipher *),
2295 				      GFP_KERNEL);
2296 	if (!cc->cipher_tfm.tfms)
2297 		return -ENOMEM;
2298 
2299 	for (i = 0; i < cc->tfms_count; i++) {
2300 		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2301 						CRYPTO_ALG_ALLOCATES_MEMORY);
2302 		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
2303 			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
2304 			crypt_free_tfms(cc);
2305 			return err;
2306 		}
2307 	}
2308 
2309 	/*
2310 	 * dm-crypt performance can vary greatly depending on which crypto
2311 	 * algorithm implementation is used.  Help people debug performance
2312 	 * problems by logging the ->cra_driver_name.
2313 	 */
2314 	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
2315 	       crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
2316 	return 0;
2317 }
2318 
2319 static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
2320 {
2321 	int err;
2322 
2323 	cc->cipher_tfm.tfms_aead = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
2324 	if (!cc->cipher_tfm.tfms_aead)
2325 		return -ENOMEM;
2326 
2327 	cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2328 						CRYPTO_ALG_ALLOCATES_MEMORY);
2329 	if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2330 		err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
2331 		crypt_free_tfms(cc);
2332 		return err;
2333 	}
2334 
2335 	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
2336 	       crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
2337 	return 0;
2338 }
2339 
2340 static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
2341 {
2342 	if (crypt_integrity_aead(cc))
2343 		return crypt_alloc_tfms_aead(cc, ciphermode);
2344 	else
2345 		return crypt_alloc_tfms_skcipher(cc, ciphermode);
2346 }
2347 
2348 static unsigned int crypt_subkey_size(struct crypt_config *cc)
2349 {
2350 	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
2351 }
2352 
2353 static unsigned int crypt_authenckey_size(struct crypt_config *cc)
2354 {
2355 	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
2356 }
2357 
2358 /*
2359  * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
2360  * the key must be passed to the crypto API in a special packed format.
2361  * This function converts cc->key into this special format.
2362  */
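/*
 * Resulting cc->authenc_key layout (the crypto_authenc packed key format),
 * sketched for illustration:
 *
 *   | struct rtattr, type CRYPTO_AUTHENC_KEYA_PARAM           |
 *   | struct crypto_authenc_key_param { enckeylen (be32) }    |
 *   | authentication (HMAC) key, authkeylen bytes             |
 *   | encryption key, enckeylen bytes                         |
 *
 * whereas cc->key itself stores the encryption key first and the HMAC key
 * last (see crypt_setkey()).
 */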
2363 static void crypt_copy_authenckey(char *p, const void *key,
2364 				  unsigned int enckeylen, unsigned int authkeylen)
2365 {
2366 	struct crypto_authenc_key_param *param;
2367 	struct rtattr *rta;
2368 
2369 	rta = (struct rtattr *)p;
2370 	param = RTA_DATA(rta);
2371 	param->enckeylen = cpu_to_be32(enckeylen);
2372 	rta->rta_len = RTA_LENGTH(sizeof(*param));
2373 	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
2374 	p += RTA_SPACE(sizeof(*param));
2375 	memcpy(p, key + enckeylen, authkeylen);
2376 	p += authkeylen;
2377 	memcpy(p, key, enckeylen);
2378 }
2379 
2380 static int crypt_setkey(struct crypt_config *cc)
2381 {
2382 	unsigned int subkey_size;
2383 	int err = 0, i, r;
2384 
2385 	/* Ignore extra keys (which are used for IV etc) */
2386 	subkey_size = crypt_subkey_size(cc);
2387 
2388 	if (crypt_integrity_hmac(cc)) {
2389 		if (subkey_size < cc->key_mac_size)
2390 			return -EINVAL;
2391 
2392 		crypt_copy_authenckey(cc->authenc_key, cc->key,
2393 				      subkey_size - cc->key_mac_size,
2394 				      cc->key_mac_size);
2395 	}
2396 
2397 	for (i = 0; i < cc->tfms_count; i++) {
2398 		if (crypt_integrity_hmac(cc))
2399 			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2400 				cc->authenc_key, crypt_authenckey_size(cc));
2401 		else if (crypt_integrity_aead(cc))
2402 			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2403 					       cc->key + (i * subkey_size),
2404 					       subkey_size);
2405 		else
2406 			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
2407 						   cc->key + (i * subkey_size),
2408 						   subkey_size);
2409 		if (r)
2410 			err = r;
2411 	}
2412 
2413 	if (crypt_integrity_hmac(cc))
2414 		memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
2415 
2416 	return err;
2417 }
2418 
2419 #ifdef CONFIG_KEYS
2420 
2421 static bool contains_whitespace(const char *str)
2422 {
2423 	while (*str)
2424 		if (isspace(*str++))
2425 			return true;
2426 	return false;
2427 }
2428 
2429 static int set_key_user(struct crypt_config *cc, struct key *key)
2430 {
2431 	const struct user_key_payload *ukp;
2432 
2433 	ukp = user_key_payload_locked(key);
2434 	if (!ukp)
2435 		return -EKEYREVOKED;
2436 
2437 	if (cc->key_size != ukp->datalen)
2438 		return -EINVAL;
2439 
2440 	memcpy(cc->key, ukp->data, cc->key_size);
2441 
2442 	return 0;
2443 }
2444 
2445 static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2446 {
2447 	const struct encrypted_key_payload *ekp;
2448 
2449 	ekp = key->payload.data[0];
2450 	if (!ekp)
2451 		return -EKEYREVOKED;
2452 
2453 	if (cc->key_size != ekp->decrypted_datalen)
2454 		return -EINVAL;
2455 
2456 	memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2457 
2458 	return 0;
2459 }
2460 
2461 static int set_key_trusted(struct crypt_config *cc, struct key *key)
2462 {
2463 	const struct trusted_key_payload *tkp;
2464 
2465 	tkp = key->payload.data[0];
2466 	if (!tkp)
2467 		return -EKEYREVOKED;
2468 
2469 	if (cc->key_size != tkp->key_len)
2470 		return -EINVAL;
2471 
2472 	memcpy(cc->key, tkp->key, cc->key_size);
2473 
2474 	return 0;
2475 }
2476 
2477 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2478 {
2479 	char *new_key_string, *key_desc;
2480 	int ret;
2481 	struct key_type *type;
2482 	struct key *key;
2483 	int (*set_key)(struct crypt_config *cc, struct key *key);
2484 
2485 	/*
2486 	 * Reject key_string with whitespace. dm core currently lacks code for
2487 	 * proper whitespace escaping in arguments on the DM_TABLE_STATUS path.
2488 	 */
2489 	if (contains_whitespace(key_string)) {
2490 		DMERR("whitespace chars not allowed in key string");
2491 		return -EINVAL;
2492 	}
2493 
2494 	/* look for next ':' separating key_type from key_description */
2495 	key_desc = strchr(key_string, ':');
2496 	if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
2497 		return -EINVAL;
2498 
2499 	if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
2500 		type = &key_type_logon;
2501 		set_key = set_key_user;
2502 	} else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
2503 		type = &key_type_user;
2504 		set_key = set_key_user;
2505 	} else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) &&
2506 		   !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
2507 		type = &key_type_encrypted;
2508 		set_key = set_key_encrypted;
2509 	} else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
2510 		   !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
2511 		type = &key_type_trusted;
2512 		set_key = set_key_trusted;
2513 	} else {
2514 		return -EINVAL;
2515 	}
2516 
2517 	new_key_string = kstrdup(key_string, GFP_KERNEL);
2518 	if (!new_key_string)
2519 		return -ENOMEM;
2520 
2521 	key = request_key(type, key_desc + 1, NULL);
2522 	if (IS_ERR(key)) {
2523 		kfree_sensitive(new_key_string);
2524 		return PTR_ERR(key);
2525 	}
2526 
2527 	down_read(&key->sem);
2528 
2529 	ret = set_key(cc, key);
2530 	if (ret < 0) {
2531 		up_read(&key->sem);
2532 		key_put(key);
2533 		kfree_sensitive(new_key_string);
2534 		return ret;
2535 	}
2536 
2537 	up_read(&key->sem);
2538 	key_put(key);
2539 
2540 	/* clear the flag since following operations may invalidate previously valid key */
2541 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2542 
2543 	ret = crypt_setkey(cc);
2544 
2545 	if (!ret) {
2546 		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2547 		kfree_sensitive(cc->key_string);
2548 		cc->key_string = new_key_string;
2549 	} else
2550 		kfree_sensitive(new_key_string);
2551 
2552 	return ret;
2553 }
2554 
2555 static int get_key_size(char **key_string)
2556 {
2557 	char *colon, dummy;
2558 	int ret;
2559 
2560 	if (*key_string[0] != ':')
2561 		return strlen(*key_string) >> 1;
2562 
2563 	/* look for next ':' in key string */
2564 	colon = strpbrk(*key_string + 1, ":");
2565 	if (!colon)
2566 		return -EINVAL;
2567 
2568 	if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
2569 		return -EINVAL;
2570 
2571 	*key_string = colon;
2572 
2573 	/* remaining key string should be :<logon|user>:<key_desc> */
2574 
2575 	return ret;
2576 }
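/*
 * Example of the keyring key syntax accepted above ("mykey" is just a
 * placeholder description):
 *
 *   :64:logon:mykey
 *
 * get_key_size() returns 64 and leaves the string pointing at
 * ":logon:mykey"; crypt_set_key() strips the leading ':' and
 * crypt_set_keyring_key() then looks up the logon key "mykey" via
 * request_key(), expecting a 64-byte payload.
 */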
2577 
2578 #else
2579 
2580 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2581 {
2582 	return -EINVAL;
2583 }
2584 
2585 static int get_key_size(char **key_string)
2586 {
2587 	return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
2588 }
2589 
2590 #endif /* CONFIG_KEYS */
2591 
2592 static int crypt_set_key(struct crypt_config *cc, char *key)
2593 {
2594 	int r = -EINVAL;
2595 	int key_string_len = strlen(key);
2596 
2597 	/* Hyphen (which gives a key_size of zero) means there is no key. */
2598 	if (!cc->key_size && strcmp(key, "-"))
2599 		goto out;
2600 
2601 	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
2602 	if (key[0] == ':') {
2603 		r = crypt_set_keyring_key(cc, key + 1);
2604 		goto out;
2605 	}
2606 
2607 	/* clear the flag since following operations may invalidate previously valid key */
2608 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2609 
2610 	/* wipe references to any kernel keyring key */
2611 	kfree_sensitive(cc->key_string);
2612 	cc->key_string = NULL;
2613 
2614 	/* Decode key from its hex representation. */
2615 	if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2616 		goto out;
2617 
2618 	r = crypt_setkey(cc);
2619 	if (!r)
2620 		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2621 
2622 out:
2623 	/* Hex key string not needed after here, so wipe it. */
2624 	memset(key, '0', key_string_len);
2625 
2626 	return r;
2627 }
2628 
2629 static int crypt_wipe_key(struct crypt_config *cc)
2630 {
2631 	int r;
2632 
2633 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2634 	get_random_bytes(&cc->key, cc->key_size);
2635 
2636 	/* Wipe IV private keys */
2637 	if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2638 		r = cc->iv_gen_ops->wipe(cc);
2639 		if (r)
2640 			return r;
2641 	}
2642 
2643 	kfree_sensitive(cc->key_string);
2644 	cc->key_string = NULL;
2645 	r = crypt_setkey(cc);
2646 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
2647 
2648 	return r;
2649 }
2650 
2651 static void crypt_calculate_pages_per_client(void)
2652 {
2653 	unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
2654 
2655 	if (!dm_crypt_clients_n)
2656 		return;
2657 
2658 	pages /= dm_crypt_clients_n;
2659 	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
2660 		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
2661 	dm_crypt_pages_per_client = pages;
2662 }
2663 
2664 static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2665 {
2666 	struct crypt_config *cc = pool_data;
2667 	struct page *page;
2668 
2669 	/*
2670 	 * Note, percpu_counter_read_positive() may over (and under) estimate
2671 	 * the current usage by at most (batch - 1) * num_online_cpus() pages,
2672 	 * but avoids potential spinlock contention of an exact result.
2673 	 */
2674 	if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
2675 	    likely(gfp_mask & __GFP_NORETRY))
2676 		return NULL;
2677 
2678 	page = alloc_page(gfp_mask);
2679 	if (likely(page != NULL))
2680 		percpu_counter_add(&cc->n_allocated_pages, 1);
2681 
2682 	return page;
2683 }
2684 
2685 static void crypt_page_free(void *page, void *pool_data)
2686 {
2687 	struct crypt_config *cc = pool_data;
2688 
2689 	__free_page(page);
2690 	percpu_counter_sub(&cc->n_allocated_pages, 1);
2691 }
2692 
2693 static void crypt_dtr(struct dm_target *ti)
2694 {
2695 	struct crypt_config *cc = ti->private;
2696 
2697 	ti->private = NULL;
2698 
2699 	if (!cc)
2700 		return;
2701 
2702 	if (cc->write_thread)
2703 		kthread_stop(cc->write_thread);
2704 
2705 	if (cc->io_queue)
2706 		destroy_workqueue(cc->io_queue);
2707 	if (cc->crypt_queue)
2708 		destroy_workqueue(cc->crypt_queue);
2709 
2710 	crypt_free_tfms(cc);
2711 
2712 	bioset_exit(&cc->bs);
2713 
2714 	mempool_exit(&cc->page_pool);
2715 	mempool_exit(&cc->req_pool);
2716 	mempool_exit(&cc->tag_pool);
2717 
2718 	WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2719 	percpu_counter_destroy(&cc->n_allocated_pages);
2720 
2721 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2722 		cc->iv_gen_ops->dtr(cc);
2723 
2724 	if (cc->dev)
2725 		dm_put_device(ti, cc->dev);
2726 
2727 	kfree_sensitive(cc->cipher_string);
2728 	kfree_sensitive(cc->key_string);
2729 	kfree_sensitive(cc->cipher_auth);
2730 	kfree_sensitive(cc->authenc_key);
2731 
2732 	mutex_destroy(&cc->bio_alloc_lock);
2733 
2734 	/* Must zero key material before freeing */
2735 	kfree_sensitive(cc);
2736 
2737 	spin_lock(&dm_crypt_clients_lock);
2738 	WARN_ON(!dm_crypt_clients_n);
2739 	dm_crypt_clients_n--;
2740 	crypt_calculate_pages_per_client();
2741 	spin_unlock(&dm_crypt_clients_lock);
2742 
2743 	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
2744 }
2745 
2746 static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
2747 {
2748 	struct crypt_config *cc = ti->private;
2749 
2750 	if (crypt_integrity_aead(cc))
2751 		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2752 	else
2753 		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2754 
2755 	if (cc->iv_size)
2756 		/* at least a 64 bit sector number should fit in our buffer */
2757 		cc->iv_size = max(cc->iv_size,
2758 				  (unsigned int)(sizeof(u64) / sizeof(u8)));
2759 	else if (ivmode) {
2760 		DMWARN("Selected cipher does not support IVs");
2761 		ivmode = NULL;
2762 	}
2763 
2764 	/* Choose ivmode, see comments at iv code. */
2765 	if (ivmode == NULL)
2766 		cc->iv_gen_ops = NULL;
2767 	else if (strcmp(ivmode, "plain") == 0)
2768 		cc->iv_gen_ops = &crypt_iv_plain_ops;
2769 	else if (strcmp(ivmode, "plain64") == 0)
2770 		cc->iv_gen_ops = &crypt_iv_plain64_ops;
2771 	else if (strcmp(ivmode, "plain64be") == 0)
2772 		cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2773 	else if (strcmp(ivmode, "essiv") == 0)
2774 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
2775 	else if (strcmp(ivmode, "benbi") == 0)
2776 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
2777 	else if (strcmp(ivmode, "null") == 0)
2778 		cc->iv_gen_ops = &crypt_iv_null_ops;
2779 	else if (strcmp(ivmode, "eboiv") == 0)
2780 		cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2781 	else if (strcmp(ivmode, "elephant") == 0) {
2782 		cc->iv_gen_ops = &crypt_iv_elephant_ops;
2783 		cc->key_parts = 2;
2784 		cc->key_extra_size = cc->key_size / 2;
2785 		if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2786 			return -EINVAL;
2787 		set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2788 	} else if (strcmp(ivmode, "lmk") == 0) {
2789 		cc->iv_gen_ops = &crypt_iv_lmk_ops;
2790 		/*
2791 		 * Versions 2 and 3 are recognised according
2792 		 * to the length of the provided multi-key string.
2793 		 * If present (version 3), last key is used as IV seed.
2794 		 * All keys (including IV seed) are always the same size.
2795 		 */
2796 		if (cc->key_size % cc->key_parts) {
2797 			cc->key_parts++;
2798 			cc->key_extra_size = cc->key_size / cc->key_parts;
2799 		}
2800 	} else if (strcmp(ivmode, "tcw") == 0) {
2801 		cc->iv_gen_ops = &crypt_iv_tcw_ops;
2802 		cc->key_parts += 2; /* IV + whitening */
2803 		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2804 	} else if (strcmp(ivmode, "random") == 0) {
2805 		cc->iv_gen_ops = &crypt_iv_random_ops;
2806 		/* Need storage space in integrity fields. */
2807 		cc->integrity_iv_size = cc->iv_size;
2808 	} else {
2809 		ti->error = "Invalid IV mode";
2810 		return -EINVAL;
2811 	}
2812 
2813 	return 0;
2814 }
2815 
2816 /*
2817  * Workaround to parse HMAC algorithm from AEAD crypto API spec.
2818  * The HMAC is needed to calculate tag size (HMAC digest size).
2819  * This should probably be done by crypto API calls (once available...)
2820  */
2821 static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2822 {
2823 	char *start, *end, *mac_alg = NULL;
2824 	struct crypto_ahash *mac;
2825 
2826 	if (!strstarts(cipher_api, "authenc("))
2827 		return 0;
2828 
2829 	start = strchr(cipher_api, '(');
2830 	end = strchr(cipher_api, ',');
2831 	if (!start || !end || ++start > end)
2832 		return -EINVAL;
2833 
2834 	mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
2835 	if (!mac_alg)
2836 		return -ENOMEM;
2837 	strncpy(mac_alg, start, end - start);
2838 
2839 	mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
2840 	kfree(mac_alg);
2841 
2842 	if (IS_ERR(mac))
2843 		return PTR_ERR(mac);
2844 
2845 	cc->key_mac_size = crypto_ahash_digestsize(mac);
2846 	crypto_free_ahash(mac);
2847 
2848 	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2849 	if (!cc->authenc_key)
2850 		return -ENOMEM;
2851 
2852 	return 0;
2853 }
2854 
2855 static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
2856 				char **ivmode, char **ivopts)
2857 {
2858 	struct crypt_config *cc = ti->private;
2859 	char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
2860 	int ret = -EINVAL;
2861 
2862 	cc->tfms_count = 1;
2863 
2864 	/*
2865 	 * New format (capi: prefix)
2866 	 * capi:cipher_api_spec-iv:ivopts
2867 	 */
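	/*
	 * Example capi: specifications (illustrative):
	 *
	 *   capi:cbc(aes)-essiv:sha256
	 *   capi:xts(aes)-plain64
	 *   capi:authenc(hmac(sha256),xts(aes))-random
	 */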
2868 	tmp = &cipher_in[strlen("capi:")];
2869 
2870 	/* Separate IV options if present; they can contain another '-' in the hash name */
2871 	*ivopts = strrchr(tmp, ':');
2872 	if (*ivopts) {
2873 		**ivopts = '\0';
2874 		(*ivopts)++;
2875 	}
2876 	/* Parse IV mode */
2877 	*ivmode = strrchr(tmp, '-');
2878 	if (*ivmode) {
2879 		**ivmode = '\0';
2880 		(*ivmode)++;
2881 	}
2882 	/* The rest is crypto API spec */
2883 	cipher_api = tmp;
2884 
2885 	/* Alloc AEAD, can be used only in new format. */
2886 	if (crypt_integrity_aead(cc)) {
2887 		ret = crypt_ctr_auth_cipher(cc, cipher_api);
2888 		if (ret < 0) {
2889 			ti->error = "Invalid AEAD cipher spec";
2890 			return -ENOMEM;
2891 		}
2892 	}
2893 
2894 	if (*ivmode && !strcmp(*ivmode, "lmk"))
2895 		cc->tfms_count = 64;
2896 
2897 	if (*ivmode && !strcmp(*ivmode, "essiv")) {
2898 		if (!*ivopts) {
2899 			ti->error = "Digest algorithm missing for ESSIV mode";
2900 			return -EINVAL;
2901 		}
2902 		ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
2903 			       cipher_api, *ivopts);
2904 		if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2905 			ti->error = "Cannot allocate cipher string";
2906 			return -ENOMEM;
2907 		}
2908 		cipher_api = buf;
2909 	}
2910 
2911 	cc->key_parts = cc->tfms_count;
2912 
2913 	/* Allocate cipher */
2914 	ret = crypt_alloc_tfms(cc, cipher_api);
2915 	if (ret < 0) {
2916 		ti->error = "Error allocating crypto tfm";
2917 		return ret;
2918 	}
2919 
2920 	if (crypt_integrity_aead(cc))
2921 		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2922 	else
2923 		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2924 
2925 	return 0;
2926 }
2927 
2928 static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
2929 				char **ivmode, char **ivopts)
2930 {
2931 	struct crypt_config *cc = ti->private;
2932 	char *tmp, *cipher, *chainmode, *keycount;
2933 	char *cipher_api = NULL;
2934 	int ret = -EINVAL;
2935 	char dummy;
2936 
2937 	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
2938 		ti->error = "Bad cipher specification";
2939 		return -EINVAL;
2940 	}
2941 
2942 	/*
2943 	 * Legacy dm-crypt cipher specification
2944 	 * cipher[:keycount]-mode-iv:ivopts
2945 	 */
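	/*
	 * Example legacy specifications (illustrative):
	 *
	 *   aes-cbc-essiv:sha256
	 *   aes-xts-plain64
	 *   aes:64-cbc-lmk    (64 keys, loop-AES compatible multi-key mode)
	 */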
2946 	tmp = cipher_in;
2947 	keycount = strsep(&tmp, "-");
2948 	cipher = strsep(&keycount, ":");
2949 
2950 	if (!keycount)
2951 		cc->tfms_count = 1;
2952 	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
2953 		 !is_power_of_2(cc->tfms_count)) {
2954 		ti->error = "Bad cipher key count specification";
2955 		return -EINVAL;
2956 	}
2957 	cc->key_parts = cc->tfms_count;
2958 
2959 	chainmode = strsep(&tmp, "-");
2960 	*ivmode = strsep(&tmp, ":");
2961 	*ivopts = tmp;
2962 
2963 	/*
2964 	 * For compatibility with the original dm-crypt mapping format, if
2965 	 * only the cipher name is supplied, use cbc-plain.
2966 	 */
2967 	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
2968 		chainmode = "cbc";
2969 		*ivmode = "plain";
2970 	}
2971 
2972 	if (strcmp(chainmode, "ecb") && !*ivmode) {
2973 		ti->error = "IV mechanism required";
2974 		return -EINVAL;
2975 	}
2976 
2977 	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
2978 	if (!cipher_api)
2979 		goto bad_mem;
2980 
2981 	if (*ivmode && !strcmp(*ivmode, "essiv")) {
2982 		if (!*ivopts) {
2983 			ti->error = "Digest algorithm missing for ESSIV mode";
2984 			kfree(cipher_api);
2985 			return -EINVAL;
2986 		}
2987 		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2988 			       "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
2989 	} else {
2990 		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2991 			       "%s(%s)", chainmode, cipher);
2992 	}
2993 	if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2994 		kfree(cipher_api);
2995 		goto bad_mem;
2996 	}
2997 
2998 	/* Allocate cipher */
2999 	ret = crypt_alloc_tfms(cc, cipher_api);
3000 	if (ret < 0) {
3001 		ti->error = "Error allocating crypto tfm";
3002 		kfree(cipher_api);
3003 		return ret;
3004 	}
3005 	kfree(cipher_api);
3006 
3007 	return 0;
3008 bad_mem:
3009 	ti->error = "Cannot allocate cipher strings";
3010 	return -ENOMEM;
3011 }
3012 
3013 static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
3014 {
3015 	struct crypt_config *cc = ti->private;
3016 	char *ivmode = NULL, *ivopts = NULL;
3017 	int ret;
3018 
3019 	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
3020 	if (!cc->cipher_string) {
3021 		ti->error = "Cannot allocate cipher strings";
3022 		return -ENOMEM;
3023 	}
3024 
3025 	if (strstarts(cipher_in, "capi:"))
3026 		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
3027 	else
3028 		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
3029 	if (ret)
3030 		return ret;
3031 
3032 	/* Initialize IV */
3033 	ret = crypt_ctr_ivmode(ti, ivmode);
3034 	if (ret < 0)
3035 		return ret;
3036 
3037 	/* Initialize and set key */
3038 	ret = crypt_set_key(cc, key);
3039 	if (ret < 0) {
3040 		ti->error = "Error decoding and setting key";
3041 		return ret;
3042 	}
3043 
3044 	/* Allocate IV */
3045 	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
3046 		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
3047 		if (ret < 0) {
3048 			ti->error = "Error creating IV";
3049 			return ret;
3050 		}
3051 	}
3052 
3053 	/* Initialize IV (set keys for ESSIV etc) */
3054 	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
3055 		ret = cc->iv_gen_ops->init(cc);
3056 		if (ret < 0) {
3057 			ti->error = "Error initialising IV";
3058 			return ret;
3059 		}
3060 	}
3061 
3062 	/* wipe the kernel key payload copy */
3063 	if (cc->key_string)
3064 		memset(cc->key, 0, cc->key_size * sizeof(u8));
3065 
3066 	return ret;
3067 }
3068 
3069 static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
3070 {
3071 	struct crypt_config *cc = ti->private;
3072 	struct dm_arg_set as;
3073 	static const struct dm_arg _args[] = {
3074 		{0, 8, "Invalid number of feature args"},
3075 	};
3076 	unsigned int opt_params, val;
3077 	const char *opt_string, *sval;
3078 	char dummy;
3079 	int ret;
3080 
3081 	/* Optional parameters */
3082 	as.argc = argc;
3083 	as.argv = argv;
3084 
3085 	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
3086 	if (ret)
3087 		return ret;
3088 
3089 	while (opt_params--) {
3090 		opt_string = dm_shift_arg(&as);
3091 		if (!opt_string) {
3092 			ti->error = "Not enough feature arguments";
3093 			return -EINVAL;
3094 		}
3095 
3096 		if (!strcasecmp(opt_string, "allow_discards"))
3097 			ti->num_discard_bios = 1;
3098 
3099 		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
3100 			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3101 
3102 		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
3103 			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3104 		else if (!strcasecmp(opt_string, "no_read_workqueue"))
3105 			set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3106 		else if (!strcasecmp(opt_string, "no_write_workqueue"))
3107 			set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3108 		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
3109 			if (val == 0 || val > MAX_TAG_SIZE) {
3110 				ti->error = "Invalid integrity arguments";
3111 				return -EINVAL;
3112 			}
3113 			cc->on_disk_tag_size = val;
3114 			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
3115 			if (!strcasecmp(sval, "aead")) {
3116 				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
3117 			} else if (strcasecmp(sval, "none")) {
3118 				ti->error = "Unknown integrity profile";
3119 				return -EINVAL;
3120 			}
3121 
3122 			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
3123 			if (!cc->cipher_auth)
3124 				return -ENOMEM;
3125 		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
3126 			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
3127 			    cc->sector_size > 4096 ||
3128 			    (cc->sector_size & (cc->sector_size - 1))) {
3129 				ti->error = "Invalid feature value for sector_size";
3130 				return -EINVAL;
3131 			}
3132 			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
3133 				ti->error = "Device size is not multiple of sector_size feature";
3134 				return -EINVAL;
3135 			}
3136 			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
3137 		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
3138 			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3139 		else {
3140 			ti->error = "Invalid feature arguments";
3141 			return -EINVAL;
3142 		}
3143 	}
3144 
3145 	return 0;
3146 }
3147 
3148 #ifdef CONFIG_BLK_DEV_ZONED
3149 static int crypt_report_zones(struct dm_target *ti,
3150 		struct dm_report_zones_args *args, unsigned int nr_zones)
3151 {
3152 	struct crypt_config *cc = ti->private;
3153 
3154 	return dm_report_zones(cc->dev->bdev, cc->start,
3155 			cc->start + dm_target_offset(ti, args->next_sector),
3156 			args, nr_zones);
3157 }
3158 #else
3159 #define crypt_report_zones NULL
3160 #endif
3161 
3162 /*
3163  * Construct an encryption mapping:
3164  * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
3165  */
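/*
 * Example table lines (illustrative; the device path and hex key are
 * placeholders):
 *
 *   aes-xts-plain64 <64-hex-digit key> 0 /dev/sdb1 0
 *   aes-xts-plain64 :64:logon:mykey 0 /dev/sdb1 0 1 allow_discards
 *
 * Optional feature arguments follow the five mandatory arguments, preceded
 * by their count (see crypt_ctr_optional()).
 */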
3166 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3167 {
3168 	struct crypt_config *cc;
3169 	const char *devname = dm_table_device_name(ti->table);
3170 	int key_size;
3171 	unsigned int align_mask;
3172 	unsigned long long tmpll;
3173 	int ret;
3174 	size_t iv_size_padding, additional_req_size;
3175 	char dummy;
3176 
3177 	if (argc < 5) {
3178 		ti->error = "Not enough arguments";
3179 		return -EINVAL;
3180 	}
3181 
3182 	key_size = get_key_size(&argv[1]);
3183 	if (key_size < 0) {
3184 		ti->error = "Cannot parse key size";
3185 		return -EINVAL;
3186 	}
3187 
3188 	cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
3189 	if (!cc) {
3190 		ti->error = "Cannot allocate encryption context";
3191 		return -ENOMEM;
3192 	}
3193 	cc->key_size = key_size;
3194 	cc->sector_size = (1 << SECTOR_SHIFT);
3195 	cc->sector_shift = 0;
3196 
3197 	ti->private = cc;
3198 
3199 	spin_lock(&dm_crypt_clients_lock);
3200 	dm_crypt_clients_n++;
3201 	crypt_calculate_pages_per_client();
3202 	spin_unlock(&dm_crypt_clients_lock);
3203 
3204 	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3205 	if (ret < 0)
3206 		goto bad;
3207 
3208 	/* Optional parameters need to be read before cipher constructor */
3209 	if (argc > 5) {
3210 		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
3211 		if (ret)
3212 			goto bad;
3213 	}
3214 
3215 	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
3216 	if (ret < 0)
3217 		goto bad;
3218 
3219 	if (crypt_integrity_aead(cc)) {
3220 		cc->dmreq_start = sizeof(struct aead_request);
3221 		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
3222 		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
3223 	} else {
3224 		cc->dmreq_start = sizeof(struct skcipher_request);
3225 		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
3226 		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
3227 	}
3228 	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
3229 
3230 	if (align_mask < CRYPTO_MINALIGN) {
3231 		/* Allocate the padding exactly */
3232 		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
3233 				& align_mask;
3234 	} else {
3235 		/*
3236 		 * If the cipher requires greater alignment than kmalloc
3237 		 * alignment, we don't know the exact position of the
3238 		 * initialization vector. We must assume worst case.
3239 		 */
3240 		iv_size_padding = align_mask;
3241 	}
3242 
3243 	/*  ...| IV + padding | original IV | original sec. number | bio tag offset | */
3244 	additional_req_size = sizeof(struct dm_crypt_request) +
3245 		iv_size_padding + cc->iv_size +
3246 		cc->iv_size +
3247 		sizeof(uint64_t) +
3248 		sizeof(unsigned int);
3249 
3250 	ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
3251 	if (ret) {
3252 		ti->error = "Cannot allocate crypt request mempool";
3253 		goto bad;
3254 	}
3255 
3256 	cc->per_bio_data_size = ti->per_io_data_size =
3257 		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
3258 		      ARCH_KMALLOC_MINALIGN);
3259 
3260 	ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
3261 	if (ret) {
3262 		ti->error = "Cannot allocate page mempool";
3263 		goto bad;
3264 	}
3265 
3266 	ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
3267 	if (ret) {
3268 		ti->error = "Cannot allocate crypt bioset";
3269 		goto bad;
3270 	}
3271 
3272 	mutex_init(&cc->bio_alloc_lock);
3273 
3274 	ret = -EINVAL;
3275 	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
3276 	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
3277 		ti->error = "Invalid iv_offset sector";
3278 		goto bad;
3279 	}
3280 	cc->iv_offset = tmpll;
3281 
3282 	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
3283 	if (ret) {
3284 		ti->error = "Device lookup failed";
3285 		goto bad;
3286 	}
3287 
3288 	ret = -EINVAL;
3289 	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
3290 		ti->error = "Invalid device sector";
3291 		goto bad;
3292 	}
3293 	cc->start = tmpll;
3294 
3295 	if (bdev_is_zoned(cc->dev->bdev)) {
3296 		/*
3297 		 * For zoned block devices, we need to preserve the issuer write
3298 		 * ordering. To do so, disable write workqueues and force inline
3299 		 * encryption completion.
3300 		 */
3301 		set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3302 		set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3303 
3304 		/*
3305 		 * All zone append writes to a zone of a zoned block device will
3306 		 * have the same BIO sector, the start of the zone. When the
3307 		 * cipher IV mode uses sector values, all data targeting a
3308 		 * zone will be encrypted using the first sector number of the
3309 		 * zone. This will not result in write errors but will
3310 		 * cause most reads to fail as reads will use the sector values
3311 		 * for the actual data locations, resulting in IV mismatch.
3312 		 * To avoid this problem, ask DM core to emulate zone append
3313 		 * operations with regular writes.
3314 		 */
3315 		DMDEBUG("Zone append operations will be emulated");
3316 		ti->emulate_zone_append = true;
3317 	}
3318 
3319 	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
3320 		ret = crypt_integrity_ctr(cc, ti);
3321 		if (ret)
3322 			goto bad;
3323 
3324 		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
3325 		if (!cc->tag_pool_max_sectors)
3326 			cc->tag_pool_max_sectors = 1;
3327 
3328 		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
3329 			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
3330 		if (ret) {
3331 			ti->error = "Cannot allocate integrity tags mempool";
3332 			goto bad;
3333 		}
3334 
3335 		cc->tag_pool_max_sectors <<= cc->sector_shift;
3336 	}
3337 
3338 	ret = -ENOMEM;
3339 	cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
3340 	if (!cc->io_queue) {
3341 		ti->error = "Couldn't create kcryptd io queue";
3342 		goto bad;
3343 	}
3344 
3345 	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3346 		cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
3347 						  1, devname);
3348 	else
3349 		cc->crypt_queue = alloc_workqueue("kcryptd/%s",
3350 						  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
3351 						  num_online_cpus(), devname);
3352 	if (!cc->crypt_queue) {
3353 		ti->error = "Couldn't create kcryptd queue";
3354 		goto bad;
3355 	}
3356 
3357 	spin_lock_init(&cc->write_thread_lock);
3358 	cc->write_tree = RB_ROOT;
3359 
3360 	cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
3361 	if (IS_ERR(cc->write_thread)) {
3362 		ret = PTR_ERR(cc->write_thread);
3363 		cc->write_thread = NULL;
3364 		ti->error = "Couldn't spawn write thread";
3365 		goto bad;
3366 	}
3367 
3368 	ti->num_flush_bios = 1;
3369 	ti->limit_swap_bios = true;
3370 	ti->accounts_remapped_io = true;
3371 
3372 	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
3373 	return 0;
3374 
3375 bad:
3376 	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
3377 	crypt_dtr(ti);
3378 	return ret;
3379 }
3380 
3381 static int crypt_map(struct dm_target *ti, struct bio *bio)
3382 {
3383 	struct dm_crypt_io *io;
3384 	struct crypt_config *cc = ti->private;
3385 
3386 	/*
3387 	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
3388 	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
3389 	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
3390 	 */
3391 	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
3392 	    bio_op(bio) == REQ_OP_DISCARD)) {
3393 		bio_set_dev(bio, cc->dev->bdev);
3394 		if (bio_sectors(bio))
3395 			bio->bi_iter.bi_sector = cc->start +
3396 				dm_target_offset(ti, bio->bi_iter.bi_sector);
3397 		return DM_MAPIO_REMAPPED;
3398 	}
3399 
3400 	/*
3401 	 * Check if bio is too large, split as needed.
3402 	 */
3403 	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
3404 	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3405 		dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));
3406 
3407 	/*
3408 	 * Ensure that bio is a multiple of internal sector encryption size
3409 	 * and is aligned to this size as defined in IO hints.
3410 	 */
3411 	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3412 		return DM_MAPIO_KILL;
3413 
3414 	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3415 		return DM_MAPIO_KILL;
3416 
3417 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
3418 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3419 
3420 	if (cc->on_disk_tag_size) {
3421 		unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
3422 
3423 		if (unlikely(tag_len > KMALLOC_MAX_SIZE))
3424 			io->integrity_metadata = NULL;
3425 		else
3426 			io->integrity_metadata = kmalloc(tag_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3427 
3428 		if (unlikely(!io->integrity_metadata)) {
3429 			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3430 				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
3431 			io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
3432 			io->integrity_metadata_from_pool = true;
3433 		}
3434 	}
3435 
3436 	if (crypt_integrity_aead(cc))
3437 		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
3438 	else
3439 		io->ctx.r.req = (struct skcipher_request *)(io + 1);
3440 
3441 	if (bio_data_dir(io->base_bio) == READ) {
3442 		if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))
3443 			kcryptd_queue_read(io);
3444 	} else
3445 		kcryptd_queue_crypt(io);
3446 
3447 	return DM_MAPIO_SUBMITTED;
3448 }
3449 
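/*
 * Branch-free nibble-to-hex conversion: for c in 0..9 the masked term is 0,
 * giving '0'..'9'; for c in 10..15, (9 - c) wraps around as an unsigned
 * value and the shift-and-mask yields 0x27, giving 'a'..'f'
 * ('a' == '0' + 10 + 0x27).  Avoids a lookup table or a data-dependent
 * branch while formatting key material.
 */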
3450 static char hex2asc(unsigned char c)
3451 {
3452 	return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
3453 }
3454 
3455 static void crypt_status(struct dm_target *ti, status_type_t type,
3456 			 unsigned int status_flags, char *result, unsigned int maxlen)
3457 {
3458 	struct crypt_config *cc = ti->private;
3459 	unsigned int i, sz = 0;
3460 	int num_feature_args = 0;
3461 
3462 	switch (type) {
3463 	case STATUSTYPE_INFO:
3464 		result[0] = '\0';
3465 		break;
3466 
3467 	case STATUSTYPE_TABLE:
3468 		DMEMIT("%s ", cc->cipher_string);
3469 
3470 		if (cc->key_size > 0) {
3471 			if (cc->key_string)
3472 				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
3473 			else {
3474 				for (i = 0; i < cc->key_size; i++) {
3475 					DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
3476 					       hex2asc(cc->key[i] & 0xf));
3477 				}
3478 			}
3479 		} else
3480 			DMEMIT("-");
3481 
3482 		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
3483 				cc->dev->name, (unsigned long long)cc->start);
3484 
3485 		num_feature_args += !!ti->num_discard_bios;
3486 		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3487 		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3488 		num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3489 		num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3490 		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
3491 		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3492 		if (cc->on_disk_tag_size)
3493 			num_feature_args++;
3494 		if (num_feature_args) {
3495 			DMEMIT(" %d", num_feature_args);
3496 			if (ti->num_discard_bios)
3497 				DMEMIT(" allow_discards");
3498 			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3499 				DMEMIT(" same_cpu_crypt");
3500 			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
3501 				DMEMIT(" submit_from_crypt_cpus");
3502 			if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3503 				DMEMIT(" no_read_workqueue");
3504 			if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3505 				DMEMIT(" no_write_workqueue");
3506 			if (cc->on_disk_tag_size)
3507 				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
3508 			if (cc->sector_size != (1 << SECTOR_SHIFT))
3509 				DMEMIT(" sector_size:%d", cc->sector_size);
3510 			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
3511 				DMEMIT(" iv_large_sectors");
3512 		}
3513 		break;
3514 
3515 	case STATUSTYPE_IMA:
3516 		DMEMIT_TARGET_NAME_VERSION(ti->type);
3517 		DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n');
3518 		DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
3519 		DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
3520 		       'y' : 'n');
3521 		DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
3522 		       'y' : 'n');
3523 		DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
3524 		       'y' : 'n');
3525 		DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
3526 		       'y' : 'n');
3527 
3528 		if (cc->on_disk_tag_size)
3529 			DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
3530 			       cc->on_disk_tag_size, cc->cipher_auth);
3531 		if (cc->sector_size != (1 << SECTOR_SHIFT))
3532 			DMEMIT(",sector_size=%d", cc->sector_size);
3533 		if (cc->cipher_string)
3534 			DMEMIT(",cipher_string=%s", cc->cipher_string);
3535 
3536 		DMEMIT(",key_size=%u", cc->key_size);
3537 		DMEMIT(",key_parts=%u", cc->key_parts);
3538 		DMEMIT(",key_extra_size=%u", cc->key_extra_size);
3539 		DMEMIT(",key_mac_size=%u", cc->key_mac_size);
3540 		DMEMIT(";");
3541 		break;
3542 	}
3543 }
3544 
3545 static void crypt_postsuspend(struct dm_target *ti)
3546 {
3547 	struct crypt_config *cc = ti->private;
3548 
3549 	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3550 }
3551 
3552 static int crypt_preresume(struct dm_target *ti)
3553 {
3554 	struct crypt_config *cc = ti->private;
3555 
3556 	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
3557 		DMERR("aborting resume - crypt key is not set.");
3558 		return -EAGAIN;
3559 	}
3560 
3561 	return 0;
3562 }
3563 
3564 static void crypt_resume(struct dm_target *ti)
3565 {
3566 	struct crypt_config *cc = ti->private;
3567 
3568 	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3569 }
3570 
3571 /* Message interface
3572  *	key set <key>
3573  *	key wipe
3574  */
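/*
 * Example usage (illustrative; "crypt_dev" is a placeholder device name):
 *
 *   dmsetup message crypt_dev 0 key wipe
 *   dmsetup message crypt_dev 0 key set <key in hex, or :<size>:<type>:<desc>>
 *
 * The target must be suspended while the key is manipulated.
 */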
3575 static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
3576 			 char *result, unsigned int maxlen)
3577 {
3578 	struct crypt_config *cc = ti->private;
3579 	int key_size, ret = -EINVAL;
3580 
3581 	if (argc < 2)
3582 		goto error;
3583 
3584 	if (!strcasecmp(argv[0], "key")) {
3585 		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
3586 			DMWARN("not suspended during key manipulation.");
3587 			return -EINVAL;
3588 		}
3589 		if (argc == 3 && !strcasecmp(argv[1], "set")) {
3590 			/* The key size may not be changed. */
3591 			key_size = get_key_size(&argv[2]);
3592 			if (key_size < 0 || cc->key_size != key_size) {
3593 				memset(argv[2], '0', strlen(argv[2]));
3594 				return -EINVAL;
3595 			}
3596 
3597 			ret = crypt_set_key(cc, argv[2]);
3598 			if (ret)
3599 				return ret;
3600 			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
3601 				ret = cc->iv_gen_ops->init(cc);
3602 			/* wipe the kernel key payload copy */
3603 			if (cc->key_string)
3604 				memset(cc->key, 0, cc->key_size * sizeof(u8));
3605 			return ret;
3606 		}
3607 		if (argc == 2 && !strcasecmp(argv[1], "wipe"))
3608 			return crypt_wipe_key(cc);
3609 	}
3610 
3611 error:
3612 	DMWARN("unrecognised message received.");
3613 	return -EINVAL;
3614 }
3615 
3616 static int crypt_iterate_devices(struct dm_target *ti,
3617 				 iterate_devices_callout_fn fn, void *data)
3618 {
3619 	struct crypt_config *cc = ti->private;
3620 
3621 	return fn(ti, cc->dev, cc->start, ti->len, data);
3622 }
3623 
3624 static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3625 {
3626 	struct crypt_config *cc = ti->private;
3627 
3628 	/*
3629 	 * Unfortunate constraint that is required to avoid the potential
3630 	 * for exceeding underlying device's max_segments limits -- due to
3631 	 * crypt_alloc_buffer() possibly allocating pages for the encryption
3632 	 * bio that are not as physically contiguous as the original bio.
3633 	 */
3634 	limits->max_segment_size = PAGE_SIZE;
3635 
3636 	limits->logical_block_size =
3637 		max_t(unsigned int, limits->logical_block_size, cc->sector_size);
3638 	limits->physical_block_size =
3639 		max_t(unsigned int, limits->physical_block_size, cc->sector_size);
3640 	limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
3641 	limits->dma_alignment = limits->logical_block_size - 1;
3642 }
3643 
3644 static struct target_type crypt_target = {
3645 	.name   = "crypt",
3646 	.version = {1, 24, 0},
3647 	.module = THIS_MODULE,
3648 	.ctr    = crypt_ctr,
3649 	.dtr    = crypt_dtr,
3650 	.features = DM_TARGET_ZONED_HM,
3651 	.report_zones = crypt_report_zones,
3652 	.map    = crypt_map,
3653 	.status = crypt_status,
3654 	.postsuspend = crypt_postsuspend,
3655 	.preresume = crypt_preresume,
3656 	.resume = crypt_resume,
3657 	.message = crypt_message,
3658 	.iterate_devices = crypt_iterate_devices,
3659 	.io_hints = crypt_io_hints,
3660 };
3661 module_dm(crypt);
3662 
3663 MODULE_AUTHOR("Jana Saout <jana@saout.de>");
3664 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
3665 MODULE_LICENSE("GPL");
3666