xref: /openbmc/linux/drivers/md/dm-crypt.c (revision 4246a0b6)
11da177e4SLinus Torvalds /*
2bf14299fSJana Saout  * Copyright (C) 2003 Jana Saout <jana@saout.de>
31da177e4SLinus Torvalds  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
454cea3f6SMilan Broz  * Copyright (C) 2006-2015 Red Hat, Inc. All rights reserved.
5ed04d981SMilan Broz  * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * This file is released under the GPL.
81da177e4SLinus Torvalds  */
91da177e4SLinus Torvalds 
1043d69034SMilan Broz #include <linux/completion.h>
11d1806f6aSHerbert Xu #include <linux/err.h>
121da177e4SLinus Torvalds #include <linux/module.h>
131da177e4SLinus Torvalds #include <linux/init.h>
141da177e4SLinus Torvalds #include <linux/kernel.h>
151da177e4SLinus Torvalds #include <linux/bio.h>
161da177e4SLinus Torvalds #include <linux/blkdev.h>
171da177e4SLinus Torvalds #include <linux/mempool.h>
181da177e4SLinus Torvalds #include <linux/slab.h>
191da177e4SLinus Torvalds #include <linux/crypto.h>
201da177e4SLinus Torvalds #include <linux/workqueue.h>
21dc267621SMikulas Patocka #include <linux/kthread.h>
223fcfab16SAndrew Morton #include <linux/backing-dev.h>
2360063497SArun Sharma #include <linux/atomic.h>
24378f058cSDavid Hardeman #include <linux/scatterlist.h>
25b3c5fd30SMikulas Patocka #include <linux/rbtree.h>
261da177e4SLinus Torvalds #include <asm/page.h>
2748527fa7SRik Snel #include <asm/unaligned.h>
2834745785SMilan Broz #include <crypto/hash.h>
2934745785SMilan Broz #include <crypto/md5.h>
3034745785SMilan Broz #include <crypto/algapi.h>
311da177e4SLinus Torvalds 
32586e80e6SMikulas Patocka #include <linux/device-mapper.h>
331da177e4SLinus Torvalds 
3472d94861SAlasdair G Kergon #define DM_MSG_PREFIX "crypt"
351da177e4SLinus Torvalds 
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;	/* completed to resume a stalled conversion */
	struct bio *bio_in;		/* source bio */
	struct bio *bio_out;		/* destination bio */
	struct bvec_iter iter_in;	/* current position within bio_in */
	struct bvec_iter iter_out;	/* current position within bio_out */
	sector_t cc_sector;		/* sector currently being converted */
	atomic_t cc_pending;		/* in-flight crypto requests for this ctx */
	struct ablkcipher_request *req;	/* crypto request being prepared/submitted */
};
491da177e4SLinus Torvalds 
/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;	/* owning target configuration */
	struct bio *base_bio;		/* original bio submitted to the target */
	struct work_struct work;	/* work item for deferred processing */

	struct convert_context ctx;	/* en/decryption state for this bio */

	atomic_t io_pending;		/* reference count of outstanding sub-I/O */
	int error;			/* error status of this io (0 = ok) */
	sector_t sector;		/* start sector on the underlying device */

	/* links this io into crypt_config::write_tree (sorted writes) */
	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
6653017030SMilan Broz 
/*
 * Per crypto-request private data; lives after the ablkcipher request
 * in the allocation laid out per crypt_config::dmreq_start.
 */
struct dm_crypt_request {
	struct convert_context *ctx;	/* conversion this request belongs to */
	struct scatterlist sg_in;	/* source data for one cipher unit */
	struct scatterlist sg_out;	/* destination for one cipher unit */
	sector_t iv_sector;		/* sector number fed to the IV generator */
};
7301482b76SMilan Broz 
741da177e4SLinus Torvalds struct crypt_config;
751da177e4SLinus Torvalds 
struct crypt_iv_operations {
	/* parse IV options (opts) and allocate per-IV-mode state */
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	/* release everything allocated by ctr */
	void (*dtr)(struct crypt_config *cc);
	/* derive key-dependent material (e.g. salt/seed) from cc->key */
	int (*init)(struct crypt_config *cc);
	/* erase the key-derived material again (key wipe) */
	int (*wipe)(struct crypt_config *cc);
	/* produce the IV for one dm_crypt_request */
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	/* optional post-processing hook run after the cipher operation */
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};
871da177e4SLinus Torvalds 
struct iv_essiv_private {
	struct crypto_hash *hash_tfm;	/* hash used to derive the salt from the key */
	u8 *salt;			/* salt = hash(volume key), keys essiv_tfm */
};

struct iv_benbi_private {
	int shift;	/* 9 - log2(cipher blocksize): sector -> block count shift */
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;	/* MD5 transform for IV computation */
	u8 *seed;			/* extra IV seed; NULL for LMK version 2 */
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;	/* CRC32 used for whitening */
	u8 *iv_seed;			/* IV seed taken from the key tail */
	u8 *whitening;			/* whitening value taken from the key tail */
};
109ed04d981SMilan Broz 
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
/* bit numbers for crypt_config::flags */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
116c0297721SAndi Kleen 
/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;	/* underlying block device */
	sector_t start;		/* offset of the crypt mapping on dev */

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requeusts/buffer pages
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;	/* bio I/O work */
	struct workqueue_struct *crypt_queue;	/* en/decryption work */

	/* dedicated thread issuing writes sorted in write_tree */
	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct rb_root write_tree;

	char *cipher;		/* cipher name (used e.g. for the ESSIV tfm) */
	char *cipher_string;	/* full cipher specification from the table */

	struct crypt_iv_operations *iv_gen_ops;
	/* per-IV-mode private state; which member is valid depends on iv_gen_ops */
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;	/* IV offset added to the sector number */
	unsigned int iv_size;	/* size of the IV in bytes */

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;	/* bulk cipher transform(s) */
	unsigned tfms_count;			/* number of entries in tfms */

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;		/* enum flags bits */
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];		/* flexible key storage, key_size bytes */
};
1811da177e4SLinus Torvalds 
1826a24c718SMilan Broz #define MIN_IOS        16
1831da177e4SLinus Torvalds 
184028867acSAlasdair G Kergon static void clone_init(struct dm_crypt_io *, struct bio *);
185395b167cSAlasdair G Kergon static void kcryptd_queue_crypt(struct dm_crypt_io *io);
1862dc5327dSMilan Broz static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
187027581f3SOlaf Kirch 
/*
 * Use this to access cipher attributes that are the same for each CPU.
 * All entries in cc->tfms are instances of the same cipher, so the
 * first one is representative for blocksize/ivsize queries.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}
195c0297721SAndi Kleen 
1961da177e4SLinus Torvalds /*
1971da177e4SLinus Torvalds  * Different IV generation algorithms:
1981da177e4SLinus Torvalds  *
1993c164bd8SRik Snel  * plain: the initial vector is the 32-bit little-endian version of the sector
2003a4fa0a2SRobert P. J. Day  *        number, padded with zeros if necessary.
2011da177e4SLinus Torvalds  *
20261afef61SMilan Broz  * plain64: the initial vector is the 64-bit little-endian version of the sector
20361afef61SMilan Broz  *        number, padded with zeros if necessary.
20461afef61SMilan Broz  *
2053c164bd8SRik Snel  * essiv: "encrypted sector|salt initial vector", the sector number is
2061da177e4SLinus Torvalds  *        encrypted with the bulk cipher using a salt as key. The salt
2071da177e4SLinus Torvalds  *        should be derived from the bulk cipher's key via hashing.
2081da177e4SLinus Torvalds  *
20948527fa7SRik Snel  * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
21048527fa7SRik Snel  *        (needed for LRW-32-AES and possible other narrow block modes)
21148527fa7SRik Snel  *
21246b47730SLudwig Nussel  * null: the initial vector is always zero.  Provides compatibility with
21346b47730SLudwig Nussel  *       obsolete loop_fish2 devices.  Do not use for new devices.
21446b47730SLudwig Nussel  *
21534745785SMilan Broz  * lmk:  Compatible implementation of the block chaining mode used
21634745785SMilan Broz  *       by the Loop-AES block device encryption system
21734745785SMilan Broz  *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
21834745785SMilan Broz  *       It operates on full 512 byte sectors and uses CBC
21934745785SMilan Broz  *       with an IV derived from the sector number, the data and
22034745785SMilan Broz  *       optionally extra IV seed.
22134745785SMilan Broz  *       This means that after decryption the first block
22234745785SMilan Broz  *       of sector must be tweaked according to decrypted data.
22334745785SMilan Broz  *       Loop-AES can use three encryption schemes:
22434745785SMilan Broz  *         version 1: is plain aes-cbc mode
22534745785SMilan Broz  *         version 2: uses 64 multikey scheme with lmk IV generator
22634745785SMilan Broz  *         version 3: the same as version 2 with additional IV seed
22734745785SMilan Broz  *                   (it uses 65 keys, last key is used as IV seed)
22834745785SMilan Broz  *
229ed04d981SMilan Broz  * tcw:  Compatible implementation of the block chaining mode used
230ed04d981SMilan Broz  *       by the TrueCrypt device encryption system (prior to version 4.1).
231e44f23b3SMilan Broz  *       For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
232ed04d981SMilan Broz  *       It operates on full 512 byte sectors and uses CBC
233ed04d981SMilan Broz  *       with an IV derived from initial key and the sector number.
234ed04d981SMilan Broz  *       In addition, whitening value is applied on every sector, whitening
235ed04d981SMilan Broz  *       is calculated from initial key, sector number and mixed using CRC32.
236ed04d981SMilan Broz  *       Note that this encryption scheme is vulnerable to watermarking attacks
237ed04d981SMilan Broz  *       and should be used for old compatible containers access only.
238ed04d981SMilan Broz  *
2391da177e4SLinus Torvalds  * plumb: unimplemented, see:
2401da177e4SLinus Torvalds  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
2411da177e4SLinus Torvalds  */
2421da177e4SLinus Torvalds 
2432dc5327dSMilan Broz static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
2442dc5327dSMilan Broz 			      struct dm_crypt_request *dmreq)
2451da177e4SLinus Torvalds {
2461da177e4SLinus Torvalds 	memset(iv, 0, cc->iv_size);
247283a8328SAlasdair G Kergon 	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
2481da177e4SLinus Torvalds 
2491da177e4SLinus Torvalds 	return 0;
2501da177e4SLinus Torvalds }
2511da177e4SLinus Torvalds 
25261afef61SMilan Broz static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
2532dc5327dSMilan Broz 				struct dm_crypt_request *dmreq)
25461afef61SMilan Broz {
25561afef61SMilan Broz 	memset(iv, 0, cc->iv_size);
256283a8328SAlasdair G Kergon 	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
25761afef61SMilan Broz 
25861afef61SMilan Broz 	return 0;
25961afef61SMilan Broz }
26061afef61SMilan Broz 
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	/* salt = hash(volume key), using the digest chosen in the constructor */
	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	/* (Re)key the ESSIV cipher with the freshly derived salt. */
	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
			    crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}
287b95bf2d3SMilan Broz 
288542da317SMilan Broz /* Wipe salt and reset key derived from volume key */
289542da317SMilan Broz static int crypt_iv_essiv_wipe(struct crypt_config *cc)
290542da317SMilan Broz {
291542da317SMilan Broz 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
292542da317SMilan Broz 	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
293c0297721SAndi Kleen 	struct crypto_cipher *essiv_tfm;
294fd2d231fSMikulas Patocka 	int r, err = 0;
295542da317SMilan Broz 
296542da317SMilan Broz 	memset(essiv->salt, 0, salt_size);
297542da317SMilan Broz 
298fd2d231fSMikulas Patocka 	essiv_tfm = cc->iv_private;
299c0297721SAndi Kleen 	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
300c0297721SAndi Kleen 	if (r)
301c0297721SAndi Kleen 		err = r;
302c0297721SAndi Kleen 
303c0297721SAndi Kleen 	return err;
304c0297721SAndi Kleen }
305c0297721SAndi Kleen 
306c0297721SAndi Kleen /* Set up per cpu cipher state */
307c0297721SAndi Kleen static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
308c0297721SAndi Kleen 					     struct dm_target *ti,
309c0297721SAndi Kleen 					     u8 *salt, unsigned saltsize)
310c0297721SAndi Kleen {
311c0297721SAndi Kleen 	struct crypto_cipher *essiv_tfm;
312c0297721SAndi Kleen 	int err;
313c0297721SAndi Kleen 
314c0297721SAndi Kleen 	/* Setup the essiv_tfm with the given salt */
315c0297721SAndi Kleen 	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
316c0297721SAndi Kleen 	if (IS_ERR(essiv_tfm)) {
317c0297721SAndi Kleen 		ti->error = "Error allocating crypto tfm for ESSIV";
318c0297721SAndi Kleen 		return essiv_tfm;
319c0297721SAndi Kleen 	}
320c0297721SAndi Kleen 
321c0297721SAndi Kleen 	if (crypto_cipher_blocksize(essiv_tfm) !=
322c0297721SAndi Kleen 	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
323c0297721SAndi Kleen 		ti->error = "Block size of ESSIV cipher does "
324c0297721SAndi Kleen 			    "not match IV size of block cipher";
325c0297721SAndi Kleen 		crypto_free_cipher(essiv_tfm);
326c0297721SAndi Kleen 		return ERR_PTR(-EINVAL);
327c0297721SAndi Kleen 	}
328c0297721SAndi Kleen 
329c0297721SAndi Kleen 	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
330c0297721SAndi Kleen 	if (err) {
331c0297721SAndi Kleen 		ti->error = "Failed to set key for ESSIV cipher";
332c0297721SAndi Kleen 		crypto_free_cipher(essiv_tfm);
333c0297721SAndi Kleen 		return ERR_PTR(err);
334c0297721SAndi Kleen 	}
335c0297721SAndi Kleen 
336c0297721SAndi Kleen 	return essiv_tfm;
337542da317SMilan Broz }
338542da317SMilan Broz 
33960473592SMilan Broz static void crypt_iv_essiv_dtr(struct crypt_config *cc)
34060473592SMilan Broz {
341c0297721SAndi Kleen 	struct crypto_cipher *essiv_tfm;
34260473592SMilan Broz 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
34360473592SMilan Broz 
344b95bf2d3SMilan Broz 	crypto_free_hash(essiv->hash_tfm);
345b95bf2d3SMilan Broz 	essiv->hash_tfm = NULL;
346b95bf2d3SMilan Broz 
347b95bf2d3SMilan Broz 	kzfree(essiv->salt);
348b95bf2d3SMilan Broz 	essiv->salt = NULL;
349c0297721SAndi Kleen 
350fd2d231fSMikulas Patocka 	essiv_tfm = cc->iv_private;
351c0297721SAndi Kleen 
352c0297721SAndi Kleen 	if (essiv_tfm)
353c0297721SAndi Kleen 		crypto_free_cipher(essiv_tfm);
354c0297721SAndi Kleen 
355fd2d231fSMikulas Patocka 	cc->iv_private = NULL;
35660473592SMilan Broz }
35760473592SMilan Broz 
/*
 * Construct ESSIV state: allocate the digest named in opts, the salt
 * buffer, and the ESSIV cipher keyed with the salt.
 */
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	/* Salt buffer sized to the digest output; filled in by init. */
	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		/* salt/hash_tfm are already published - use the full dtr */
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	/* Nothing was published into cc yet - free locals only. */
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}
4051da177e4SLinus Torvalds 
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	/* IV = E_salt(little-endian sector number, zero-padded), in place */
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}
4171da177e4SLinus Torvalds 
41848527fa7SRik Snel static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
41948527fa7SRik Snel 			      const char *opts)
42048527fa7SRik Snel {
421c0297721SAndi Kleen 	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
422f0d1b0b3SDavid Howells 	int log = ilog2(bs);
42348527fa7SRik Snel 
42448527fa7SRik Snel 	/* we need to calculate how far we must shift the sector count
42548527fa7SRik Snel 	 * to get the cipher block count, we use this shift in _gen */
42648527fa7SRik Snel 
42748527fa7SRik Snel 	if (1 << log != bs) {
42848527fa7SRik Snel 		ti->error = "cypher blocksize is not a power of 2";
42948527fa7SRik Snel 		return -EINVAL;
43048527fa7SRik Snel 	}
43148527fa7SRik Snel 
43248527fa7SRik Snel 	if (log > 9) {
43348527fa7SRik Snel 		ti->error = "cypher blocksize is > 512";
43448527fa7SRik Snel 		return -EINVAL;
43548527fa7SRik Snel 	}
43648527fa7SRik Snel 
43760473592SMilan Broz 	cc->iv_gen_private.benbi.shift = 9 - log;
43848527fa7SRik Snel 
43948527fa7SRik Snel 	return 0;
44048527fa7SRik Snel }
44148527fa7SRik Snel 
/* benbi allocates no state, so there is nothing to release. */
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
44548527fa7SRik Snel 
4462dc5327dSMilan Broz static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
4472dc5327dSMilan Broz 			      struct dm_crypt_request *dmreq)
44848527fa7SRik Snel {
44979066ad3SHerbert Xu 	__be64 val;
45079066ad3SHerbert Xu 
45148527fa7SRik Snel 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
45279066ad3SHerbert Xu 
4532dc5327dSMilan Broz 	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
45479066ad3SHerbert Xu 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
45548527fa7SRik Snel 
4561da177e4SLinus Torvalds 	return 0;
4571da177e4SLinus Torvalds }
4581da177e4SLinus Torvalds 
/* null IV: always zero; compatibility with obsolete loop_fish2 devices */
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}
46646b47730SLudwig Nussel 
46734745785SMilan Broz static void crypt_iv_lmk_dtr(struct crypt_config *cc)
46834745785SMilan Broz {
46934745785SMilan Broz 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
47034745785SMilan Broz 
47134745785SMilan Broz 	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
47234745785SMilan Broz 		crypto_free_shash(lmk->hash_tfm);
47334745785SMilan Broz 	lmk->hash_tfm = NULL;
47434745785SMilan Broz 
47534745785SMilan Broz 	kzfree(lmk->seed);
47634745785SMilan Broz 	lmk->seed = NULL;
47734745785SMilan Broz }
47834745785SMilan Broz 
47934745785SMilan Broz static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
48034745785SMilan Broz 			    const char *opts)
48134745785SMilan Broz {
48234745785SMilan Broz 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
48334745785SMilan Broz 
48434745785SMilan Broz 	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
48534745785SMilan Broz 	if (IS_ERR(lmk->hash_tfm)) {
48634745785SMilan Broz 		ti->error = "Error initializing LMK hash";
48734745785SMilan Broz 		return PTR_ERR(lmk->hash_tfm);
48834745785SMilan Broz 	}
48934745785SMilan Broz 
49034745785SMilan Broz 	/* No seed in LMK version 2 */
49134745785SMilan Broz 	if (cc->key_parts == cc->tfms_count) {
49234745785SMilan Broz 		lmk->seed = NULL;
49334745785SMilan Broz 		return 0;
49434745785SMilan Broz 	}
49534745785SMilan Broz 
49634745785SMilan Broz 	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
49734745785SMilan Broz 	if (!lmk->seed) {
49834745785SMilan Broz 		crypt_iv_lmk_dtr(cc);
49934745785SMilan Broz 		ti->error = "Error kmallocing seed storage in LMK";
50034745785SMilan Broz 		return -ENOMEM;
50134745785SMilan Broz 	}
50234745785SMilan Broz 
50334745785SMilan Broz 	return 0;
50434745785SMilan Broz }
50534745785SMilan Broz 
50634745785SMilan Broz static int crypt_iv_lmk_init(struct crypt_config *cc)
50734745785SMilan Broz {
50834745785SMilan Broz 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
50934745785SMilan Broz 	int subkey_size = cc->key_size / cc->key_parts;
51034745785SMilan Broz 
51134745785SMilan Broz 	/* LMK seed is on the position of LMK_KEYS + 1 key */
51234745785SMilan Broz 	if (lmk->seed)
51334745785SMilan Broz 		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
51434745785SMilan Broz 		       crypto_shash_digestsize(lmk->hash_tfm));
51534745785SMilan Broz 
51634745785SMilan Broz 	return 0;
51734745785SMilan Broz }
51834745785SMilan Broz 
/* Erase the key-derived seed (no-op for seedless LMK version 2). */
static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}
52834745785SMilan Broz 
/*
 * Compute one LMK IV: MD5 over (optional seed || sector data blocks 1-31
 * || cropped sector number), exported without final MD5 padding and
 * byte-swapped to little-endian.
 */
static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	/* Seed is only present in LMK version 3. */
	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	/* Emit the raw MD5 state words in little-endian order as the IV. */
	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}
57734745785SMilan Broz 
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	/*
	 * Writes derive the IV from the plaintext sector data; reads use
	 * a zero IV here and fix the plaintext up in crypt_iv_lmk_post().
	 */
	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}
59334745785SMilan Broz 
/*
 * Post-process a completed read: recompute the IV from the decrypted
 * data and XOR it into the first cipher block of the sector.
 */
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	/* Nothing to do for writes - the IV already came from the data. */
	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}
61334745785SMilan Broz 
614ed04d981SMilan Broz static void crypt_iv_tcw_dtr(struct crypt_config *cc)
615ed04d981SMilan Broz {
616ed04d981SMilan Broz 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
617ed04d981SMilan Broz 
618ed04d981SMilan Broz 	kzfree(tcw->iv_seed);
619ed04d981SMilan Broz 	tcw->iv_seed = NULL;
620ed04d981SMilan Broz 	kzfree(tcw->whitening);
621ed04d981SMilan Broz 	tcw->whitening = NULL;
622ed04d981SMilan Broz 
623ed04d981SMilan Broz 	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
624ed04d981SMilan Broz 		crypto_free_shash(tcw->crc32_tfm);
625ed04d981SMilan Broz 	tcw->crc32_tfm = NULL;
626ed04d981SMilan Broz }
627ed04d981SMilan Broz 
/*
 * Constructor for the TCW IV mode: verify the key is large enough to
 * carry the extra IV seed and whitening material beyond the cipher key,
 * then allocate the crc32 transform and the seed/whitening buffers.
 *
 * Returns 0 on success or a negative errno with ti->error set.
 */
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		/* The destructor copes with the partially built state. */
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}
654ed04d981SMilan Broz 
655ed04d981SMilan Broz static int crypt_iv_tcw_init(struct crypt_config *cc)
656ed04d981SMilan Broz {
657ed04d981SMilan Broz 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
658ed04d981SMilan Broz 	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
659ed04d981SMilan Broz 
660ed04d981SMilan Broz 	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
661ed04d981SMilan Broz 	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
662ed04d981SMilan Broz 	       TCW_WHITENING_SIZE);
663ed04d981SMilan Broz 
664ed04d981SMilan Broz 	return 0;
665ed04d981SMilan Broz }
666ed04d981SMilan Broz 
667ed04d981SMilan Broz static int crypt_iv_tcw_wipe(struct crypt_config *cc)
668ed04d981SMilan Broz {
669ed04d981SMilan Broz 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
670ed04d981SMilan Broz 
671ed04d981SMilan Broz 	memset(tcw->iv_seed, 0, cc->iv_size);
672ed04d981SMilan Broz 	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
673ed04d981SMilan Broz 
674ed04d981SMilan Broz 	return 0;
675ed04d981SMilan Broz }
676ed04d981SMilan Broz 
/*
 * Compute the per-sector TCW whitening value and XOR it over one
 * 512-byte sector of @data in place.
 *
 * The 16-byte whitening seed is XORed with the little-endian sector
 * number, each 32-bit quarter is replaced by its CRC32, and the result
 * is folded down to 8 bytes which are then XORed across the sector.
 *
 * Returns 0 on success or a negative error from the shash calls.
 */
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	/* Fold the four CRC words down to the final 8-byte value. */
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	/* Wipe the intermediate whitening value from the stack. */
	memzero_explicit(buf, sizeof(buf));
	return r;
}
716ed04d981SMilan Broz 
/*
 * Generate the TCW IV for one sector.  On the read path this also
 * strips the whitening from the ciphertext in place before decryption
 * (the write path re-applies it in crypt_iv_tcw_post()).
 *
 * The IV is the stored seed XORed with the little-endian sector number;
 * for IVs longer than 8 bytes the sector number is XORed into the tail
 * as well.
 */
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}
740ed04d981SMilan Broz 
741ed04d981SMilan Broz static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
742ed04d981SMilan Broz 			     struct dm_crypt_request *dmreq)
743ed04d981SMilan Broz {
744ed04d981SMilan Broz 	u8 *dst;
745ed04d981SMilan Broz 	int r;
746ed04d981SMilan Broz 
747ed04d981SMilan Broz 	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
748ed04d981SMilan Broz 		return 0;
749ed04d981SMilan Broz 
750ed04d981SMilan Broz 	/* Apply whitening on ciphertext */
751ed04d981SMilan Broz 	dst = kmap_atomic(sg_page(&dmreq->sg_out));
752ed04d981SMilan Broz 	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
753ed04d981SMilan Broz 	kunmap_atomic(dst);
754ed04d981SMilan Broz 
755ed04d981SMilan Broz 	return r;
756ed04d981SMilan Broz }
757ed04d981SMilan Broz 
/*
 * Dispatch tables for the supported IV generation modes.  Every mode
 * provides at least a .generator; modes with private state also supply
 * .ctr/.dtr (constructor/destructor) and, where that state is derived
 * from the key, .init/.wipe.  The optional .post hook runs after the
 * cipher operation itself (see crypt_convert_block()).
 */
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

/* null: for modes that need no IV at all. */
static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};
801ed04d981SMilan Broz 
802d469f841SMilan Broz static void crypt_convert_init(struct crypt_config *cc,
803d469f841SMilan Broz 			       struct convert_context *ctx,
8041da177e4SLinus Torvalds 			       struct bio *bio_out, struct bio *bio_in,
805fcd369daSMilan Broz 			       sector_t sector)
8061da177e4SLinus Torvalds {
8071da177e4SLinus Torvalds 	ctx->bio_in = bio_in;
8081da177e4SLinus Torvalds 	ctx->bio_out = bio_out;
809003b5c57SKent Overstreet 	if (bio_in)
810003b5c57SKent Overstreet 		ctx->iter_in = bio_in->bi_iter;
811003b5c57SKent Overstreet 	if (bio_out)
812003b5c57SKent Overstreet 		ctx->iter_out = bio_out->bi_iter;
813c66029f4SMikulas Patocka 	ctx->cc_sector = sector + cc->iv_offset;
81443d69034SMilan Broz 	init_completion(&ctx->restart);
8151da177e4SLinus Torvalds }
8161da177e4SLinus Torvalds 
817b2174eebSHuang Ying static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
818b2174eebSHuang Ying 					     struct ablkcipher_request *req)
819b2174eebSHuang Ying {
820b2174eebSHuang Ying 	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
821b2174eebSHuang Ying }
822b2174eebSHuang Ying 
823b2174eebSHuang Ying static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
824b2174eebSHuang Ying 					       struct dm_crypt_request *dmreq)
825b2174eebSHuang Ying {
826b2174eebSHuang Ying 	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
827b2174eebSHuang Ying }
828b2174eebSHuang Ying 
/*
 * Return the IV buffer placed directly after the dm_crypt_request,
 * aligned up as the cipher requires (alignmask + 1 is the alignment).
 */
static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}
8352dc5327dSMilan Broz 
/*
 * Encrypt or decrypt one 512-byte sector: map the current input and
 * output bio segments into single-entry scatterlists, generate the IV
 * (if the configured IV mode has a generator), run the ablkcipher and
 * apply the IV mode's .post hook on synchronous success.
 *
 * Returns 0 for synchronous completion, -EBUSY/-EINPROGRESS when the
 * crypto driver processes the request asynchronously, or a negative
 * error code.
 */
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	/* Consume one sector from both bio iterators. */
	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	/* Post-processing only runs on synchronous success. */
	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}
88101482b76SMilan Broz 
88295497a96SMilan Broz static void kcryptd_async_done(struct crypto_async_request *async_req,
88395497a96SMilan Broz 			       int error);
884c0297721SAndi Kleen 
/*
 * Make ctx->req ready for the next sector: allocate it from the
 * mempool on first use and bind the tfm selected by the sector number.
 * NOTE(review): the mask assumes cc->tfms_count is a power of two —
 * confirm against the keycount parsing elsewhere in this file.
 */
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}
903ddd42edfSMilan Broz 
/*
 * Free a crypto request unless it is the one embedded directly after
 * the dm_crypt_io in the per-bio data — that one is part of the bio's
 * own allocation and must not go back to the mempool.
 */
static void crypt_free_req(struct crypt_config *cc,
			   struct ablkcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct ablkcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}
912298a9fa0SMikulas Patocka 
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 *
 * Returns 0 once every sector has completed synchronously or been
 * handed off to the async completion path, or the first hard error
 * from crypt_convert_block().
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	/*
	 * Hold one reference so cc_pending cannot drop to zero while we
	 * are still submitting sectors below.
	 */
	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
		case -EINPROGRESS:
			/* Ownership passed to the async path; drop our req. */
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* There was an error while processing the request. */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}
9661da177e4SLinus Torvalds 
967cf2f1abfSMikulas Patocka static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
968cf2f1abfSMikulas Patocka 
9691da177e4SLinus Torvalds /*
9701da177e4SLinus Torvalds  * Generate a new unfragmented bio with the given size
9711da177e4SLinus Torvalds  * This should never violate the device limitations
9727145c241SMikulas Patocka  *
9737145c241SMikulas Patocka  * This function may be called concurrently. If we allocate from the mempool
9747145c241SMikulas Patocka  * concurrently, there is a possibility of deadlock. For example, if we have
9757145c241SMikulas Patocka  * mempool of 256 pages, two processes, each wanting 256, pages allocate from
9767145c241SMikulas Patocka  * the mempool concurrently, it may deadlock in a situation where both processes
9777145c241SMikulas Patocka  * have allocated 128 pages and the mempool is exhausted.
9787145c241SMikulas Patocka  *
9797145c241SMikulas Patocka  * In order to avoid this scenario we allocate the pages under a mutex.
9807145c241SMikulas Patocka  *
9817145c241SMikulas Patocka  * In order to not degrade performance with excessive locking, we try
9827145c241SMikulas Patocka  * non-blocking allocations without a mutex first but on failure we fallback
9837145c241SMikulas Patocka  * to blocking allocations with a mutex.
9841da177e4SLinus Torvalds  */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* First pass: opportunistic non-blocking page allocation. */
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;
	struct bio_vec *bvec;

retry:
	/* Blocking retries serialize on the mutex (see comment above). */
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto return_clone;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			/* Undo partial allocation, retry in blocking mode. */
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_WAIT;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		/* Fill the bvec by hand; pages come from our own mempool. */
		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		remaining_size -= len;
	}

return_clone:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}
10341da177e4SLinus Torvalds 
/*
 * Return every page attached to a clone bio to the page mempool.
 */
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
10461da177e4SLinus Torvalds 
1047298a9fa0SMikulas Patocka static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1048dc440d1eSMilan Broz 			  struct bio *bio, sector_t sector)
1049dc440d1eSMilan Broz {
105049a8a920SAlasdair G Kergon 	io->cc = cc;
1051dc440d1eSMilan Broz 	io->base_bio = bio;
1052dc440d1eSMilan Broz 	io->sector = sector;
1053dc440d1eSMilan Broz 	io->error = 0;
1054610f2de3SMikulas Patocka 	io->ctx.req = NULL;
105540b6229bSMikulas Patocka 	atomic_set(&io->io_pending, 0);
1056dc440d1eSMilan Broz }
1057dc440d1eSMilan Broz 
/* Take a reference on the io; paired with crypt_dec_pending(). */
static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}
10623e1a8bddSMilan Broz 
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * Only the final reference completes the base bio.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	/* Last reference: release the crypto request, finish the bio. */
	if (io->ctx.req)
		crypt_free_req(cc, io->ctx.req, base_bio);

	base_bio->bi_error = error;
	bio_endio(base_bio);
}
10821da177e4SLinus Torvalds 
10831da177e4SLinus Torvalds /*
1084cabf08e4SMilan Broz  * kcryptd/kcryptd_io:
10851da177e4SLinus Torvalds  *
10861da177e4SLinus Torvalds  * Needed because it would be very unwise to do decryption in an
108723541d2dSMilan Broz  * interrupt context.
1088cabf08e4SMilan Broz  *
1089cabf08e4SMilan Broz  * kcryptd performs the actual encryption or decryption.
1090cabf08e4SMilan Broz  *
1091cabf08e4SMilan Broz  * kcryptd_io performs the IO submission.
1092cabf08e4SMilan Broz  *
1093cabf08e4SMilan Broz  * They must be separated as otherwise the final stages could be
1094cabf08e4SMilan Broz  * starved by new requests which can block in the first stages due
1095cabf08e4SMilan Broz  * to memory allocation.
1096c0297721SAndi Kleen  *
1097c0297721SAndi Kleen  * The work is done per CPU global for all dm-crypt instances.
1098c0297721SAndi Kleen  * They should not depend on each other and do not block.
10991da177e4SLinus Torvalds  */
11004246a0b6SChristoph Hellwig static void crypt_endio(struct bio *clone)
11018b004457SMilan Broz {
1102028867acSAlasdair G Kergon 	struct dm_crypt_io *io = clone->bi_private;
110349a8a920SAlasdair G Kergon 	struct crypt_config *cc = io->cc;
1104ee7a491eSMilan Broz 	unsigned rw = bio_data_dir(clone);
11058b004457SMilan Broz 
11068b004457SMilan Broz 	/*
11076712ecf8SNeilBrown 	 * free the processed pages
11088b004457SMilan Broz 	 */
1109ee7a491eSMilan Broz 	if (rw == WRITE)
1110644bd2f0SNeil Brown 		crypt_free_buffer_pages(cc, clone);
11118b004457SMilan Broz 
11128b004457SMilan Broz 	bio_put(clone);
1113ee7a491eSMilan Broz 
11144246a0b6SChristoph Hellwig 	if (rw == READ && !clone->bi_error) {
1115cabf08e4SMilan Broz 		kcryptd_queue_crypt(io);
11166712ecf8SNeilBrown 		return;
1117ee7a491eSMilan Broz 	}
11185742fd77SMilan Broz 
11194246a0b6SChristoph Hellwig 	if (unlikely(clone->bi_error))
11204246a0b6SChristoph Hellwig 		io->error = clone->bi_error;
11215742fd77SMilan Broz 
11225742fd77SMilan Broz 	crypt_dec_pending(io);
11238b004457SMilan Broz }
11248b004457SMilan Broz 
1125028867acSAlasdair G Kergon static void clone_init(struct dm_crypt_io *io, struct bio *clone)
11268b004457SMilan Broz {
112749a8a920SAlasdair G Kergon 	struct crypt_config *cc = io->cc;
11288b004457SMilan Broz 
11298b004457SMilan Broz 	clone->bi_private = io;
11308b004457SMilan Broz 	clone->bi_end_io  = crypt_endio;
11318b004457SMilan Broz 	clone->bi_bdev    = cc->dev->bdev;
11328b004457SMilan Broz 	clone->bi_rw      = io->base_bio->bi_rw;
11338b004457SMilan Broz }
11348b004457SMilan Broz 
/*
 * Clone the base bio and submit the read to the underlying device.
 * Returns 0 if the clone was submitted, 1 if allocation failed under
 * the given gfp mask (the caller may retry from process context).
 */
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

	/*
	 * We need the original biovec array in order to decrypt
	 * the whole bio data *afterwards* -- thanks to immutable
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast().
	 */
	clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	/* Reference dropped by crypt_endio() / the decryption path. */
	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}
11588b004457SMilan Broz 
/*
 * Workqueue path for read submission: retry the clone allocation with
 * GFP_NOIO (which may sleep) and record -ENOMEM if it still fails.
 */
static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = -ENOMEM;
	crypt_dec_pending(io);
}
1168395b167cSAlasdair G Kergon 
/* Defer read submission to the io workqueue. */
static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}
1176395b167cSAlasdair G Kergon 
/* Submit an already-encrypted write clone to the underlying device. */
static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}
1183dc267621SMikulas Patocka 
/* Map an rb_node embedded in a dm_crypt_io back to the containing io. */
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1185b3c5fd30SMikulas Patocka 
/*
 * Dedicated write-submission kthread: detaches the sector-sorted
 * cc->write_tree and submits its ios in order.  Both the tree and this
 * thread's wait queue are protected by cc->write_thread_wait.lock.
 */
static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(&cc->write_thread_wait.lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		/* Nothing queued: sleep until woken or asked to stop. */
		__set_current_state(TASK_INTERRUPTIBLE);
		__add_wait_queue(&cc->write_thread_wait, &wait);

		spin_unlock_irq(&cc->write_thread_wait.lock);

		if (unlikely(kthread_should_stop())) {
			set_task_state(current, TASK_RUNNING);
			remove_wait_queue(&cc->write_thread_wait, &wait);
			break;
		}

		schedule();

		set_task_state(current, TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_wait.lock);
		__remove_wait_queue(&cc->write_thread_wait, &wait);
		goto continue_locked;

pop_from_list:
		/* Detach the whole tree so submission can run unlocked. */
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_wait.lock);

		BUG_ON(rb_parent(write_tree.rb_node));

		/*
		 * Note: we cannot walk the tree here with rb_next because
		 * the structures may be freed when kcryptd_io_write is called.
		 */
		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}
1242dc267621SMikulas Patocka 
/*
 * Hand an encrypted write clone to the block layer.  With
 * DM_CRYPT_NO_OFFLOAD set, synchronous completions are submitted
 * inline; otherwise the io is inserted into cc->write_tree keyed by
 * sector so dmcrypt_write() can submit bios in sector order.
 */
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	/* On encryption error, release the clone and drop our reference. */
	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
		generic_make_request(clone);
		return;
	}

	/* Insert into the sector-sorted tree under the wait-queue lock. */
	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);

	wake_up_locked(&cc->write_thread_wait);
	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
}
12854e4eef64SMilan Broz 
/*
 * Encrypt the data of io->base_bio into a freshly allocated clone bio
 * and submit the clone for write I/O.
 *
 * crypt_convert() may finish synchronously or leave requests in flight
 * (tracked by io->ctx.cc_pending); if everything completed here, the
 * clone is handed to kcryptd_crypt_write_io_submit() immediately,
 * otherwise the async completion path submits it later.
 */
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = -EIO;
		goto dec;
	}

	/* The clone receives the ciphertext. */
	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	/* Second reference covers the conversion itself. */
	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = -EIO;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}
132684131db6SMilan Broz 
/*
 * All crypto requests for a read have finished; release the pending
 * reference taken for the conversion.
 */
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}
13315742fd77SMilan Broz 
/*
 * Decrypt io->base_bio in place (bio_in and bio_out are both the base
 * bio).  crypt_convert() may leave requests in flight; the last
 * cc_pending reference dropped (here or in kcryptd_async_done) calls
 * kcryptd_crypt_read_done().
 */
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	/* Hold the io while the conversion is in flight. */
	crypt_inc_pending(io);

	/* In-place conversion: source and destination are the base bio. */
	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	/* If no async requests remain, the read is done right now. */
	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}
13518b004457SMilan Broz 
/*
 * Completion callback for asynchronous ablkcipher requests issued by
 * crypt_convert().  May be invoked twice for a request that went
 * through the crypto driver backlog: first with -EINPROGRESS, then
 * with the final status.
 */
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	/* On success, run the IV generator's post-processing hook if any. */
	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	/* Other requests for this context are still outstanding. */
	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	/* Last request: complete the read or submit the encrypted write. */
	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}
138695497a96SMilan Broz 
13874e4eef64SMilan Broz static void kcryptd_crypt(struct work_struct *work)
13884e4eef64SMilan Broz {
13894e4eef64SMilan Broz 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
13904e4eef64SMilan Broz 
13914e4eef64SMilan Broz 	if (bio_data_dir(io->base_bio) == READ)
13924e4eef64SMilan Broz 		kcryptd_crypt_read_convert(io);
13934e4eef64SMilan Broz 	else
13944e4eef64SMilan Broz 		kcryptd_crypt_write_convert(io);
13958b004457SMilan Broz }
13968b004457SMilan Broz 
1397395b167cSAlasdair G Kergon static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1398395b167cSAlasdair G Kergon {
139949a8a920SAlasdair G Kergon 	struct crypt_config *cc = io->cc;
1400395b167cSAlasdair G Kergon 
1401395b167cSAlasdair G Kergon 	INIT_WORK(&io->work, kcryptd_crypt);
1402395b167cSAlasdair G Kergon 	queue_work(cc->crypt_queue, &io->work);
1403395b167cSAlasdair G Kergon }
1404395b167cSAlasdair G Kergon 
14051da177e4SLinus Torvalds /*
14061da177e4SLinus Torvalds  * Decode key from its hex representation
14071da177e4SLinus Torvalds  */
14081da177e4SLinus Torvalds static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
14091da177e4SLinus Torvalds {
14101da177e4SLinus Torvalds 	char buffer[3];
14111da177e4SLinus Torvalds 	unsigned int i;
14121da177e4SLinus Torvalds 
14131da177e4SLinus Torvalds 	buffer[2] = '\0';
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 	for (i = 0; i < size; i++) {
14161da177e4SLinus Torvalds 		buffer[0] = *hex++;
14171da177e4SLinus Torvalds 		buffer[1] = *hex++;
14181da177e4SLinus Torvalds 
14191a66a08aSmajianpeng 		if (kstrtou8(buffer, 16, &key[i]))
14201da177e4SLinus Torvalds 			return -EINVAL;
14211da177e4SLinus Torvalds 	}
14221da177e4SLinus Torvalds 
14231da177e4SLinus Torvalds 	if (*hex != '\0')
14241da177e4SLinus Torvalds 		return -EINVAL;
14251da177e4SLinus Torvalds 
14261da177e4SLinus Torvalds 	return 0;
14271da177e4SLinus Torvalds }
14281da177e4SLinus Torvalds 
1429fd2d231fSMikulas Patocka static void crypt_free_tfms(struct crypt_config *cc)
1430d1f96423SMilan Broz {
1431d1f96423SMilan Broz 	unsigned i;
1432d1f96423SMilan Broz 
1433fd2d231fSMikulas Patocka 	if (!cc->tfms)
1434fd2d231fSMikulas Patocka 		return;
1435fd2d231fSMikulas Patocka 
1436d1f96423SMilan Broz 	for (i = 0; i < cc->tfms_count; i++)
1437fd2d231fSMikulas Patocka 		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
1438fd2d231fSMikulas Patocka 			crypto_free_ablkcipher(cc->tfms[i]);
1439fd2d231fSMikulas Patocka 			cc->tfms[i] = NULL;
1440d1f96423SMilan Broz 		}
1441d1f96423SMilan Broz 
1442fd2d231fSMikulas Patocka 	kfree(cc->tfms);
1443fd2d231fSMikulas Patocka 	cc->tfms = NULL;
1444fd2d231fSMikulas Patocka }
1445fd2d231fSMikulas Patocka 
1446fd2d231fSMikulas Patocka static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
1447d1f96423SMilan Broz {
1448d1f96423SMilan Broz 	unsigned i;
1449d1f96423SMilan Broz 	int err;
1450d1f96423SMilan Broz 
1451fd2d231fSMikulas Patocka 	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
1452fd2d231fSMikulas Patocka 			   GFP_KERNEL);
1453fd2d231fSMikulas Patocka 	if (!cc->tfms)
1454fd2d231fSMikulas Patocka 		return -ENOMEM;
1455fd2d231fSMikulas Patocka 
1456d1f96423SMilan Broz 	for (i = 0; i < cc->tfms_count; i++) {
1457fd2d231fSMikulas Patocka 		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
1458fd2d231fSMikulas Patocka 		if (IS_ERR(cc->tfms[i])) {
1459fd2d231fSMikulas Patocka 			err = PTR_ERR(cc->tfms[i]);
1460fd2d231fSMikulas Patocka 			crypt_free_tfms(cc);
1461d1f96423SMilan Broz 			return err;
1462d1f96423SMilan Broz 		}
1463d1f96423SMilan Broz 	}
1464d1f96423SMilan Broz 
1465d1f96423SMilan Broz 	return 0;
1466d1f96423SMilan Broz }
1467d1f96423SMilan Broz 
1468c0297721SAndi Kleen static int crypt_setkey_allcpus(struct crypt_config *cc)
1469c0297721SAndi Kleen {
1470da31a078SMilan Broz 	unsigned subkey_size;
1471fd2d231fSMikulas Patocka 	int err = 0, i, r;
1472c0297721SAndi Kleen 
1473da31a078SMilan Broz 	/* Ignore extra keys (which are used for IV etc) */
1474da31a078SMilan Broz 	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
1475da31a078SMilan Broz 
1476d1f96423SMilan Broz 	for (i = 0; i < cc->tfms_count; i++) {
1477fd2d231fSMikulas Patocka 		r = crypto_ablkcipher_setkey(cc->tfms[i],
1478fd2d231fSMikulas Patocka 					     cc->key + (i * subkey_size),
1479fd2d231fSMikulas Patocka 					     subkey_size);
1480c0297721SAndi Kleen 		if (r)
1481c0297721SAndi Kleen 			err = r;
1482c0297721SAndi Kleen 	}
1483c0297721SAndi Kleen 
1484c0297721SAndi Kleen 	return err;
1485c0297721SAndi Kleen }
1486c0297721SAndi Kleen 
/*
 * Decode the hex key string into cc->key and program it into the
 * transforms.
 *
 * The caller's hex string is overwritten with '0' characters on ALL
 * paths (success and failure) so plaintext key material does not
 * linger in the argument buffer.
 */
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}
1513e48d4bbfSMilan Broz 
/*
 * Wipe the key: clear the valid flag, zero the stored key material and
 * re-program the (now all-zero) key into the transforms.
 */
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}
1521e48d4bbfSMilan Broz 
/*
 * Destructor: tear down everything crypt_ctr() set up.
 *
 * Also used as the error-unwind path of crypt_ctr() ("goto bad"), so
 * every step below tolerates a partially initialised context (NULL or
 * never-allocated members are skipped).
 */
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	/* Stop the write submission thread started in crypt_ctr(). */
	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	/* kzfree() zeroes the memory before freeing it. */
	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}
156128513fccSMilan Broz 
/*
 * Parse the <cipher> table argument and set up the crypto transforms,
 * the IV generator and the key for this target.
 *
 * Accepted legacy format: cipher[:keycount]-mode-iv[:ivopts]
 * On failure the caller runs crypt_dtr(), which releases whatever this
 * function managed to allocate; only cipher_api is freed locally.
 *
 * Returns 0 on success or a negative errno.
 */
static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/* Keep the unparsed spec: crypt_status() emits it back. */
	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;
	cc->key_extra_size = 0;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	/* Build the crypto API algorithm name, e.g. "cbc(aes)". */
	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Version 2 and 3 is recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}
17225ebaee6dSMilan Broz 
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start> [<#opt_params> <opt_params>]
 *
 * Optional feature parameters parsed below: allow_discards,
 * same_cpu_crypt, submit_from_crypt_cpus.
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 3, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	/* Two hex digits per key byte. */
	key_size = strlen(argv[1]) >> 1;

	/* Key material is stored in the flexible tail of crypt_config. */
	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	/*
	 * Per-request layout in req_pool:
	 *   ablkcipher_request | tfm req ctx | dm_crypt_request | pad | IV
	 */
	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& crypto_ablkcipher_alignmask(any_tfm(cc));
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
	}

	ret = -ENOMEM;
	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	/* Same layout again, embedded after dm_crypt_io in per-bio data. */
	cc->per_bio_data_size = ti->per_bio_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		ret = -EINVAL;
		while (opt_params--) {
			opt_string = dm_shift_arg(&as);
			if (!opt_string) {
				ti->error = "Not enough feature arguments";
				goto bad;
			}

			if (!strcasecmp(opt_string, "allow_discards"))
				ti->num_discard_bios = 1;

			else if (!strcasecmp(opt_string, "same_cpu_crypt"))
				set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

			else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
				set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);

			else {
				ti->error = "Invalid feature arguments";
				goto bad;
			}
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	/*
	 * State for the write submission thread;
	 * kcryptd_crypt_write_io_submit() queues bios on the
	 * sector-sorted write_tree.
	 */
	init_waitqueue_head(&cc->write_thread_wait);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	/* crypt_dtr() copes with a partially initialised context. */
	crypt_dtr(ti);
	return ret;
}
18971da177e4SLinus Torvalds 
/*
 * Map a bio submitted to the crypt target.
 *
 * Flushes and discards are remapped straight to the underlying device;
 * all other bios are handed to the kcryptd machinery and this function
 * returns DM_MAPIO_SUBMITTED.
 */
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/* Per-io state lives in the bio's per-bio data area (no alloc). */
	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
	/* The crypto request is laid out directly after the io struct. */
	io->ctx.req = (struct ablkcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		/* Try a non-blocking read; on failure defer to the queue. */
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
19281da177e4SLinus Torvalds 
/*
 * Report target status.
 *
 * STATUSTYPE_TABLE re-emits the constructor arguments — cipher spec,
 * key in hex ("-" for a zero-size key), iv_offset, device, start and
 * any active feature flags — so the table line can be reloaded.
 */
static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;	/* sz is consumed implicitly by DMEMIT() */
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		/* Count the feature args first so the emitted count is exact. */
		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
		}

		break;
	}
}
19691da177e4SLinus Torvalds 
1970e48d4bbfSMilan Broz static void crypt_postsuspend(struct dm_target *ti)
1971e48d4bbfSMilan Broz {
1972e48d4bbfSMilan Broz 	struct crypt_config *cc = ti->private;
1973e48d4bbfSMilan Broz 
1974e48d4bbfSMilan Broz 	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1975e48d4bbfSMilan Broz }
1976e48d4bbfSMilan Broz 
1977e48d4bbfSMilan Broz static int crypt_preresume(struct dm_target *ti)
1978e48d4bbfSMilan Broz {
1979e48d4bbfSMilan Broz 	struct crypt_config *cc = ti->private;
1980e48d4bbfSMilan Broz 
1981e48d4bbfSMilan Broz 	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
1982e48d4bbfSMilan Broz 		DMERR("aborting resume - crypt key is not set.");
1983e48d4bbfSMilan Broz 		return -EAGAIN;
1984e48d4bbfSMilan Broz 	}
1985e48d4bbfSMilan Broz 
1986e48d4bbfSMilan Broz 	return 0;
1987e48d4bbfSMilan Broz }
1988e48d4bbfSMilan Broz 
1989e48d4bbfSMilan Broz static void crypt_resume(struct dm_target *ti)
1990e48d4bbfSMilan Broz {
1991e48d4bbfSMilan Broz 	struct crypt_config *cc = ti->private;
1992e48d4bbfSMilan Broz 
1993e48d4bbfSMilan Broz 	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1994e48d4bbfSMilan Broz }
1995e48d4bbfSMilan Broz 
1996e48d4bbfSMilan Broz /* Message interface
1997e48d4bbfSMilan Broz  *	key set <key>
1998e48d4bbfSMilan Broz  *	key wipe
1999e48d4bbfSMilan Broz  */
2000e48d4bbfSMilan Broz static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
2001e48d4bbfSMilan Broz {
2002e48d4bbfSMilan Broz 	struct crypt_config *cc = ti->private;
2003542da317SMilan Broz 	int ret = -EINVAL;
2004e48d4bbfSMilan Broz 
2005e48d4bbfSMilan Broz 	if (argc < 2)
2006e48d4bbfSMilan Broz 		goto error;
2007e48d4bbfSMilan Broz 
2008498f0103SMike Snitzer 	if (!strcasecmp(argv[0], "key")) {
2009e48d4bbfSMilan Broz 		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
2010e48d4bbfSMilan Broz 			DMWARN("not suspended during key manipulation.");
2011e48d4bbfSMilan Broz 			return -EINVAL;
2012e48d4bbfSMilan Broz 		}
2013498f0103SMike Snitzer 		if (argc == 3 && !strcasecmp(argv[1], "set")) {
2014542da317SMilan Broz 			ret = crypt_set_key(cc, argv[2]);
2015542da317SMilan Broz 			if (ret)
2016542da317SMilan Broz 				return ret;
2017542da317SMilan Broz 			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
2018542da317SMilan Broz 				ret = cc->iv_gen_ops->init(cc);
2019542da317SMilan Broz 			return ret;
2020542da317SMilan Broz 		}
2021498f0103SMike Snitzer 		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
2022542da317SMilan Broz 			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2023542da317SMilan Broz 				ret = cc->iv_gen_ops->wipe(cc);
2024542da317SMilan Broz 				if (ret)
2025542da317SMilan Broz 					return ret;
2026542da317SMilan Broz 			}
2027e48d4bbfSMilan Broz 			return crypt_wipe_key(cc);
2028e48d4bbfSMilan Broz 		}
2029542da317SMilan Broz 	}
2030e48d4bbfSMilan Broz 
2031e48d4bbfSMilan Broz error:
2032e48d4bbfSMilan Broz 	DMWARN("unrecognised message received.");
2033e48d4bbfSMilan Broz 	return -EINVAL;
2034e48d4bbfSMilan Broz }
2035e48d4bbfSMilan Broz 
2036d41e26b9SMilan Broz static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2037d41e26b9SMilan Broz 		       struct bio_vec *biovec, int max_size)
2038d41e26b9SMilan Broz {
2039d41e26b9SMilan Broz 	struct crypt_config *cc = ti->private;
2040d41e26b9SMilan Broz 	struct request_queue *q = bdev_get_queue(cc->dev->bdev);
2041d41e26b9SMilan Broz 
2042d41e26b9SMilan Broz 	if (!q->merge_bvec_fn)
2043d41e26b9SMilan Broz 		return max_size;
2044d41e26b9SMilan Broz 
2045d41e26b9SMilan Broz 	bvm->bi_bdev = cc->dev->bdev;
2046b441a262SAlasdair G Kergon 	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
2047d41e26b9SMilan Broz 
2048d41e26b9SMilan Broz 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2049d41e26b9SMilan Broz }
2050d41e26b9SMilan Broz 
2051af4874e0SMike Snitzer static int crypt_iterate_devices(struct dm_target *ti,
2052af4874e0SMike Snitzer 				 iterate_devices_callout_fn fn, void *data)
2053af4874e0SMike Snitzer {
2054af4874e0SMike Snitzer 	struct crypt_config *cc = ti->private;
2055af4874e0SMike Snitzer 
20565dea271bSMike Snitzer 	return fn(ti, cc->dev, cc->start, ti->len, data);
2057af4874e0SMike Snitzer }
2058af4874e0SMike Snitzer 
/* Device-mapper target registration for the "crypt" target. */
static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};
20741da177e4SLinus Torvalds 
20751da177e4SLinus Torvalds static int __init dm_crypt_init(void)
20761da177e4SLinus Torvalds {
20771da177e4SLinus Torvalds 	int r;
20781da177e4SLinus Torvalds 
20791da177e4SLinus Torvalds 	r = dm_register_target(&crypt_target);
208094f5e024SMikulas Patocka 	if (r < 0)
208172d94861SAlasdair G Kergon 		DMERR("register failed %d", r);
20821da177e4SLinus Torvalds 
20831da177e4SLinus Torvalds 	return r;
20841da177e4SLinus Torvalds }
20851da177e4SLinus Torvalds 
/* Module exit: unregister the crypt target. */
static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}
20901da177e4SLinus Torvalds 
/* Module entry/exit points and metadata. */
module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");
2097