11da177e4SLinus Torvalds /* 2bf14299fSJana Saout * Copyright (C) 2003 Jana Saout <jana@saout.de> 31da177e4SLinus Torvalds * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> 4ef43aa38SMilan Broz * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved. 5ef43aa38SMilan Broz * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com> 61da177e4SLinus Torvalds * 71da177e4SLinus Torvalds * This file is released under the GPL. 81da177e4SLinus Torvalds */ 91da177e4SLinus Torvalds 1043d69034SMilan Broz #include <linux/completion.h> 11d1806f6aSHerbert Xu #include <linux/err.h> 121da177e4SLinus Torvalds #include <linux/module.h> 131da177e4SLinus Torvalds #include <linux/init.h> 141da177e4SLinus Torvalds #include <linux/kernel.h> 15c538f6ecSOndrej Kozina #include <linux/key.h> 161da177e4SLinus Torvalds #include <linux/bio.h> 171da177e4SLinus Torvalds #include <linux/blkdev.h> 181da177e4SLinus Torvalds #include <linux/mempool.h> 191da177e4SLinus Torvalds #include <linux/slab.h> 201da177e4SLinus Torvalds #include <linux/crypto.h> 211da177e4SLinus Torvalds #include <linux/workqueue.h> 22dc267621SMikulas Patocka #include <linux/kthread.h> 233fcfab16SAndrew Morton #include <linux/backing-dev.h> 2460063497SArun Sharma #include <linux/atomic.h> 25378f058cSDavid Hardeman #include <linux/scatterlist.h> 26b3c5fd30SMikulas Patocka #include <linux/rbtree.h> 27027c431cSOndrej Kozina #include <linux/ctype.h> 281da177e4SLinus Torvalds #include <asm/page.h> 2948527fa7SRik Snel #include <asm/unaligned.h> 3034745785SMilan Broz #include <crypto/hash.h> 3134745785SMilan Broz #include <crypto/md5.h> 3234745785SMilan Broz #include <crypto/algapi.h> 33bbdb23b5SHerbert Xu #include <crypto/skcipher.h> 34ef43aa38SMilan Broz #include <crypto/aead.h> 35ef43aa38SMilan Broz #include <crypto/authenc.h> 36ef43aa38SMilan Broz #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */ 37c538f6ecSOndrej Kozina #include <keys/user-type.h> 381da177e4SLinus Torvalds 39586e80e6SMikulas 
Patocka #include <linux/device-mapper.h> 401da177e4SLinus Torvalds 4172d94861SAlasdair G Kergon #define DM_MSG_PREFIX "crypt" 421da177e4SLinus Torvalds 431da177e4SLinus Torvalds /* 441da177e4SLinus Torvalds * context holding the current state of a multi-part conversion 451da177e4SLinus Torvalds */ 461da177e4SLinus Torvalds struct convert_context { 4743d69034SMilan Broz struct completion restart; 481da177e4SLinus Torvalds struct bio *bio_in; 491da177e4SLinus Torvalds struct bio *bio_out; 50003b5c57SKent Overstreet struct bvec_iter iter_in; 51003b5c57SKent Overstreet struct bvec_iter iter_out; 528d683dcdSAliOS system security u64 cc_sector; 5340b6229bSMikulas Patocka atomic_t cc_pending; 54ef43aa38SMilan Broz union { 55bbdb23b5SHerbert Xu struct skcipher_request *req; 56ef43aa38SMilan Broz struct aead_request *req_aead; 57ef43aa38SMilan Broz } r; 58ef43aa38SMilan Broz 591da177e4SLinus Torvalds }; 601da177e4SLinus Torvalds 6153017030SMilan Broz /* 6253017030SMilan Broz * per bio private data 6353017030SMilan Broz */ 6453017030SMilan Broz struct dm_crypt_io { 6549a8a920SAlasdair G Kergon struct crypt_config *cc; 6653017030SMilan Broz struct bio *base_bio; 67ef43aa38SMilan Broz u8 *integrity_metadata; 68ef43aa38SMilan Broz bool integrity_metadata_from_pool; 6953017030SMilan Broz struct work_struct work; 7053017030SMilan Broz 7153017030SMilan Broz struct convert_context ctx; 7253017030SMilan Broz 7340b6229bSMikulas Patocka atomic_t io_pending; 744e4cbee9SChristoph Hellwig blk_status_t error; 750c395b0fSMilan Broz sector_t sector; 76dc267621SMikulas Patocka 77b3c5fd30SMikulas Patocka struct rb_node rb_node; 78298a9fa0SMikulas Patocka } CRYPTO_MINALIGN_ATTR; 7953017030SMilan Broz 8001482b76SMilan Broz struct dm_crypt_request { 81b2174eebSHuang Ying struct convert_context *ctx; 82ef43aa38SMilan Broz struct scatterlist sg_in[4]; 83ef43aa38SMilan Broz struct scatterlist sg_out[4]; 848d683dcdSAliOS system security u64 iv_sector; 8501482b76SMilan Broz }; 8601482b76SMilan Broz 
871da177e4SLinus Torvalds struct crypt_config; 881da177e4SLinus Torvalds 891da177e4SLinus Torvalds struct crypt_iv_operations { 901da177e4SLinus Torvalds int (*ctr)(struct crypt_config *cc, struct dm_target *ti, 911da177e4SLinus Torvalds const char *opts); 921da177e4SLinus Torvalds void (*dtr)(struct crypt_config *cc); 93b95bf2d3SMilan Broz int (*init)(struct crypt_config *cc); 94542da317SMilan Broz int (*wipe)(struct crypt_config *cc); 952dc5327dSMilan Broz int (*generator)(struct crypt_config *cc, u8 *iv, 962dc5327dSMilan Broz struct dm_crypt_request *dmreq); 972dc5327dSMilan Broz int (*post)(struct crypt_config *cc, u8 *iv, 982dc5327dSMilan Broz struct dm_crypt_request *dmreq); 991da177e4SLinus Torvalds }; 1001da177e4SLinus Torvalds 10160473592SMilan Broz struct iv_essiv_private { 102c07c88f5SKees Cook struct crypto_shash *hash_tfm; 103b95bf2d3SMilan Broz u8 *salt; 10460473592SMilan Broz }; 10560473592SMilan Broz 10660473592SMilan Broz struct iv_benbi_private { 10760473592SMilan Broz int shift; 10860473592SMilan Broz }; 10960473592SMilan Broz 11034745785SMilan Broz #define LMK_SEED_SIZE 64 /* hash + 0 */ 11134745785SMilan Broz struct iv_lmk_private { 11234745785SMilan Broz struct crypto_shash *hash_tfm; 11334745785SMilan Broz u8 *seed; 11434745785SMilan Broz }; 11534745785SMilan Broz 116ed04d981SMilan Broz #define TCW_WHITENING_SIZE 16 117ed04d981SMilan Broz struct iv_tcw_private { 118ed04d981SMilan Broz struct crypto_shash *crc32_tfm; 119ed04d981SMilan Broz u8 *iv_seed; 120ed04d981SMilan Broz u8 *whitening; 121ed04d981SMilan Broz }; 122ed04d981SMilan Broz 1231da177e4SLinus Torvalds /* 1241da177e4SLinus Torvalds * Crypt: maps a linear range of a block device 1251da177e4SLinus Torvalds * and encrypts / decrypts at the same time. 
1261da177e4SLinus Torvalds */ 1270f5d8e6eSMikulas Patocka enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, 128f659b100SRabin Vincent DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; 129c0297721SAndi Kleen 130ef43aa38SMilan Broz enum cipher_flags { 131ef43aa38SMilan Broz CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cihper */ 1328f0009a2SMilan Broz CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */ 133ef43aa38SMilan Broz }; 134ef43aa38SMilan Broz 135c0297721SAndi Kleen /* 136610f2de3SMikulas Patocka * The fields in here must be read only after initialization. 137c0297721SAndi Kleen */ 1381da177e4SLinus Torvalds struct crypt_config { 1391da177e4SLinus Torvalds struct dm_dev *dev; 1401da177e4SLinus Torvalds sector_t start; 1411da177e4SLinus Torvalds 1425059353dSMikulas Patocka struct percpu_counter n_allocated_pages; 1435059353dSMikulas Patocka 144cabf08e4SMilan Broz struct workqueue_struct *io_queue; 145cabf08e4SMilan Broz struct workqueue_struct *crypt_queue; 1463f1e9070SMilan Broz 147c7329effSMikulas Patocka spinlock_t write_thread_lock; 14872d711c8SMike Snitzer struct task_struct *write_thread; 149b3c5fd30SMikulas Patocka struct rb_root write_tree; 150dc267621SMikulas Patocka 1515ebaee6dSMilan Broz char *cipher; 1527dbcd137SMilan Broz char *cipher_string; 153ef43aa38SMilan Broz char *cipher_auth; 154c538f6ecSOndrej Kozina char *key_string; 1555ebaee6dSMilan Broz 1561b1b58f5SJulia Lawall const struct crypt_iv_operations *iv_gen_ops; 15779066ad3SHerbert Xu union { 15860473592SMilan Broz struct iv_essiv_private essiv; 15960473592SMilan Broz struct iv_benbi_private benbi; 16034745785SMilan Broz struct iv_lmk_private lmk; 161ed04d981SMilan Broz struct iv_tcw_private tcw; 16279066ad3SHerbert Xu } iv_gen_private; 1638d683dcdSAliOS system security u64 iv_offset; 1641da177e4SLinus Torvalds unsigned int iv_size; 165ff3af92bSMikulas Patocka unsigned short int sector_size; 166ff3af92bSMikulas Patocka unsigned char sector_shift; 
1671da177e4SLinus Torvalds 168fd2d231fSMikulas Patocka /* ESSIV: struct crypto_cipher *essiv_tfm */ 169fd2d231fSMikulas Patocka void *iv_private; 170ef43aa38SMilan Broz union { 171bbdb23b5SHerbert Xu struct crypto_skcipher **tfms; 172ef43aa38SMilan Broz struct crypto_aead **tfms_aead; 173ef43aa38SMilan Broz } cipher_tfm; 174d1f96423SMilan Broz unsigned tfms_count; 175ef43aa38SMilan Broz unsigned long cipher_flags; 176c0297721SAndi Kleen 177c0297721SAndi Kleen /* 178ddd42edfSMilan Broz * Layout of each crypto request: 179ddd42edfSMilan Broz * 180bbdb23b5SHerbert Xu * struct skcipher_request 181ddd42edfSMilan Broz * context 182ddd42edfSMilan Broz * padding 183ddd42edfSMilan Broz * struct dm_crypt_request 184ddd42edfSMilan Broz * padding 185ddd42edfSMilan Broz * IV 186ddd42edfSMilan Broz * 187ddd42edfSMilan Broz * The padding is added so that dm_crypt_request and the IV are 188ddd42edfSMilan Broz * correctly aligned. 189ddd42edfSMilan Broz */ 190ddd42edfSMilan Broz unsigned int dmreq_start; 191ddd42edfSMilan Broz 192298a9fa0SMikulas Patocka unsigned int per_bio_data_size; 193298a9fa0SMikulas Patocka 194e48d4bbfSMilan Broz unsigned long flags; 1951da177e4SLinus Torvalds unsigned int key_size; 196da31a078SMilan Broz unsigned int key_parts; /* independent parts in key buffer */ 197da31a078SMilan Broz unsigned int key_extra_size; /* additional keys length */ 198ef43aa38SMilan Broz unsigned int key_mac_size; /* MAC key size for authenc(...) 
*/ 199ef43aa38SMilan Broz 200ef43aa38SMilan Broz unsigned int integrity_tag_size; 201ef43aa38SMilan Broz unsigned int integrity_iv_size; 202ef43aa38SMilan Broz unsigned int on_disk_tag_size; 203ef43aa38SMilan Broz 20472d711c8SMike Snitzer /* 20572d711c8SMike Snitzer * pool for per bio private data, crypto requests, 20672d711c8SMike Snitzer * encryption requeusts/buffer pages and integrity tags 20772d711c8SMike Snitzer */ 20872d711c8SMike Snitzer unsigned tag_pool_max_sectors; 20972d711c8SMike Snitzer mempool_t tag_pool; 21072d711c8SMike Snitzer mempool_t req_pool; 21172d711c8SMike Snitzer mempool_t page_pool; 21272d711c8SMike Snitzer 21372d711c8SMike Snitzer struct bio_set bs; 21472d711c8SMike Snitzer struct mutex bio_alloc_lock; 21572d711c8SMike Snitzer 216ef43aa38SMilan Broz u8 *authenc_key; /* space for keys in authenc() format (if used) */ 2171da177e4SLinus Torvalds u8 key[0]; 2181da177e4SLinus Torvalds }; 2191da177e4SLinus Torvalds 2200a83df6cSMikulas Patocka #define MIN_IOS 64 221ef43aa38SMilan Broz #define MAX_TAG_SIZE 480 222ef43aa38SMilan Broz #define POOL_ENTRY_SIZE 512 2231da177e4SLinus Torvalds 2245059353dSMikulas Patocka static DEFINE_SPINLOCK(dm_crypt_clients_lock); 2255059353dSMikulas Patocka static unsigned dm_crypt_clients_n = 0; 2265059353dSMikulas Patocka static volatile unsigned long dm_crypt_pages_per_client; 2275059353dSMikulas Patocka #define DM_CRYPT_MEMORY_PERCENT 2 2285059353dSMikulas Patocka #define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16) 2295059353dSMikulas Patocka 230028867acSAlasdair G Kergon static void clone_init(struct dm_crypt_io *, struct bio *); 231395b167cSAlasdair G Kergon static void kcryptd_queue_crypt(struct dm_crypt_io *io); 232ef43aa38SMilan Broz static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, 233ef43aa38SMilan Broz struct scatterlist *sg); 234027581f3SOlaf Kirch 235c0297721SAndi Kleen /* 23686f917adSEric Biggers * Use this to access cipher attributes that are independent of the key. 
237c0297721SAndi Kleen */ 238bbdb23b5SHerbert Xu static struct crypto_skcipher *any_tfm(struct crypt_config *cc) 239c0297721SAndi Kleen { 240ef43aa38SMilan Broz return cc->cipher_tfm.tfms[0]; 241ef43aa38SMilan Broz } 242ef43aa38SMilan Broz 243ef43aa38SMilan Broz static struct crypto_aead *any_tfm_aead(struct crypt_config *cc) 244ef43aa38SMilan Broz { 245ef43aa38SMilan Broz return cc->cipher_tfm.tfms_aead[0]; 246c0297721SAndi Kleen } 247c0297721SAndi Kleen 2481da177e4SLinus Torvalds /* 2491da177e4SLinus Torvalds * Different IV generation algorithms: 2501da177e4SLinus Torvalds * 2513c164bd8SRik Snel * plain: the initial vector is the 32-bit little-endian version of the sector 2523a4fa0a2SRobert P. J. Day * number, padded with zeros if necessary. 2531da177e4SLinus Torvalds * 25461afef61SMilan Broz * plain64: the initial vector is the 64-bit little-endian version of the sector 25561afef61SMilan Broz * number, padded with zeros if necessary. 25661afef61SMilan Broz * 2577e3fd855SMilan Broz * plain64be: the initial vector is the 64-bit big-endian version of the sector 2587e3fd855SMilan Broz * number, padded with zeros if necessary. 2597e3fd855SMilan Broz * 2603c164bd8SRik Snel * essiv: "encrypted sector|salt initial vector", the sector number is 2611da177e4SLinus Torvalds * encrypted with the bulk cipher using a salt as key. The salt 2621da177e4SLinus Torvalds * should be derived from the bulk cipher's key via hashing. 2631da177e4SLinus Torvalds * 26448527fa7SRik Snel * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 26548527fa7SRik Snel * (needed for LRW-32-AES and possible other narrow block modes) 26648527fa7SRik Snel * 26746b47730SLudwig Nussel * null: the initial vector is always zero. Provides compatibility with 26846b47730SLudwig Nussel * obsolete loop_fish2 devices. Do not use for new devices. 
26946b47730SLudwig Nussel * 27034745785SMilan Broz * lmk: Compatible implementation of the block chaining mode used 27134745785SMilan Broz * by the Loop-AES block device encryption system 27234745785SMilan Broz * designed by Jari Ruusu. See http://loop-aes.sourceforge.net/ 27334745785SMilan Broz * It operates on full 512 byte sectors and uses CBC 27434745785SMilan Broz * with an IV derived from the sector number, the data and 27534745785SMilan Broz * optionally extra IV seed. 27634745785SMilan Broz * This means that after decryption the first block 27734745785SMilan Broz * of sector must be tweaked according to decrypted data. 27834745785SMilan Broz * Loop-AES can use three encryption schemes: 27934745785SMilan Broz * version 1: is plain aes-cbc mode 28034745785SMilan Broz * version 2: uses 64 multikey scheme with lmk IV generator 28134745785SMilan Broz * version 3: the same as version 2 with additional IV seed 28234745785SMilan Broz * (it uses 65 keys, last key is used as IV seed) 28334745785SMilan Broz * 284ed04d981SMilan Broz * tcw: Compatible implementation of the block chaining mode used 285ed04d981SMilan Broz * by the TrueCrypt device encryption system (prior to version 4.1). 286e44f23b3SMilan Broz * For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat 287ed04d981SMilan Broz * It operates on full 512 byte sectors and uses CBC 288ed04d981SMilan Broz * with an IV derived from initial key and the sector number. 289ed04d981SMilan Broz * In addition, whitening value is applied on every sector, whitening 290ed04d981SMilan Broz * is calculated from initial key, sector number and mixed using CRC32. 291ed04d981SMilan Broz * Note that this encryption scheme is vulnerable to watermarking attacks 292ed04d981SMilan Broz * and should be used for old compatible containers access only. 
293ed04d981SMilan Broz * 2941da177e4SLinus Torvalds * plumb: unimplemented, see: 2951da177e4SLinus Torvalds * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 2961da177e4SLinus Torvalds */ 2971da177e4SLinus Torvalds 2982dc5327dSMilan Broz static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, 2992dc5327dSMilan Broz struct dm_crypt_request *dmreq) 3001da177e4SLinus Torvalds { 3011da177e4SLinus Torvalds memset(iv, 0, cc->iv_size); 302283a8328SAlasdair G Kergon *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); 3031da177e4SLinus Torvalds 3041da177e4SLinus Torvalds return 0; 3051da177e4SLinus Torvalds } 3061da177e4SLinus Torvalds 30761afef61SMilan Broz static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, 3082dc5327dSMilan Broz struct dm_crypt_request *dmreq) 30961afef61SMilan Broz { 31061afef61SMilan Broz memset(iv, 0, cc->iv_size); 311283a8328SAlasdair G Kergon *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); 31261afef61SMilan Broz 31361afef61SMilan Broz return 0; 31461afef61SMilan Broz } 31561afef61SMilan Broz 3167e3fd855SMilan Broz static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv, 3177e3fd855SMilan Broz struct dm_crypt_request *dmreq) 3187e3fd855SMilan Broz { 3197e3fd855SMilan Broz memset(iv, 0, cc->iv_size); 3207e3fd855SMilan Broz /* iv_size is at least of size u64; usually it is 16 bytes */ 3217e3fd855SMilan Broz *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector); 3227e3fd855SMilan Broz 3237e3fd855SMilan Broz return 0; 3247e3fd855SMilan Broz } 3257e3fd855SMilan Broz 326b95bf2d3SMilan Broz /* Initialise ESSIV - compute salt but no local memory allocations */ 327b95bf2d3SMilan Broz static int crypt_iv_essiv_init(struct crypt_config *cc) 328b95bf2d3SMilan Broz { 329b95bf2d3SMilan Broz struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 330c07c88f5SKees Cook SHASH_DESC_ON_STACK(desc, essiv->hash_tfm); 331c0297721SAndi Kleen struct crypto_cipher *essiv_tfm; 
332fd2d231fSMikulas Patocka int err; 333b95bf2d3SMilan Broz 334c07c88f5SKees Cook desc->tfm = essiv->hash_tfm; 335b95bf2d3SMilan Broz 336c07c88f5SKees Cook err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); 337c07c88f5SKees Cook shash_desc_zero(desc); 338b95bf2d3SMilan Broz if (err) 339b95bf2d3SMilan Broz return err; 340b95bf2d3SMilan Broz 341fd2d231fSMikulas Patocka essiv_tfm = cc->iv_private; 342c0297721SAndi Kleen 343c0297721SAndi Kleen err = crypto_cipher_setkey(essiv_tfm, essiv->salt, 344c07c88f5SKees Cook crypto_shash_digestsize(essiv->hash_tfm)); 345c0297721SAndi Kleen if (err) 346c0297721SAndi Kleen return err; 347c0297721SAndi Kleen 348c0297721SAndi Kleen return 0; 349b95bf2d3SMilan Broz } 350b95bf2d3SMilan Broz 351542da317SMilan Broz /* Wipe salt and reset key derived from volume key */ 352542da317SMilan Broz static int crypt_iv_essiv_wipe(struct crypt_config *cc) 353542da317SMilan Broz { 354542da317SMilan Broz struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 355c07c88f5SKees Cook unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm); 356c0297721SAndi Kleen struct crypto_cipher *essiv_tfm; 357fd2d231fSMikulas Patocka int r, err = 0; 358542da317SMilan Broz 359542da317SMilan Broz memset(essiv->salt, 0, salt_size); 360542da317SMilan Broz 361fd2d231fSMikulas Patocka essiv_tfm = cc->iv_private; 362c0297721SAndi Kleen r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); 363c0297721SAndi Kleen if (r) 364c0297721SAndi Kleen err = r; 365c0297721SAndi Kleen 366c0297721SAndi Kleen return err; 367c0297721SAndi Kleen } 368c0297721SAndi Kleen 36986f917adSEric Biggers /* Allocate the cipher for ESSIV */ 37086f917adSEric Biggers static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc, 371c0297721SAndi Kleen struct dm_target *ti, 37286f917adSEric Biggers const u8 *salt, 37386f917adSEric Biggers unsigned int saltsize) 374c0297721SAndi Kleen { 375c0297721SAndi Kleen struct crypto_cipher *essiv_tfm; 
376c0297721SAndi Kleen int err; 377c0297721SAndi Kleen 378c0297721SAndi Kleen /* Setup the essiv_tfm with the given salt */ 3791ad0f160SEric Biggers essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, 0); 380c0297721SAndi Kleen if (IS_ERR(essiv_tfm)) { 381c0297721SAndi Kleen ti->error = "Error allocating crypto tfm for ESSIV"; 382c0297721SAndi Kleen return essiv_tfm; 383c0297721SAndi Kleen } 384c0297721SAndi Kleen 385ef43aa38SMilan Broz if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) { 386c0297721SAndi Kleen ti->error = "Block size of ESSIV cipher does " 387c0297721SAndi Kleen "not match IV size of block cipher"; 388c0297721SAndi Kleen crypto_free_cipher(essiv_tfm); 389c0297721SAndi Kleen return ERR_PTR(-EINVAL); 390c0297721SAndi Kleen } 391c0297721SAndi Kleen 392c0297721SAndi Kleen err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); 393c0297721SAndi Kleen if (err) { 394c0297721SAndi Kleen ti->error = "Failed to set key for ESSIV cipher"; 395c0297721SAndi Kleen crypto_free_cipher(essiv_tfm); 396c0297721SAndi Kleen return ERR_PTR(err); 397c0297721SAndi Kleen } 398c0297721SAndi Kleen 399c0297721SAndi Kleen return essiv_tfm; 400542da317SMilan Broz } 401542da317SMilan Broz 40260473592SMilan Broz static void crypt_iv_essiv_dtr(struct crypt_config *cc) 40360473592SMilan Broz { 404c0297721SAndi Kleen struct crypto_cipher *essiv_tfm; 40560473592SMilan Broz struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 40660473592SMilan Broz 407c07c88f5SKees Cook crypto_free_shash(essiv->hash_tfm); 408b95bf2d3SMilan Broz essiv->hash_tfm = NULL; 409b95bf2d3SMilan Broz 410b95bf2d3SMilan Broz kzfree(essiv->salt); 411b95bf2d3SMilan Broz essiv->salt = NULL; 412c0297721SAndi Kleen 413fd2d231fSMikulas Patocka essiv_tfm = cc->iv_private; 414c0297721SAndi Kleen 415c0297721SAndi Kleen if (essiv_tfm) 416c0297721SAndi Kleen crypto_free_cipher(essiv_tfm); 417c0297721SAndi Kleen 418fd2d231fSMikulas Patocka cc->iv_private = NULL; 41960473592SMilan Broz } 42060473592SMilan Broz 
4211da177e4SLinus Torvalds static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, 4221da177e4SLinus Torvalds const char *opts) 4231da177e4SLinus Torvalds { 4245861f1beSMilan Broz struct crypto_cipher *essiv_tfm = NULL; 425c07c88f5SKees Cook struct crypto_shash *hash_tfm = NULL; 4265861f1beSMilan Broz u8 *salt = NULL; 427fd2d231fSMikulas Patocka int err; 4281da177e4SLinus Torvalds 4295861f1beSMilan Broz if (!opts) { 43072d94861SAlasdair G Kergon ti->error = "Digest algorithm missing for ESSIV mode"; 4311da177e4SLinus Torvalds return -EINVAL; 4321da177e4SLinus Torvalds } 4331da177e4SLinus Torvalds 434b95bf2d3SMilan Broz /* Allocate hash algorithm */ 435c07c88f5SKees Cook hash_tfm = crypto_alloc_shash(opts, 0, 0); 43635058687SHerbert Xu if (IS_ERR(hash_tfm)) { 43772d94861SAlasdair G Kergon ti->error = "Error initializing ESSIV hash"; 4385861f1beSMilan Broz err = PTR_ERR(hash_tfm); 4395861f1beSMilan Broz goto bad; 4401da177e4SLinus Torvalds } 4411da177e4SLinus Torvalds 442c07c88f5SKees Cook salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL); 4435861f1beSMilan Broz if (!salt) { 44472d94861SAlasdair G Kergon ti->error = "Error kmallocing salt storage in ESSIV"; 4455861f1beSMilan Broz err = -ENOMEM; 4465861f1beSMilan Broz goto bad; 4471da177e4SLinus Torvalds } 4481da177e4SLinus Torvalds 449b95bf2d3SMilan Broz cc->iv_gen_private.essiv.salt = salt; 450b95bf2d3SMilan Broz cc->iv_gen_private.essiv.hash_tfm = hash_tfm; 451b95bf2d3SMilan Broz 45286f917adSEric Biggers essiv_tfm = alloc_essiv_cipher(cc, ti, salt, 453c07c88f5SKees Cook crypto_shash_digestsize(hash_tfm)); 454c0297721SAndi Kleen if (IS_ERR(essiv_tfm)) { 455c0297721SAndi Kleen crypt_iv_essiv_dtr(cc); 456c0297721SAndi Kleen return PTR_ERR(essiv_tfm); 457c0297721SAndi Kleen } 458fd2d231fSMikulas Patocka cc->iv_private = essiv_tfm; 459c0297721SAndi Kleen 4601da177e4SLinus Torvalds return 0; 4615861f1beSMilan Broz 4625861f1beSMilan Broz bad: 4635861f1beSMilan Broz if (hash_tfm && 
!IS_ERR(hash_tfm)) 464c07c88f5SKees Cook crypto_free_shash(hash_tfm); 465b95bf2d3SMilan Broz kfree(salt); 4665861f1beSMilan Broz return err; 4671da177e4SLinus Torvalds } 4681da177e4SLinus Torvalds 4692dc5327dSMilan Broz static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, 4702dc5327dSMilan Broz struct dm_crypt_request *dmreq) 4711da177e4SLinus Torvalds { 472fd2d231fSMikulas Patocka struct crypto_cipher *essiv_tfm = cc->iv_private; 473c0297721SAndi Kleen 4741da177e4SLinus Torvalds memset(iv, 0, cc->iv_size); 475283a8328SAlasdair G Kergon *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); 476c0297721SAndi Kleen crypto_cipher_encrypt_one(essiv_tfm, iv, iv); 477c0297721SAndi Kleen 4781da177e4SLinus Torvalds return 0; 4791da177e4SLinus Torvalds } 4801da177e4SLinus Torvalds 48148527fa7SRik Snel static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, 48248527fa7SRik Snel const char *opts) 48348527fa7SRik Snel { 484bbdb23b5SHerbert Xu unsigned bs = crypto_skcipher_blocksize(any_tfm(cc)); 485f0d1b0b3SDavid Howells int log = ilog2(bs); 48648527fa7SRik Snel 48748527fa7SRik Snel /* we need to calculate how far we must shift the sector count 48848527fa7SRik Snel * to get the cipher block count, we use this shift in _gen */ 48948527fa7SRik Snel 49048527fa7SRik Snel if (1 << log != bs) { 49148527fa7SRik Snel ti->error = "cypher blocksize is not a power of 2"; 49248527fa7SRik Snel return -EINVAL; 49348527fa7SRik Snel } 49448527fa7SRik Snel 49548527fa7SRik Snel if (log > 9) { 49648527fa7SRik Snel ti->error = "cypher blocksize is > 512"; 49748527fa7SRik Snel return -EINVAL; 49848527fa7SRik Snel } 49948527fa7SRik Snel 50060473592SMilan Broz cc->iv_gen_private.benbi.shift = 9 - log; 50148527fa7SRik Snel 50248527fa7SRik Snel return 0; 50348527fa7SRik Snel } 50448527fa7SRik Snel 50548527fa7SRik Snel static void crypt_iv_benbi_dtr(struct crypt_config *cc) 50648527fa7SRik Snel { 50748527fa7SRik Snel } 50848527fa7SRik Snel 5092dc5327dSMilan Broz static int 
crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, 5102dc5327dSMilan Broz struct dm_crypt_request *dmreq) 51148527fa7SRik Snel { 51279066ad3SHerbert Xu __be64 val; 51379066ad3SHerbert Xu 51448527fa7SRik Snel memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ 51579066ad3SHerbert Xu 5162dc5327dSMilan Broz val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1); 51779066ad3SHerbert Xu put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); 51848527fa7SRik Snel 5191da177e4SLinus Torvalds return 0; 5201da177e4SLinus Torvalds } 5211da177e4SLinus Torvalds 5222dc5327dSMilan Broz static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, 5232dc5327dSMilan Broz struct dm_crypt_request *dmreq) 52446b47730SLudwig Nussel { 52546b47730SLudwig Nussel memset(iv, 0, cc->iv_size); 52646b47730SLudwig Nussel 52746b47730SLudwig Nussel return 0; 52846b47730SLudwig Nussel } 52946b47730SLudwig Nussel 53034745785SMilan Broz static void crypt_iv_lmk_dtr(struct crypt_config *cc) 53134745785SMilan Broz { 53234745785SMilan Broz struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 53334745785SMilan Broz 53434745785SMilan Broz if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) 53534745785SMilan Broz crypto_free_shash(lmk->hash_tfm); 53634745785SMilan Broz lmk->hash_tfm = NULL; 53734745785SMilan Broz 53834745785SMilan Broz kzfree(lmk->seed); 53934745785SMilan Broz lmk->seed = NULL; 54034745785SMilan Broz } 54134745785SMilan Broz 54234745785SMilan Broz static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, 54334745785SMilan Broz const char *opts) 54434745785SMilan Broz { 54534745785SMilan Broz struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 54634745785SMilan Broz 5478f0009a2SMilan Broz if (cc->sector_size != (1 << SECTOR_SHIFT)) { 5488f0009a2SMilan Broz ti->error = "Unsupported sector size for LMK"; 5498f0009a2SMilan Broz return -EINVAL; 5508f0009a2SMilan Broz } 5518f0009a2SMilan Broz 55234745785SMilan Broz lmk->hash_tfm 
= crypto_alloc_shash("md5", 0, 0); 55334745785SMilan Broz if (IS_ERR(lmk->hash_tfm)) { 55434745785SMilan Broz ti->error = "Error initializing LMK hash"; 55534745785SMilan Broz return PTR_ERR(lmk->hash_tfm); 55634745785SMilan Broz } 55734745785SMilan Broz 55834745785SMilan Broz /* No seed in LMK version 2 */ 55934745785SMilan Broz if (cc->key_parts == cc->tfms_count) { 56034745785SMilan Broz lmk->seed = NULL; 56134745785SMilan Broz return 0; 56234745785SMilan Broz } 56334745785SMilan Broz 56434745785SMilan Broz lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); 56534745785SMilan Broz if (!lmk->seed) { 56634745785SMilan Broz crypt_iv_lmk_dtr(cc); 56734745785SMilan Broz ti->error = "Error kmallocing seed storage in LMK"; 56834745785SMilan Broz return -ENOMEM; 56934745785SMilan Broz } 57034745785SMilan Broz 57134745785SMilan Broz return 0; 57234745785SMilan Broz } 57334745785SMilan Broz 57434745785SMilan Broz static int crypt_iv_lmk_init(struct crypt_config *cc) 57534745785SMilan Broz { 57634745785SMilan Broz struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 57734745785SMilan Broz int subkey_size = cc->key_size / cc->key_parts; 57834745785SMilan Broz 57934745785SMilan Broz /* LMK seed is on the position of LMK_KEYS + 1 key */ 58034745785SMilan Broz if (lmk->seed) 58134745785SMilan Broz memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), 58234745785SMilan Broz crypto_shash_digestsize(lmk->hash_tfm)); 58334745785SMilan Broz 58434745785SMilan Broz return 0; 58534745785SMilan Broz } 58634745785SMilan Broz 58734745785SMilan Broz static int crypt_iv_lmk_wipe(struct crypt_config *cc) 58834745785SMilan Broz { 58934745785SMilan Broz struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 59034745785SMilan Broz 59134745785SMilan Broz if (lmk->seed) 59234745785SMilan Broz memset(lmk->seed, 0, LMK_SEED_SIZE); 59334745785SMilan Broz 59434745785SMilan Broz return 0; 59534745785SMilan Broz } 59634745785SMilan Broz 59734745785SMilan Broz static int crypt_iv_lmk_one(struct 
crypt_config *cc, u8 *iv, 59834745785SMilan Broz struct dm_crypt_request *dmreq, 59934745785SMilan Broz u8 *data) 60034745785SMilan Broz { 60134745785SMilan Broz struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 602b6106265SJan-Simon Möller SHASH_DESC_ON_STACK(desc, lmk->hash_tfm); 60334745785SMilan Broz struct md5_state md5state; 604da31a078SMilan Broz __le32 buf[4]; 60534745785SMilan Broz int i, r; 60634745785SMilan Broz 607b6106265SJan-Simon Möller desc->tfm = lmk->hash_tfm; 60834745785SMilan Broz 609b6106265SJan-Simon Möller r = crypto_shash_init(desc); 61034745785SMilan Broz if (r) 61134745785SMilan Broz return r; 61234745785SMilan Broz 61334745785SMilan Broz if (lmk->seed) { 614b6106265SJan-Simon Möller r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE); 61534745785SMilan Broz if (r) 61634745785SMilan Broz return r; 61734745785SMilan Broz } 61834745785SMilan Broz 61934745785SMilan Broz /* Sector is always 512B, block size 16, add data of blocks 1-31 */ 620b6106265SJan-Simon Möller r = crypto_shash_update(desc, data + 16, 16 * 31); 62134745785SMilan Broz if (r) 62234745785SMilan Broz return r; 62334745785SMilan Broz 62434745785SMilan Broz /* Sector is cropped to 56 bits here */ 62534745785SMilan Broz buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); 62634745785SMilan Broz buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); 62734745785SMilan Broz buf[2] = cpu_to_le32(4024); 62834745785SMilan Broz buf[3] = 0; 629b6106265SJan-Simon Möller r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf)); 63034745785SMilan Broz if (r) 63134745785SMilan Broz return r; 63234745785SMilan Broz 63334745785SMilan Broz /* No MD5 padding here */ 634b6106265SJan-Simon Möller r = crypto_shash_export(desc, &md5state); 63534745785SMilan Broz if (r) 63634745785SMilan Broz return r; 63734745785SMilan Broz 63834745785SMilan Broz for (i = 0; i < MD5_HASH_WORDS; i++) 63934745785SMilan Broz __cpu_to_le32s(&md5state.hash[i]); 64034745785SMilan Broz 
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

/*
 * LMK IV generator: for WRITEs derive the IV from the plaintext of the
 * sector (via crypt_iv_lmk_one); for READs the IV is zeroed here and
 * reconstructed in crypt_iv_lmk_post() after decryption.
 */
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

/*
 * LMK post-processing: READ only. Recompute the IV over the decrypted
 * data and XOR it into the first block of the plaintext sector.
 */
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + sg->offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

/* Release TCW private state: seed/whitening buffers and the crc32 tfm. */
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

/*
 * TCW constructor: validate sector/key sizes, allocate the crc32 tfm and
 * the iv_seed/whitening buffers. Cleans up after itself on failure.
 */
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	/* TCW is only defined for native 512-byte sectors */
	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	/* Key must cover cipher key + IV seed + whitening material */
	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

/* Load the IV seed and whitening material from the tail of the key. */
static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

/* Zeroize TCW key material on key wipe. */
static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

/*
 * Compute the per-sector whitening mask (8 bytes, derived from the
 * whitening seed, the sector number and chained crc32) and XOR it over
 * the whole 512-byte sector in @data. The on-stack buffer holding key
 * material is explicitly zeroized before return.
 */
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

/*
 * TCW IV generator: on READ first strip the whitening from the
 * ciphertext, then derive the IV as iv_seed XOR sector number.
 */
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
			       cc->iv_size - 8);

	return r;
}

/* TCW post-processing: WRITE only — whiten the freshly made ciphertext. */
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
	kunmap_atomic(dst);

	return r;
}

/* Random IV generator (authenticated modes store the IV on disk). */
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
			       struct dm_crypt_request *dmreq)
{
	/* Used only for writes, there must be an additional space to store IV */
	get_random_bytes(iv, cc->iv_size);
	return 0;
}

static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
.generator = crypt_iv_plain64be_gen 8547e3fd855SMilan Broz }; 8557e3fd855SMilan Broz 8561b1b58f5SJulia Lawall static const struct crypt_iv_operations crypt_iv_essiv_ops = { 8571da177e4SLinus Torvalds .ctr = crypt_iv_essiv_ctr, 8581da177e4SLinus Torvalds .dtr = crypt_iv_essiv_dtr, 859b95bf2d3SMilan Broz .init = crypt_iv_essiv_init, 860542da317SMilan Broz .wipe = crypt_iv_essiv_wipe, 8611da177e4SLinus Torvalds .generator = crypt_iv_essiv_gen 8621da177e4SLinus Torvalds }; 8631da177e4SLinus Torvalds 8641b1b58f5SJulia Lawall static const struct crypt_iv_operations crypt_iv_benbi_ops = { 86548527fa7SRik Snel .ctr = crypt_iv_benbi_ctr, 86648527fa7SRik Snel .dtr = crypt_iv_benbi_dtr, 86748527fa7SRik Snel .generator = crypt_iv_benbi_gen 86848527fa7SRik Snel }; 8691da177e4SLinus Torvalds 8701b1b58f5SJulia Lawall static const struct crypt_iv_operations crypt_iv_null_ops = { 87146b47730SLudwig Nussel .generator = crypt_iv_null_gen 87246b47730SLudwig Nussel }; 87346b47730SLudwig Nussel 8741b1b58f5SJulia Lawall static const struct crypt_iv_operations crypt_iv_lmk_ops = { 87534745785SMilan Broz .ctr = crypt_iv_lmk_ctr, 87634745785SMilan Broz .dtr = crypt_iv_lmk_dtr, 87734745785SMilan Broz .init = crypt_iv_lmk_init, 87834745785SMilan Broz .wipe = crypt_iv_lmk_wipe, 87934745785SMilan Broz .generator = crypt_iv_lmk_gen, 88034745785SMilan Broz .post = crypt_iv_lmk_post 88134745785SMilan Broz }; 88234745785SMilan Broz 8831b1b58f5SJulia Lawall static const struct crypt_iv_operations crypt_iv_tcw_ops = { 884ed04d981SMilan Broz .ctr = crypt_iv_tcw_ctr, 885ed04d981SMilan Broz .dtr = crypt_iv_tcw_dtr, 886ed04d981SMilan Broz .init = crypt_iv_tcw_init, 887ed04d981SMilan Broz .wipe = crypt_iv_tcw_wipe, 888ed04d981SMilan Broz .generator = crypt_iv_tcw_gen, 889ed04d981SMilan Broz .post = crypt_iv_tcw_post 890ed04d981SMilan Broz }; 891ed04d981SMilan Broz 892ef43aa38SMilan Broz static struct crypt_iv_operations crypt_iv_random_ops = { 893ef43aa38SMilan Broz .generator = crypt_iv_random_gen 
894ef43aa38SMilan Broz }; 895ef43aa38SMilan Broz 896ef43aa38SMilan Broz /* 897ef43aa38SMilan Broz * Integrity extensions 898ef43aa38SMilan Broz */ 899ef43aa38SMilan Broz static bool crypt_integrity_aead(struct crypt_config *cc) 900ef43aa38SMilan Broz { 901ef43aa38SMilan Broz return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); 902ef43aa38SMilan Broz } 903ef43aa38SMilan Broz 904ef43aa38SMilan Broz static bool crypt_integrity_hmac(struct crypt_config *cc) 905ef43aa38SMilan Broz { 90633d2f09fSMilan Broz return crypt_integrity_aead(cc) && cc->key_mac_size; 907ef43aa38SMilan Broz } 908ef43aa38SMilan Broz 909ef43aa38SMilan Broz /* Get sg containing data */ 910ef43aa38SMilan Broz static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, 911ef43aa38SMilan Broz struct scatterlist *sg) 912ef43aa38SMilan Broz { 91333d2f09fSMilan Broz if (unlikely(crypt_integrity_aead(cc))) 914ef43aa38SMilan Broz return &sg[2]; 915ef43aa38SMilan Broz 916ef43aa38SMilan Broz return sg; 917ef43aa38SMilan Broz } 918ef43aa38SMilan Broz 919ef43aa38SMilan Broz static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) 920ef43aa38SMilan Broz { 921ef43aa38SMilan Broz struct bio_integrity_payload *bip; 922ef43aa38SMilan Broz unsigned int tag_len; 923ef43aa38SMilan Broz int ret; 924ef43aa38SMilan Broz 925ef43aa38SMilan Broz if (!bio_sectors(bio) || !io->cc->on_disk_tag_size) 926ef43aa38SMilan Broz return 0; 927ef43aa38SMilan Broz 928ef43aa38SMilan Broz bip = bio_integrity_alloc(bio, GFP_NOIO, 1); 929ef43aa38SMilan Broz if (IS_ERR(bip)) 930ef43aa38SMilan Broz return PTR_ERR(bip); 931ef43aa38SMilan Broz 932ff0c129dSMikulas Patocka tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); 933ef43aa38SMilan Broz 934ef43aa38SMilan Broz bip->bip_iter.bi_size = tag_len; 935ef43aa38SMilan Broz bip->bip_iter.bi_sector = io->cc->start + io->sector; 936ef43aa38SMilan Broz 937ef43aa38SMilan Broz ret = bio_integrity_add_page(bio, 
				     virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	if (unlikely(ret != tag_len))
		return -ENOMEM;

	return 0;
}

/*
 * Validate that the underlying device exposes the dm-crypt integrity
 * profile ("DM-DIF-EXT-TAG") and that its tag and sector geometry match
 * this crypt_config. For AEAD modes also program the auth tag size.
 */
static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
	struct mapped_device *md = dm_table_get_md(ti->table);

	/* From now we require underlying device with our integrity profile */
	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
		ti->error = "Integrity profile not supported.";
		return -EINVAL;
	}

	if (bi->tag_size != cc->on_disk_tag_size ||
	    bi->tuple_size != cc->on_disk_tag_size) {
		ti->error = "Integrity profile tag size mismatch.";
		return -EINVAL;
	}
	if (1 << bi->interval_exp != cc->sector_size) {
		ti->error = "Integrity profile sector size mismatch.";
		return -EINVAL;
	}

	if (crypt_integrity_aead(cc)) {
		/* Tag space is split between auth tag and (optional) stored IV */
		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
		       cc->integrity_tag_size, cc->integrity_iv_size);

		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
			ti->error = "Integrity AEAD auth tag size is not supported.";
			return -EINVAL;
		}
	} else if (cc->integrity_iv_size)
		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
		       cc->integrity_iv_size);

	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
		ti->error = "Not enough space for integrity tag in the profile.";
		return -EINVAL;
	}

	return 0;
#else
	ti->error = "Integrity profile not supported.";
	return -EINVAL;
#endif
}

/*
 * Initialize a conversion context for translating bio_in into bio_out,
 * starting at @sector (offset by cc->iv_offset for IV calculations).
 */
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

/*
 * The dm_crypt_request lives at a fixed offset (cc->dmreq_start) inside
 * the crypto request allocation; these two helpers convert between the
 * two views.
 */
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}

/*
 * Per-request scratch layout directly after the dm_crypt_request:
 *   [align] IV | original IV | original sector (le64) | tag offset (uint)
 * The helpers below compute each field's address from that layout.
 */
static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	if (crypt_integrity_aead(cc))
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
	else
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}

static u8 *org_iv_of_dmreq(struct crypt_config *cc,
			   struct dm_crypt_request *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}

static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
				   struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;

	return (__le64 *) ptr;
}

static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
				      struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
		  cc->iv_size + sizeof(uint64_t);

	return (unsigned int *)ptr;
}

static void *tag_from_dmreq(struct crypt_config *cc,
			    struct dm_crypt_request *dmreq)
{
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	/* Index into the io's metadata area by this request's tag offset */
	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
		cc->on_disk_tag_size];
}

/* The stored IV (if any) follows the auth tag within the tag slot. */
static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{
	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}

/*
 * Encrypt or decrypt one crypto sector with an AEAD transform.
 * The sector number and IV are authenticated as AAD; the data and auth
 * tag are carried in 4-entry scatterlists. Returns 0, a crypto error,
 * or -EBADMSG on authentication failure.
 */
static int crypt_convert_block_aead(struct crypt_config *cc,
				    struct convert_context *ctx,
				    struct aead_request *req,
				    unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv, *tag;
	__le64 *sector;
	int r = 0;

	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	/* Sector number stored without iv_offset, little-endian, as AAD */
	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag = tag_from_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	/* AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */
	sg_init_table(dmreq->sg_in, 4);
	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);

	sg_init_table(dmreq->sg_out, 4);
	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
	if (bio_data_dir(ctx->bio_in) == WRITE) {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size, iv);
		r = crypto_aead_encrypt(req);
		/* Zero-fill any tag space not covered by auth tag + stored IV */
		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
	} else {
		/* On decrypt the input also includes the auth tag */
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size + cc->integrity_tag_size, iv);
		r = crypto_aead_decrypt(req);
	}

	if (r == -EBADMSG) {
		char b[BDEVNAME_SIZE];
		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
			    (unsigned long long)le64_to_cpu(*sector));
	}

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}

/*
 * Encrypt or decrypt one crypto sector with an skcipher transform.
 * Single-entry scatterlists; optional IV load/store via the integrity
 * metadata when integrity_iv_size is configured.
 */
static int crypt_convert_block_skcipher(struct crypt_config *cc,
					struct convert_context *ctx,
					struct skcipher_request *req,
					unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct scatterlist *sg_in, *sg_out;
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv;
	__le64 *sector;
	int r = 0;

	/* Reject unexpected unaligned bio.
	 */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	/* Sector number stored without iv_offset, little-endian */
	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	/* For skcipher we use only the first sg item */
	sg_in  = &dmreq->sg_in[0];
	sg_out = &dmreq->sg_out[0];

	sg_init_table(sg_in, 1);
	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);

	sg_init_table(sg_out, 1);
	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->integrity_iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

/*
 * Lazily allocate the per-context skcipher request from the mempool and
 * bind it to the tfm selected by the sector number (round-robin over
 * tfms_count, which is a power of two).
 */
static void crypt_alloc_req_skcipher(struct crypt_config *cc,
				     struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->r.req)
		ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);

	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	skcipher_request_set_callback(ctx->r.req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
}

/* AEAD counterpart of crypt_alloc_req_skcipher() (single AEAD tfm). */
static void crypt_alloc_req_aead(struct crypt_config *cc,
				 struct convert_context *ctx)
{
	if (!ctx->r.req_aead)
		ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);

	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	aead_request_set_callback(ctx->r.req_aead,
	    CRYPTO_TFM_REQ_MAY_BACKLOG,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
}

/* Dispatch to the AEAD or skcipher request allocator. */
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (crypt_integrity_aead(cc))
		crypt_alloc_req_aead(cc, ctx);
	else
		crypt_alloc_req_skcipher(cc, ctx);
}

/*
 * Free a request back to the mempool unless it is the one embedded
 * directly after the dm_crypt_io in the per-bio data.
 */
static void crypt_free_req_skcipher(struct crypt_config *cc,
				    struct skcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct skcipher_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

/* AEAD counterpart of crypt_free_req_skcipher(). */
static void crypt_free_req_aead(struct crypt_config *cc,
				struct aead_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct aead_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

/* Dispatch to the AEAD or skcipher request free routine. */
static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
{
	if (crypt_integrity_aead(cc))
		crypt_free_req_aead(cc, req, base_bio);
	else
		crypt_free_req_skcipher(cc, req, base_bio);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static blk_status_t crypt_convert(struct crypt_config *cc,
				  struct convert_context *ctx)
{
	unsigned int tag_offset = 0;
	/* Number of 512-byte sectors consumed per crypto sector */
	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
	int r;

	/*
	 * cc_pending starts at 1 (the caller's reference); each submitted
	 * block adds one, completed synchronous blocks drop theirs again.
	 */
	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);
		atomic_inc(&ctx->cc_pending);

		if (crypt_integrity_aead(cc))
			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
		else
			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);

		switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
		case -EINPROGRESS:
			/* Ownership passed to the async path; allocate a fresh req */
			ctx->r.req = NULL;
			ctx->cc_sector += sector_step;
			tag_offset++;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector += sector_step;
			tag_offset++;
			cond_resched();
			continue;
		/*
		 * There was a data integrity error.
		 */
		case -EBADMSG:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_PROTECTION;
		/*
		 * There was an error while processing the request.
		 */
		default:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_IOERR;
		}
	}

	return 0;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * mempool of 256 pages, two processes, each wanting 256, pages allocate from
 * the mempool concurrently, it may deadlock in a situation where both processes
 * have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
13951da177e4SLinus Torvalds */ 1396cf2f1abfSMikulas Patocka static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) 13971da177e4SLinus Torvalds { 139849a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 13998b004457SMilan Broz struct bio *clone; 14001da177e4SLinus Torvalds unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 14017145c241SMikulas Patocka gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; 14027145c241SMikulas Patocka unsigned i, len, remaining_size; 140391e10625SMilan Broz struct page *page; 14041da177e4SLinus Torvalds 14057145c241SMikulas Patocka retry: 1406d0164adcSMel Gorman if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) 14077145c241SMikulas Patocka mutex_lock(&cc->bio_alloc_lock); 14087145c241SMikulas Patocka 14096f1c819cSKent Overstreet clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs); 14108b004457SMilan Broz if (!clone) 1411ef43aa38SMilan Broz goto out; 14121da177e4SLinus Torvalds 1413027581f3SOlaf Kirch clone_init(io, clone); 14146a24c718SMilan Broz 14157145c241SMikulas Patocka remaining_size = size; 14167145c241SMikulas Patocka 1417f97380bcSOlaf Kirch for (i = 0; i < nr_iovecs; i++) { 14186f1c819cSKent Overstreet page = mempool_alloc(&cc->page_pool, gfp_mask); 14197145c241SMikulas Patocka if (!page) { 14207145c241SMikulas Patocka crypt_free_buffer_pages(cc, clone); 14217145c241SMikulas Patocka bio_put(clone); 1422d0164adcSMel Gorman gfp_mask |= __GFP_DIRECT_RECLAIM; 14237145c241SMikulas Patocka goto retry; 14247145c241SMikulas Patocka } 14251da177e4SLinus Torvalds 14267145c241SMikulas Patocka len = (remaining_size > PAGE_SIZE) ? 
PAGE_SIZE : remaining_size; 14271da177e4SLinus Torvalds 14280dae7fe5SMing Lei bio_add_page(clone, page, len, 0); 142991e10625SMilan Broz 14307145c241SMikulas Patocka remaining_size -= len; 14311da177e4SLinus Torvalds } 14321da177e4SLinus Torvalds 1433ef43aa38SMilan Broz /* Allocate space for integrity tags */ 1434ef43aa38SMilan Broz if (dm_crypt_integrity_io_alloc(io, clone)) { 1435ef43aa38SMilan Broz crypt_free_buffer_pages(cc, clone); 1436ef43aa38SMilan Broz bio_put(clone); 1437ef43aa38SMilan Broz clone = NULL; 1438ef43aa38SMilan Broz } 1439ef43aa38SMilan Broz out: 1440d0164adcSMel Gorman if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) 14417145c241SMikulas Patocka mutex_unlock(&cc->bio_alloc_lock); 14427145c241SMikulas Patocka 14438b004457SMilan Broz return clone; 14441da177e4SLinus Torvalds } 14451da177e4SLinus Torvalds 1446644bd2f0SNeil Brown static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) 14471da177e4SLinus Torvalds { 14481da177e4SLinus Torvalds struct bio_vec *bv; 14496dc4f100SMing Lei struct bvec_iter_all iter_all; 14501da177e4SLinus Torvalds 14512b070cfeSChristoph Hellwig bio_for_each_segment_all(bv, clone, iter_all) { 14521da177e4SLinus Torvalds BUG_ON(!bv->bv_page); 14536f1c819cSKent Overstreet mempool_free(bv->bv_page, &cc->page_pool); 14541da177e4SLinus Torvalds } 14551da177e4SLinus Torvalds } 14561da177e4SLinus Torvalds 1457298a9fa0SMikulas Patocka static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, 1458dc440d1eSMilan Broz struct bio *bio, sector_t sector) 1459dc440d1eSMilan Broz { 146049a8a920SAlasdair G Kergon io->cc = cc; 1461dc440d1eSMilan Broz io->base_bio = bio; 1462dc440d1eSMilan Broz io->sector = sector; 1463dc440d1eSMilan Broz io->error = 0; 1464ef43aa38SMilan Broz io->ctx.r.req = NULL; 1465ef43aa38SMilan Broz io->integrity_metadata = NULL; 1466ef43aa38SMilan Broz io->integrity_metadata_from_pool = false; 146740b6229bSMikulas Patocka atomic_set(&io->io_pending, 0); 1468dc440d1eSMilan Broz } 
1469dc440d1eSMilan Broz 14703e1a8bddSMilan Broz static void crypt_inc_pending(struct dm_crypt_io *io) 14713e1a8bddSMilan Broz { 147240b6229bSMikulas Patocka atomic_inc(&io->io_pending); 14733e1a8bddSMilan Broz } 14743e1a8bddSMilan Broz 14751da177e4SLinus Torvalds /* 14761da177e4SLinus Torvalds * One of the bios was finished. Check for completion of 14771da177e4SLinus Torvalds * the whole request and correctly clean up the buffer. 14781da177e4SLinus Torvalds */ 14795742fd77SMilan Broz static void crypt_dec_pending(struct dm_crypt_io *io) 14801da177e4SLinus Torvalds { 148149a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 1482b35f8caaSMilan Broz struct bio *base_bio = io->base_bio; 14834e4cbee9SChristoph Hellwig blk_status_t error = io->error; 14841da177e4SLinus Torvalds 148540b6229bSMikulas Patocka if (!atomic_dec_and_test(&io->io_pending)) 14861da177e4SLinus Torvalds return; 14871da177e4SLinus Torvalds 1488ef43aa38SMilan Broz if (io->ctx.r.req) 1489ef43aa38SMilan Broz crypt_free_req(cc, io->ctx.r.req, base_bio); 1490ef43aa38SMilan Broz 1491ef43aa38SMilan Broz if (unlikely(io->integrity_metadata_from_pool)) 14926f1c819cSKent Overstreet mempool_free(io->integrity_metadata, &io->cc->tag_pool); 1493ef43aa38SMilan Broz else 1494ef43aa38SMilan Broz kfree(io->integrity_metadata); 1495b35f8caaSMilan Broz 14964e4cbee9SChristoph Hellwig base_bio->bi_status = error; 14974246a0b6SChristoph Hellwig bio_endio(base_bio); 14981da177e4SLinus Torvalds } 14991da177e4SLinus Torvalds 15001da177e4SLinus Torvalds /* 1501cabf08e4SMilan Broz * kcryptd/kcryptd_io: 15021da177e4SLinus Torvalds * 15031da177e4SLinus Torvalds * Needed because it would be very unwise to do decryption in an 150423541d2dSMilan Broz * interrupt context. 1505cabf08e4SMilan Broz * 1506cabf08e4SMilan Broz * kcryptd performs the actual encryption or decryption. 1507cabf08e4SMilan Broz * 1508cabf08e4SMilan Broz * kcryptd_io performs the IO submission. 
1509cabf08e4SMilan Broz * 1510cabf08e4SMilan Broz * They must be separated as otherwise the final stages could be 1511cabf08e4SMilan Broz * starved by new requests which can block in the first stages due 1512cabf08e4SMilan Broz * to memory allocation. 1513c0297721SAndi Kleen * 1514c0297721SAndi Kleen * The work is done per CPU global for all dm-crypt instances. 1515c0297721SAndi Kleen * They should not depend on each other and do not block. 15161da177e4SLinus Torvalds */ 15174246a0b6SChristoph Hellwig static void crypt_endio(struct bio *clone) 15188b004457SMilan Broz { 1519028867acSAlasdair G Kergon struct dm_crypt_io *io = clone->bi_private; 152049a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 1521ee7a491eSMilan Broz unsigned rw = bio_data_dir(clone); 15224e4cbee9SChristoph Hellwig blk_status_t error; 15238b004457SMilan Broz 15248b004457SMilan Broz /* 15256712ecf8SNeilBrown * free the processed pages 15268b004457SMilan Broz */ 1527ee7a491eSMilan Broz if (rw == WRITE) 1528644bd2f0SNeil Brown crypt_free_buffer_pages(cc, clone); 15298b004457SMilan Broz 15304e4cbee9SChristoph Hellwig error = clone->bi_status; 15318b004457SMilan Broz bio_put(clone); 1532ee7a491eSMilan Broz 15339b81c842SSasha Levin if (rw == READ && !error) { 1534cabf08e4SMilan Broz kcryptd_queue_crypt(io); 15356712ecf8SNeilBrown return; 1536ee7a491eSMilan Broz } 15375742fd77SMilan Broz 15389b81c842SSasha Levin if (unlikely(error)) 15399b81c842SSasha Levin io->error = error; 15405742fd77SMilan Broz 15415742fd77SMilan Broz crypt_dec_pending(io); 15428b004457SMilan Broz } 15438b004457SMilan Broz 1544028867acSAlasdair G Kergon static void clone_init(struct dm_crypt_io *io, struct bio *clone) 15458b004457SMilan Broz { 154649a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 15478b004457SMilan Broz 15488b004457SMilan Broz clone->bi_private = io; 15498b004457SMilan Broz clone->bi_end_io = crypt_endio; 155074d46992SChristoph Hellwig bio_set_dev(clone, cc->dev->bdev); 1551ef295ecfSChristoph 
Hellwig clone->bi_opf = io->base_bio->bi_opf; 15528b004457SMilan Broz } 15538b004457SMilan Broz 155420c82538SMilan Broz static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) 15558b004457SMilan Broz { 155649a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 15578b004457SMilan Broz struct bio *clone; 155893e605c2SMilan Broz 15598b004457SMilan Broz /* 156059779079SMike Snitzer * We need the original biovec array in order to decrypt 156159779079SMike Snitzer * the whole bio data *afterwards* -- thanks to immutable 156259779079SMike Snitzer * biovecs we don't need to worry about the block layer 156359779079SMike Snitzer * modifying the biovec array; so leverage bio_clone_fast(). 15648b004457SMilan Broz */ 15656f1c819cSKent Overstreet clone = bio_clone_fast(io->base_bio, gfp, &cc->bs); 15667eaceaccSJens Axboe if (!clone) 156720c82538SMilan Broz return 1; 15688b004457SMilan Broz 156920c82538SMilan Broz crypt_inc_pending(io); 157020c82538SMilan Broz 15718b004457SMilan Broz clone_init(io, clone); 15724f024f37SKent Overstreet clone->bi_iter.bi_sector = cc->start + io->sector; 15738b004457SMilan Broz 1574ef43aa38SMilan Broz if (dm_crypt_integrity_io_alloc(io, clone)) { 1575ef43aa38SMilan Broz crypt_dec_pending(io); 1576ef43aa38SMilan Broz bio_put(clone); 1577ef43aa38SMilan Broz return 1; 1578ef43aa38SMilan Broz } 1579ef43aa38SMilan Broz 158093e605c2SMilan Broz generic_make_request(clone); 158120c82538SMilan Broz return 0; 15828b004457SMilan Broz } 15838b004457SMilan Broz 1584dc267621SMikulas Patocka static void kcryptd_io_read_work(struct work_struct *work) 1585395b167cSAlasdair G Kergon { 1586395b167cSAlasdair G Kergon struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 1587395b167cSAlasdair G Kergon 158820c82538SMilan Broz crypt_inc_pending(io); 158920c82538SMilan Broz if (kcryptd_io_read(io, GFP_NOIO)) 15904e4cbee9SChristoph Hellwig io->error = BLK_STS_RESOURCE; 159120c82538SMilan Broz crypt_dec_pending(io); 1592395b167cSAlasdair G Kergon 
} 1593395b167cSAlasdair G Kergon 1594dc267621SMikulas Patocka static void kcryptd_queue_read(struct dm_crypt_io *io) 1595395b167cSAlasdair G Kergon { 159649a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 1597395b167cSAlasdair G Kergon 1598dc267621SMikulas Patocka INIT_WORK(&io->work, kcryptd_io_read_work); 1599395b167cSAlasdair G Kergon queue_work(cc->io_queue, &io->work); 1600395b167cSAlasdair G Kergon } 1601395b167cSAlasdair G Kergon 1602dc267621SMikulas Patocka static void kcryptd_io_write(struct dm_crypt_io *io) 1603dc267621SMikulas Patocka { 1604dc267621SMikulas Patocka struct bio *clone = io->ctx.bio_out; 1605dc267621SMikulas Patocka 1606dc267621SMikulas Patocka generic_make_request(clone); 1607dc267621SMikulas Patocka } 1608dc267621SMikulas Patocka 1609b3c5fd30SMikulas Patocka #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) 1610b3c5fd30SMikulas Patocka 1611dc267621SMikulas Patocka static int dmcrypt_write(void *data) 1612dc267621SMikulas Patocka { 1613dc267621SMikulas Patocka struct crypt_config *cc = data; 1614b3c5fd30SMikulas Patocka struct dm_crypt_io *io; 1615b3c5fd30SMikulas Patocka 1616dc267621SMikulas Patocka while (1) { 1617b3c5fd30SMikulas Patocka struct rb_root write_tree; 1618dc267621SMikulas Patocka struct blk_plug plug; 1619dc267621SMikulas Patocka 1620c7329effSMikulas Patocka spin_lock_irq(&cc->write_thread_lock); 1621dc267621SMikulas Patocka continue_locked: 1622dc267621SMikulas Patocka 1623b3c5fd30SMikulas Patocka if (!RB_EMPTY_ROOT(&cc->write_tree)) 1624dc267621SMikulas Patocka goto pop_from_list; 1625dc267621SMikulas Patocka 1626f659b100SRabin Vincent set_current_state(TASK_INTERRUPTIBLE); 1627dc267621SMikulas Patocka 1628c7329effSMikulas Patocka spin_unlock_irq(&cc->write_thread_lock); 1629dc267621SMikulas Patocka 1630f659b100SRabin Vincent if (unlikely(kthread_should_stop())) { 1631642fa448SDavidlohr Bueso set_current_state(TASK_RUNNING); 1632f659b100SRabin Vincent break; 1633f659b100SRabin 
Vincent } 1634f659b100SRabin Vincent 1635dc267621SMikulas Patocka schedule(); 1636dc267621SMikulas Patocka 1637642fa448SDavidlohr Bueso set_current_state(TASK_RUNNING); 1638c7329effSMikulas Patocka spin_lock_irq(&cc->write_thread_lock); 1639dc267621SMikulas Patocka goto continue_locked; 1640dc267621SMikulas Patocka 1641dc267621SMikulas Patocka pop_from_list: 1642b3c5fd30SMikulas Patocka write_tree = cc->write_tree; 1643b3c5fd30SMikulas Patocka cc->write_tree = RB_ROOT; 1644c7329effSMikulas Patocka spin_unlock_irq(&cc->write_thread_lock); 1645dc267621SMikulas Patocka 1646b3c5fd30SMikulas Patocka BUG_ON(rb_parent(write_tree.rb_node)); 1647b3c5fd30SMikulas Patocka 1648b3c5fd30SMikulas Patocka /* 1649b3c5fd30SMikulas Patocka * Note: we cannot walk the tree here with rb_next because 1650b3c5fd30SMikulas Patocka * the structures may be freed when kcryptd_io_write is called. 1651b3c5fd30SMikulas Patocka */ 1652dc267621SMikulas Patocka blk_start_plug(&plug); 1653dc267621SMikulas Patocka do { 1654b3c5fd30SMikulas Patocka io = crypt_io_from_node(rb_first(&write_tree)); 1655b3c5fd30SMikulas Patocka rb_erase(&io->rb_node, &write_tree); 1656dc267621SMikulas Patocka kcryptd_io_write(io); 1657b3c5fd30SMikulas Patocka } while (!RB_EMPTY_ROOT(&write_tree)); 1658dc267621SMikulas Patocka blk_finish_plug(&plug); 1659dc267621SMikulas Patocka } 1660dc267621SMikulas Patocka return 0; 1661dc267621SMikulas Patocka } 1662dc267621SMikulas Patocka 166372c6e7afSMikulas Patocka static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) 16644e4eef64SMilan Broz { 1665dec1cedfSMilan Broz struct bio *clone = io->ctx.bio_out; 166649a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 1667dc267621SMikulas Patocka unsigned long flags; 1668b3c5fd30SMikulas Patocka sector_t sector; 1669b3c5fd30SMikulas Patocka struct rb_node **rbp, *parent; 1670dec1cedfSMilan Broz 16714e4cbee9SChristoph Hellwig if (unlikely(io->error)) { 1672dec1cedfSMilan Broz crypt_free_buffer_pages(cc, clone); 
1673dec1cedfSMilan Broz bio_put(clone); 16746c031f41SMilan Broz crypt_dec_pending(io); 1675dec1cedfSMilan Broz return; 1676dec1cedfSMilan Broz } 1677dec1cedfSMilan Broz 1678dec1cedfSMilan Broz /* crypt_convert should have filled the clone bio */ 1679003b5c57SKent Overstreet BUG_ON(io->ctx.iter_out.bi_size); 1680dec1cedfSMilan Broz 16814f024f37SKent Overstreet clone->bi_iter.bi_sector = cc->start + io->sector; 1682899c95d3SMilan Broz 16830f5d8e6eSMikulas Patocka if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) { 16840f5d8e6eSMikulas Patocka generic_make_request(clone); 16850f5d8e6eSMikulas Patocka return; 16860f5d8e6eSMikulas Patocka } 16870f5d8e6eSMikulas Patocka 1688c7329effSMikulas Patocka spin_lock_irqsave(&cc->write_thread_lock, flags); 1689c7329effSMikulas Patocka if (RB_EMPTY_ROOT(&cc->write_tree)) 1690c7329effSMikulas Patocka wake_up_process(cc->write_thread); 1691b3c5fd30SMikulas Patocka rbp = &cc->write_tree.rb_node; 1692b3c5fd30SMikulas Patocka parent = NULL; 1693b3c5fd30SMikulas Patocka sector = io->sector; 1694b3c5fd30SMikulas Patocka while (*rbp) { 1695b3c5fd30SMikulas Patocka parent = *rbp; 1696b3c5fd30SMikulas Patocka if (sector < crypt_io_from_node(parent)->sector) 1697b3c5fd30SMikulas Patocka rbp = &(*rbp)->rb_left; 1698b3c5fd30SMikulas Patocka else 1699b3c5fd30SMikulas Patocka rbp = &(*rbp)->rb_right; 1700b3c5fd30SMikulas Patocka } 1701b3c5fd30SMikulas Patocka rb_link_node(&io->rb_node, parent, rbp); 1702b3c5fd30SMikulas Patocka rb_insert_color(&io->rb_node, &cc->write_tree); 1703c7329effSMikulas Patocka spin_unlock_irqrestore(&cc->write_thread_lock, flags); 17044e4eef64SMilan Broz } 17054e4eef64SMilan Broz 1706fc5a5e9aSMilan Broz static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) 17078b004457SMilan Broz { 170849a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 17098b004457SMilan Broz struct bio *clone; 1710c8081618SMilan Broz int crypt_finished; 1711b635b00eSMilan Broz sector_t sector = io->sector; 
17124e4cbee9SChristoph Hellwig blk_status_t r; 17138b004457SMilan Broz 171493e605c2SMilan Broz /* 1715fc5a5e9aSMilan Broz * Prevent io from disappearing until this function completes. 1716fc5a5e9aSMilan Broz */ 1717fc5a5e9aSMilan Broz crypt_inc_pending(io); 1718b635b00eSMilan Broz crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); 1719fc5a5e9aSMilan Broz 1720cf2f1abfSMikulas Patocka clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); 172123541d2dSMilan Broz if (unlikely(!clone)) { 17224e4cbee9SChristoph Hellwig io->error = BLK_STS_IOERR; 1723cf2f1abfSMikulas Patocka goto dec; 172423541d2dSMilan Broz } 17258b004457SMilan Broz 172653017030SMilan Broz io->ctx.bio_out = clone; 1727003b5c57SKent Overstreet io->ctx.iter_out = clone->bi_iter; 17288b004457SMilan Broz 1729b635b00eSMilan Broz sector += bio_sectors(clone); 1730dec1cedfSMilan Broz 17314e594098SMilan Broz crypt_inc_pending(io); 1732dec1cedfSMilan Broz r = crypt_convert(cc, &io->ctx); 17334e4cbee9SChristoph Hellwig if (r) 1734ef43aa38SMilan Broz io->error = r; 173540b6229bSMikulas Patocka crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); 1736dec1cedfSMilan Broz 1737c8081618SMilan Broz /* Encryption was already finished, submit io now */ 1738c8081618SMilan Broz if (crypt_finished) { 173972c6e7afSMikulas Patocka kcryptd_crypt_write_io_submit(io, 0); 1740b635b00eSMilan Broz io->sector = sector; 17414e594098SMilan Broz } 174293e605c2SMilan Broz 1743cf2f1abfSMikulas Patocka dec: 1744899c95d3SMilan Broz crypt_dec_pending(io); 174584131db6SMilan Broz } 174684131db6SMilan Broz 174772c6e7afSMikulas Patocka static void kcryptd_crypt_read_done(struct dm_crypt_io *io) 17485742fd77SMilan Broz { 17495742fd77SMilan Broz crypt_dec_pending(io); 17505742fd77SMilan Broz } 17515742fd77SMilan Broz 17524e4eef64SMilan Broz static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) 17538b004457SMilan Broz { 175449a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 17554e4cbee9SChristoph Hellwig 
blk_status_t r; 17568b004457SMilan Broz 17573e1a8bddSMilan Broz crypt_inc_pending(io); 17583a7f6c99SMilan Broz 175953017030SMilan Broz crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, 17600c395b0fSMilan Broz io->sector); 17618b004457SMilan Broz 17625742fd77SMilan Broz r = crypt_convert(cc, &io->ctx); 17634e4cbee9SChristoph Hellwig if (r) 1764ef43aa38SMilan Broz io->error = r; 17655742fd77SMilan Broz 176640b6229bSMikulas Patocka if (atomic_dec_and_test(&io->ctx.cc_pending)) 176772c6e7afSMikulas Patocka kcryptd_crypt_read_done(io); 17683a7f6c99SMilan Broz 17693a7f6c99SMilan Broz crypt_dec_pending(io); 17708b004457SMilan Broz } 17718b004457SMilan Broz 177295497a96SMilan Broz static void kcryptd_async_done(struct crypto_async_request *async_req, 177395497a96SMilan Broz int error) 177495497a96SMilan Broz { 1775b2174eebSHuang Ying struct dm_crypt_request *dmreq = async_req->data; 1776b2174eebSHuang Ying struct convert_context *ctx = dmreq->ctx; 177795497a96SMilan Broz struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); 177849a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 177995497a96SMilan Broz 178054cea3f6SMilan Broz /* 178154cea3f6SMilan Broz * A request from crypto driver backlog is going to be processed now, 178254cea3f6SMilan Broz * finish the completion and continue in crypt_convert(). 178354cea3f6SMilan Broz * (Callback will be called for the second time for this request.) 
178454cea3f6SMilan Broz */ 1785c0403ec0SRabin Vincent if (error == -EINPROGRESS) { 1786c0403ec0SRabin Vincent complete(&ctx->restart); 178795497a96SMilan Broz return; 1788c0403ec0SRabin Vincent } 178995497a96SMilan Broz 17902dc5327dSMilan Broz if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) 1791ef43aa38SMilan Broz error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq); 17922dc5327dSMilan Broz 1793ef43aa38SMilan Broz if (error == -EBADMSG) { 1794f710126cSMilan Broz char b[BDEVNAME_SIZE]; 1795f710126cSMilan Broz DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b), 1796ef43aa38SMilan Broz (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq))); 17974e4cbee9SChristoph Hellwig io->error = BLK_STS_PROTECTION; 1798ef43aa38SMilan Broz } else if (error < 0) 17994e4cbee9SChristoph Hellwig io->error = BLK_STS_IOERR; 180072c6e7afSMikulas Patocka 1801298a9fa0SMikulas Patocka crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); 180295497a96SMilan Broz 180340b6229bSMikulas Patocka if (!atomic_dec_and_test(&ctx->cc_pending)) 1804c0403ec0SRabin Vincent return; 180595497a96SMilan Broz 180695497a96SMilan Broz if (bio_data_dir(io->base_bio) == READ) 180772c6e7afSMikulas Patocka kcryptd_crypt_read_done(io); 180895497a96SMilan Broz else 180972c6e7afSMikulas Patocka kcryptd_crypt_write_io_submit(io, 1); 181095497a96SMilan Broz } 181195497a96SMilan Broz 18124e4eef64SMilan Broz static void kcryptd_crypt(struct work_struct *work) 18134e4eef64SMilan Broz { 18144e4eef64SMilan Broz struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 18154e4eef64SMilan Broz 18164e4eef64SMilan Broz if (bio_data_dir(io->base_bio) == READ) 18174e4eef64SMilan Broz kcryptd_crypt_read_convert(io); 18184e4eef64SMilan Broz else 18194e4eef64SMilan Broz kcryptd_crypt_write_convert(io); 18208b004457SMilan Broz } 18218b004457SMilan Broz 1822395b167cSAlasdair G Kergon static void kcryptd_queue_crypt(struct dm_crypt_io *io) 
1823395b167cSAlasdair G Kergon { 182449a8a920SAlasdair G Kergon struct crypt_config *cc = io->cc; 1825395b167cSAlasdair G Kergon 1826395b167cSAlasdair G Kergon INIT_WORK(&io->work, kcryptd_crypt); 1827395b167cSAlasdair G Kergon queue_work(cc->crypt_queue, &io->work); 1828395b167cSAlasdair G Kergon } 1829395b167cSAlasdair G Kergon 1830ef43aa38SMilan Broz static void crypt_free_tfms_aead(struct crypt_config *cc) 18311da177e4SLinus Torvalds { 1832ef43aa38SMilan Broz if (!cc->cipher_tfm.tfms_aead) 1833ef43aa38SMilan Broz return; 18341da177e4SLinus Torvalds 1835ef43aa38SMilan Broz if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) { 1836ef43aa38SMilan Broz crypto_free_aead(cc->cipher_tfm.tfms_aead[0]); 1837ef43aa38SMilan Broz cc->cipher_tfm.tfms_aead[0] = NULL; 18381da177e4SLinus Torvalds } 18391da177e4SLinus Torvalds 1840ef43aa38SMilan Broz kfree(cc->cipher_tfm.tfms_aead); 1841ef43aa38SMilan Broz cc->cipher_tfm.tfms_aead = NULL; 1842ef43aa38SMilan Broz } 18431da177e4SLinus Torvalds 1844ef43aa38SMilan Broz static void crypt_free_tfms_skcipher(struct crypt_config *cc) 1845d1f96423SMilan Broz { 1846d1f96423SMilan Broz unsigned i; 1847d1f96423SMilan Broz 1848ef43aa38SMilan Broz if (!cc->cipher_tfm.tfms) 1849fd2d231fSMikulas Patocka return; 1850fd2d231fSMikulas Patocka 1851d1f96423SMilan Broz for (i = 0; i < cc->tfms_count; i++) 1852ef43aa38SMilan Broz if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) { 1853ef43aa38SMilan Broz crypto_free_skcipher(cc->cipher_tfm.tfms[i]); 1854ef43aa38SMilan Broz cc->cipher_tfm.tfms[i] = NULL; 1855d1f96423SMilan Broz } 1856d1f96423SMilan Broz 1857ef43aa38SMilan Broz kfree(cc->cipher_tfm.tfms); 1858ef43aa38SMilan Broz cc->cipher_tfm.tfms = NULL; 18591da177e4SLinus Torvalds } 18601da177e4SLinus Torvalds 18611da177e4SLinus Torvalds static void crypt_free_tfms(struct crypt_config *cc) 1862d1f96423SMilan Broz { 186333d2f09fSMilan Broz if (crypt_integrity_aead(cc)) 1864ef43aa38SMilan Broz 
crypt_free_tfms_aead(cc); 1865ef43aa38SMilan Broz else 1866ef43aa38SMilan Broz crypt_free_tfms_skcipher(cc); 1867d1f96423SMilan Broz } 1868d1f96423SMilan Broz 1869ef43aa38SMilan Broz static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode) 1870d1f96423SMilan Broz { 1871d1f96423SMilan Broz unsigned i; 1872d1f96423SMilan Broz int err; 1873d1f96423SMilan Broz 18746396bb22SKees Cook cc->cipher_tfm.tfms = kcalloc(cc->tfms_count, 18756396bb22SKees Cook sizeof(struct crypto_skcipher *), 18766396bb22SKees Cook GFP_KERNEL); 1877ef43aa38SMilan Broz if (!cc->cipher_tfm.tfms) 1878fd2d231fSMikulas Patocka return -ENOMEM; 1879fd2d231fSMikulas Patocka 1880d1f96423SMilan Broz for (i = 0; i < cc->tfms_count; i++) { 1881ef43aa38SMilan Broz cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); 1882ef43aa38SMilan Broz if (IS_ERR(cc->cipher_tfm.tfms[i])) { 1883ef43aa38SMilan Broz err = PTR_ERR(cc->cipher_tfm.tfms[i]); 1884fd2d231fSMikulas Patocka crypt_free_tfms(cc); 1885d1f96423SMilan Broz return err; 1886d1f96423SMilan Broz } 1887d1f96423SMilan Broz } 1888d1f96423SMilan Broz 1889af331ebaSEric Biggers /* 1890af331ebaSEric Biggers * dm-crypt performance can vary greatly depending on which crypto 1891af331ebaSEric Biggers * algorithm implementation is used. Help people debug performance 1892af331ebaSEric Biggers * problems by logging the ->cra_driver_name. 
1893af331ebaSEric Biggers */ 18947a1cd723SMilan Broz DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode, 1895af331ebaSEric Biggers crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name); 1896d1f96423SMilan Broz return 0; 1897d1f96423SMilan Broz } 1898d1f96423SMilan Broz 1899ef43aa38SMilan Broz static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode) 1900ef43aa38SMilan Broz { 1901ef43aa38SMilan Broz int err; 1902ef43aa38SMilan Broz 1903ef43aa38SMilan Broz cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL); 1904ef43aa38SMilan Broz if (!cc->cipher_tfm.tfms) 1905ef43aa38SMilan Broz return -ENOMEM; 1906ef43aa38SMilan Broz 1907ef43aa38SMilan Broz cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0); 1908ef43aa38SMilan Broz if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) { 1909ef43aa38SMilan Broz err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]); 1910ef43aa38SMilan Broz crypt_free_tfms(cc); 1911ef43aa38SMilan Broz return err; 1912ef43aa38SMilan Broz } 1913ef43aa38SMilan Broz 19147a1cd723SMilan Broz DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode, 1915af331ebaSEric Biggers crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name); 1916ef43aa38SMilan Broz return 0; 1917ef43aa38SMilan Broz } 1918ef43aa38SMilan Broz 1919ef43aa38SMilan Broz static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) 1920ef43aa38SMilan Broz { 192133d2f09fSMilan Broz if (crypt_integrity_aead(cc)) 1922ef43aa38SMilan Broz return crypt_alloc_tfms_aead(cc, ciphermode); 1923ef43aa38SMilan Broz else 1924ef43aa38SMilan Broz return crypt_alloc_tfms_skcipher(cc, ciphermode); 1925ef43aa38SMilan Broz } 1926ef43aa38SMilan Broz 1927ef43aa38SMilan Broz static unsigned crypt_subkey_size(struct crypt_config *cc) 1928ef43aa38SMilan Broz { 1929ef43aa38SMilan Broz return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); 1930ef43aa38SMilan Broz } 1931ef43aa38SMilan Broz 1932ef43aa38SMilan Broz static unsigned 
crypt_authenckey_size(struct crypt_config *cc) 1933ef43aa38SMilan Broz { 1934ef43aa38SMilan Broz return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param)); 1935ef43aa38SMilan Broz } 1936ef43aa38SMilan Broz 1937ef43aa38SMilan Broz /* 1938ef43aa38SMilan Broz * If AEAD is composed like authenc(hmac(sha256),xts(aes)), 1939ef43aa38SMilan Broz * the key must be for some reason in special format. 1940ef43aa38SMilan Broz * This funcion converts cc->key to this special format. 1941ef43aa38SMilan Broz */ 1942ef43aa38SMilan Broz static void crypt_copy_authenckey(char *p, const void *key, 1943ef43aa38SMilan Broz unsigned enckeylen, unsigned authkeylen) 1944ef43aa38SMilan Broz { 1945ef43aa38SMilan Broz struct crypto_authenc_key_param *param; 1946ef43aa38SMilan Broz struct rtattr *rta; 1947ef43aa38SMilan Broz 1948ef43aa38SMilan Broz rta = (struct rtattr *)p; 1949ef43aa38SMilan Broz param = RTA_DATA(rta); 1950ef43aa38SMilan Broz param->enckeylen = cpu_to_be32(enckeylen); 1951ef43aa38SMilan Broz rta->rta_len = RTA_LENGTH(sizeof(*param)); 1952ef43aa38SMilan Broz rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; 1953ef43aa38SMilan Broz p += RTA_SPACE(sizeof(*param)); 1954ef43aa38SMilan Broz memcpy(p, key + enckeylen, authkeylen); 1955ef43aa38SMilan Broz p += authkeylen; 1956ef43aa38SMilan Broz memcpy(p, key, enckeylen); 1957ef43aa38SMilan Broz } 1958ef43aa38SMilan Broz 1959671ea6b4SMikulas Patocka static int crypt_setkey(struct crypt_config *cc) 1960c0297721SAndi Kleen { 1961da31a078SMilan Broz unsigned subkey_size; 1962fd2d231fSMikulas Patocka int err = 0, i, r; 1963c0297721SAndi Kleen 1964da31a078SMilan Broz /* Ignore extra keys (which are used for IV etc) */ 1965ef43aa38SMilan Broz subkey_size = crypt_subkey_size(cc); 1966da31a078SMilan Broz 196727c70036SMilan Broz if (crypt_integrity_hmac(cc)) { 196827c70036SMilan Broz if (subkey_size < cc->key_mac_size) 196927c70036SMilan Broz return -EINVAL; 197027c70036SMilan Broz 1971ef43aa38SMilan Broz 
crypt_copy_authenckey(cc->authenc_key, cc->key, 1972ef43aa38SMilan Broz subkey_size - cc->key_mac_size, 1973ef43aa38SMilan Broz cc->key_mac_size); 197427c70036SMilan Broz } 197527c70036SMilan Broz 1976d1f96423SMilan Broz for (i = 0; i < cc->tfms_count; i++) { 197733d2f09fSMilan Broz if (crypt_integrity_hmac(cc)) 197833d2f09fSMilan Broz r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], 197933d2f09fSMilan Broz cc->authenc_key, crypt_authenckey_size(cc)); 198033d2f09fSMilan Broz else if (crypt_integrity_aead(cc)) 1981ef43aa38SMilan Broz r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], 1982ef43aa38SMilan Broz cc->key + (i * subkey_size), 1983ef43aa38SMilan Broz subkey_size); 1984ef43aa38SMilan Broz else 1985ef43aa38SMilan Broz r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i], 1986fd2d231fSMikulas Patocka cc->key + (i * subkey_size), 1987fd2d231fSMikulas Patocka subkey_size); 1988c0297721SAndi Kleen if (r) 1989c0297721SAndi Kleen err = r; 1990c0297721SAndi Kleen } 1991c0297721SAndi Kleen 1992ef43aa38SMilan Broz if (crypt_integrity_hmac(cc)) 1993ef43aa38SMilan Broz memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc)); 1994ef43aa38SMilan Broz 1995c0297721SAndi Kleen return err; 1996c0297721SAndi Kleen } 1997c0297721SAndi Kleen 1998c538f6ecSOndrej Kozina #ifdef CONFIG_KEYS 1999c538f6ecSOndrej Kozina 2000027c431cSOndrej Kozina static bool contains_whitespace(const char *str) 2001027c431cSOndrej Kozina { 2002027c431cSOndrej Kozina while (*str) 2003027c431cSOndrej Kozina if (isspace(*str++)) 2004027c431cSOndrej Kozina return true; 2005027c431cSOndrej Kozina return false; 2006027c431cSOndrej Kozina } 2007027c431cSOndrej Kozina 2008c538f6ecSOndrej Kozina static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string) 2009c538f6ecSOndrej Kozina { 2010c538f6ecSOndrej Kozina char *new_key_string, *key_desc; 2011c538f6ecSOndrej Kozina int ret; 2012c538f6ecSOndrej Kozina struct key *key; 2013c538f6ecSOndrej Kozina const struct user_key_payload *ukp; 
2014c538f6ecSOndrej Kozina 2015027c431cSOndrej Kozina /* 2016027c431cSOndrej Kozina * Reject key_string with whitespace. dm core currently lacks code for 2017027c431cSOndrej Kozina * proper whitespace escaping in arguments on DM_TABLE_STATUS path. 2018027c431cSOndrej Kozina */ 2019027c431cSOndrej Kozina if (contains_whitespace(key_string)) { 2020027c431cSOndrej Kozina DMERR("whitespace chars not allowed in key string"); 2021027c431cSOndrej Kozina return -EINVAL; 2022027c431cSOndrej Kozina } 2023027c431cSOndrej Kozina 2024c538f6ecSOndrej Kozina /* look for next ':' separating key_type from key_description */ 2025c538f6ecSOndrej Kozina key_desc = strpbrk(key_string, ":"); 2026c538f6ecSOndrej Kozina if (!key_desc || key_desc == key_string || !strlen(key_desc + 1)) 2027c538f6ecSOndrej Kozina return -EINVAL; 2028c538f6ecSOndrej Kozina 2029c538f6ecSOndrej Kozina if (strncmp(key_string, "logon:", key_desc - key_string + 1) && 2030c538f6ecSOndrej Kozina strncmp(key_string, "user:", key_desc - key_string + 1)) 2031c538f6ecSOndrej Kozina return -EINVAL; 2032c538f6ecSOndrej Kozina 2033c538f6ecSOndrej Kozina new_key_string = kstrdup(key_string, GFP_KERNEL); 2034c538f6ecSOndrej Kozina if (!new_key_string) 2035c538f6ecSOndrej Kozina return -ENOMEM; 2036c538f6ecSOndrej Kozina 2037c538f6ecSOndrej Kozina key = request_key(key_string[0] == 'l' ? 
&key_type_logon : &key_type_user, 2038c538f6ecSOndrej Kozina key_desc + 1, NULL); 2039c538f6ecSOndrej Kozina if (IS_ERR(key)) { 2040c538f6ecSOndrej Kozina kzfree(new_key_string); 2041c538f6ecSOndrej Kozina return PTR_ERR(key); 2042c538f6ecSOndrej Kozina } 2043c538f6ecSOndrej Kozina 2044f5b0cba8SOndrej Kozina down_read(&key->sem); 2045c538f6ecSOndrej Kozina 20460837e49aSDavid Howells ukp = user_key_payload_locked(key); 2047c538f6ecSOndrej Kozina if (!ukp) { 2048f5b0cba8SOndrej Kozina up_read(&key->sem); 2049c538f6ecSOndrej Kozina key_put(key); 2050c538f6ecSOndrej Kozina kzfree(new_key_string); 2051c538f6ecSOndrej Kozina return -EKEYREVOKED; 2052c538f6ecSOndrej Kozina } 2053c538f6ecSOndrej Kozina 2054c538f6ecSOndrej Kozina if (cc->key_size != ukp->datalen) { 2055f5b0cba8SOndrej Kozina up_read(&key->sem); 2056c538f6ecSOndrej Kozina key_put(key); 2057c538f6ecSOndrej Kozina kzfree(new_key_string); 2058c538f6ecSOndrej Kozina return -EINVAL; 2059c538f6ecSOndrej Kozina } 2060c538f6ecSOndrej Kozina 2061c538f6ecSOndrej Kozina memcpy(cc->key, ukp->data, cc->key_size); 2062c538f6ecSOndrej Kozina 2063f5b0cba8SOndrej Kozina up_read(&key->sem); 2064c538f6ecSOndrej Kozina key_put(key); 2065c538f6ecSOndrej Kozina 2066c538f6ecSOndrej Kozina /* clear the flag since following operations may invalidate previously valid key */ 2067c538f6ecSOndrej Kozina clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); 2068c538f6ecSOndrej Kozina 2069c538f6ecSOndrej Kozina ret = crypt_setkey(cc); 2070c538f6ecSOndrej Kozina 2071c538f6ecSOndrej Kozina if (!ret) { 2072c538f6ecSOndrej Kozina set_bit(DM_CRYPT_KEY_VALID, &cc->flags); 2073c538f6ecSOndrej Kozina kzfree(cc->key_string); 2074c538f6ecSOndrej Kozina cc->key_string = new_key_string; 2075c538f6ecSOndrej Kozina } else 2076c538f6ecSOndrej Kozina kzfree(new_key_string); 2077c538f6ecSOndrej Kozina 2078c538f6ecSOndrej Kozina return ret; 2079c538f6ecSOndrej Kozina } 2080c538f6ecSOndrej Kozina 2081c538f6ecSOndrej Kozina static int get_key_size(char **key_string) 
2082c538f6ecSOndrej Kozina { 2083c538f6ecSOndrej Kozina char *colon, dummy; 2084c538f6ecSOndrej Kozina int ret; 2085c538f6ecSOndrej Kozina 2086c538f6ecSOndrej Kozina if (*key_string[0] != ':') 2087c538f6ecSOndrej Kozina return strlen(*key_string) >> 1; 2088c538f6ecSOndrej Kozina 2089c538f6ecSOndrej Kozina /* look for next ':' in key string */ 2090c538f6ecSOndrej Kozina colon = strpbrk(*key_string + 1, ":"); 2091c538f6ecSOndrej Kozina if (!colon) 2092c538f6ecSOndrej Kozina return -EINVAL; 2093c538f6ecSOndrej Kozina 2094c538f6ecSOndrej Kozina if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':') 2095c538f6ecSOndrej Kozina return -EINVAL; 2096c538f6ecSOndrej Kozina 2097c538f6ecSOndrej Kozina *key_string = colon; 2098c538f6ecSOndrej Kozina 2099c538f6ecSOndrej Kozina /* remaining key string should be :<logon|user>:<key_desc> */ 2100c538f6ecSOndrej Kozina 2101c538f6ecSOndrej Kozina return ret; 2102c538f6ecSOndrej Kozina } 2103c538f6ecSOndrej Kozina 2104c538f6ecSOndrej Kozina #else 2105c538f6ecSOndrej Kozina 2106c538f6ecSOndrej Kozina static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string) 2107c538f6ecSOndrej Kozina { 2108c538f6ecSOndrej Kozina return -EINVAL; 2109c538f6ecSOndrej Kozina } 2110c538f6ecSOndrej Kozina 2111c538f6ecSOndrej Kozina static int get_key_size(char **key_string) 2112c538f6ecSOndrej Kozina { 2113c538f6ecSOndrej Kozina return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1; 2114c538f6ecSOndrej Kozina } 2115c538f6ecSOndrej Kozina 2116c538f6ecSOndrej Kozina #endif 2117c538f6ecSOndrej Kozina 2118e48d4bbfSMilan Broz static int crypt_set_key(struct crypt_config *cc, char *key) 2119e48d4bbfSMilan Broz { 2120de8be5acSMilan Broz int r = -EINVAL; 2121de8be5acSMilan Broz int key_string_len = strlen(key); 2122de8be5acSMilan Broz 212369a8cfcdSMilan Broz /* Hyphen (which gives a key_size of zero) means there is no key. 
*/ 212469a8cfcdSMilan Broz if (!cc->key_size && strcmp(key, "-")) 2125de8be5acSMilan Broz goto out; 2126e48d4bbfSMilan Broz 2127c538f6ecSOndrej Kozina /* ':' means the key is in kernel keyring, short-circuit normal key processing */ 2128c538f6ecSOndrej Kozina if (key[0] == ':') { 2129c538f6ecSOndrej Kozina r = crypt_set_keyring_key(cc, key + 1); 2130c538f6ecSOndrej Kozina goto out; 2131c538f6ecSOndrej Kozina } 2132c538f6ecSOndrej Kozina 2133265e9098SOndrej Kozina /* clear the flag since following operations may invalidate previously valid key */ 2134265e9098SOndrej Kozina clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); 2135265e9098SOndrej Kozina 2136c538f6ecSOndrej Kozina /* wipe references to any kernel keyring key */ 2137c538f6ecSOndrej Kozina kzfree(cc->key_string); 2138c538f6ecSOndrej Kozina cc->key_string = NULL; 2139c538f6ecSOndrej Kozina 2140e944e03eSAndy Shevchenko /* Decode key from its hex representation. */ 2141e944e03eSAndy Shevchenko if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0) 2142de8be5acSMilan Broz goto out; 2143e48d4bbfSMilan Broz 2144671ea6b4SMikulas Patocka r = crypt_setkey(cc); 2145265e9098SOndrej Kozina if (!r) 2146e48d4bbfSMilan Broz set_bit(DM_CRYPT_KEY_VALID, &cc->flags); 2147e48d4bbfSMilan Broz 2148de8be5acSMilan Broz out: 2149de8be5acSMilan Broz /* Hex key string not needed after here, so wipe it. 
*/ 2150de8be5acSMilan Broz memset(key, '0', key_string_len); 2151de8be5acSMilan Broz 2152de8be5acSMilan Broz return r; 2153e48d4bbfSMilan Broz } 2154e48d4bbfSMilan Broz 2155e48d4bbfSMilan Broz static int crypt_wipe_key(struct crypt_config *cc) 2156e48d4bbfSMilan Broz { 2157c82feeecSOndrej Kozina int r; 2158c82feeecSOndrej Kozina 2159e48d4bbfSMilan Broz clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); 2160c82feeecSOndrej Kozina get_random_bytes(&cc->key, cc->key_size); 21614a52ffc7SMilan Broz 21624a52ffc7SMilan Broz /* Wipe IV private keys */ 21634a52ffc7SMilan Broz if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { 21644a52ffc7SMilan Broz r = cc->iv_gen_ops->wipe(cc); 21654a52ffc7SMilan Broz if (r) 21664a52ffc7SMilan Broz return r; 21674a52ffc7SMilan Broz } 21684a52ffc7SMilan Broz 2169c538f6ecSOndrej Kozina kzfree(cc->key_string); 2170c538f6ecSOndrej Kozina cc->key_string = NULL; 2171c82feeecSOndrej Kozina r = crypt_setkey(cc); 2172c82feeecSOndrej Kozina memset(&cc->key, 0, cc->key_size * sizeof(u8)); 2173c0297721SAndi Kleen 2174c82feeecSOndrej Kozina return r; 2175e48d4bbfSMilan Broz } 2176e48d4bbfSMilan Broz 21775059353dSMikulas Patocka static void crypt_calculate_pages_per_client(void) 21785059353dSMikulas Patocka { 2179ca79b0c2SArun KS unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100; 21805059353dSMikulas Patocka 21815059353dSMikulas Patocka if (!dm_crypt_clients_n) 21825059353dSMikulas Patocka return; 21835059353dSMikulas Patocka 21845059353dSMikulas Patocka pages /= dm_crypt_clients_n; 21855059353dSMikulas Patocka if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT) 21865059353dSMikulas Patocka pages = DM_CRYPT_MIN_PAGES_PER_CLIENT; 21875059353dSMikulas Patocka dm_crypt_pages_per_client = pages; 21885059353dSMikulas Patocka } 21895059353dSMikulas Patocka 21905059353dSMikulas Patocka static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data) 21915059353dSMikulas Patocka { 21925059353dSMikulas Patocka struct crypt_config *cc = 
pool_data; 21935059353dSMikulas Patocka struct page *page; 21945059353dSMikulas Patocka 21955059353dSMikulas Patocka if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) && 21965059353dSMikulas Patocka likely(gfp_mask & __GFP_NORETRY)) 21975059353dSMikulas Patocka return NULL; 21985059353dSMikulas Patocka 21995059353dSMikulas Patocka page = alloc_page(gfp_mask); 22005059353dSMikulas Patocka if (likely(page != NULL)) 22015059353dSMikulas Patocka percpu_counter_add(&cc->n_allocated_pages, 1); 22025059353dSMikulas Patocka 22035059353dSMikulas Patocka return page; 22045059353dSMikulas Patocka } 22055059353dSMikulas Patocka 22065059353dSMikulas Patocka static void crypt_page_free(void *page, void *pool_data) 22075059353dSMikulas Patocka { 22085059353dSMikulas Patocka struct crypt_config *cc = pool_data; 22095059353dSMikulas Patocka 22105059353dSMikulas Patocka __free_page(page); 22115059353dSMikulas Patocka percpu_counter_sub(&cc->n_allocated_pages, 1); 22125059353dSMikulas Patocka } 22135059353dSMikulas Patocka 221428513fccSMilan Broz static void crypt_dtr(struct dm_target *ti) 221528513fccSMilan Broz { 221628513fccSMilan Broz struct crypt_config *cc = ti->private; 221728513fccSMilan Broz 221828513fccSMilan Broz ti->private = NULL; 221928513fccSMilan Broz 222028513fccSMilan Broz if (!cc) 222128513fccSMilan Broz return; 222228513fccSMilan Broz 2223f659b100SRabin Vincent if (cc->write_thread) 2224dc267621SMikulas Patocka kthread_stop(cc->write_thread); 2225dc267621SMikulas Patocka 222628513fccSMilan Broz if (cc->io_queue) 222728513fccSMilan Broz destroy_workqueue(cc->io_queue); 222828513fccSMilan Broz if (cc->crypt_queue) 222928513fccSMilan Broz destroy_workqueue(cc->crypt_queue); 223028513fccSMilan Broz 2231fd2d231fSMikulas Patocka crypt_free_tfms(cc); 2232fd2d231fSMikulas Patocka 22336f1c819cSKent Overstreet bioset_exit(&cc->bs); 223428513fccSMilan Broz 22356f1c819cSKent Overstreet mempool_exit(&cc->page_pool); 22366f1c819cSKent 
Overstreet mempool_exit(&cc->req_pool); 22376f1c819cSKent Overstreet mempool_exit(&cc->tag_pool); 22386f1c819cSKent Overstreet 2239d00a11dfSKent Overstreet WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0); 2240d00a11dfSKent Overstreet percpu_counter_destroy(&cc->n_allocated_pages); 2241d00a11dfSKent Overstreet 224228513fccSMilan Broz if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 224328513fccSMilan Broz cc->iv_gen_ops->dtr(cc); 224428513fccSMilan Broz 224528513fccSMilan Broz if (cc->dev) 224628513fccSMilan Broz dm_put_device(ti, cc->dev); 224728513fccSMilan Broz 22485ebaee6dSMilan Broz kzfree(cc->cipher); 22497dbcd137SMilan Broz kzfree(cc->cipher_string); 2250c538f6ecSOndrej Kozina kzfree(cc->key_string); 2251ef43aa38SMilan Broz kzfree(cc->cipher_auth); 2252ef43aa38SMilan Broz kzfree(cc->authenc_key); 225328513fccSMilan Broz 2254d5ffebddSMike Snitzer mutex_destroy(&cc->bio_alloc_lock); 2255d5ffebddSMike Snitzer 225628513fccSMilan Broz /* Must zero key material before freeing */ 225728513fccSMilan Broz kzfree(cc); 22585059353dSMikulas Patocka 22595059353dSMikulas Patocka spin_lock(&dm_crypt_clients_lock); 22605059353dSMikulas Patocka WARN_ON(!dm_crypt_clients_n); 22615059353dSMikulas Patocka dm_crypt_clients_n--; 22625059353dSMikulas Patocka crypt_calculate_pages_per_client(); 22635059353dSMikulas Patocka spin_unlock(&dm_crypt_clients_lock); 226428513fccSMilan Broz } 226528513fccSMilan Broz 2266e889f97aSMilan Broz static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode) 22671da177e4SLinus Torvalds { 22685ebaee6dSMilan Broz struct crypt_config *cc = ti->private; 22691da177e4SLinus Torvalds 227033d2f09fSMilan Broz if (crypt_integrity_aead(cc)) 2271e889f97aSMilan Broz cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc)); 2272e889f97aSMilan Broz else 2273bbdb23b5SHerbert Xu cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); 2274e889f97aSMilan Broz 22755ebaee6dSMilan Broz if (cc->iv_size) 22765ebaee6dSMilan Broz /* at least a 64 bit sector number should 
fit in our buffer */ 22775ebaee6dSMilan Broz cc->iv_size = max(cc->iv_size, 22785ebaee6dSMilan Broz (unsigned int)(sizeof(u64) / sizeof(u8))); 22795ebaee6dSMilan Broz else if (ivmode) { 22805ebaee6dSMilan Broz DMWARN("Selected cipher does not support IVs"); 22815ebaee6dSMilan Broz ivmode = NULL; 22825ebaee6dSMilan Broz } 22835ebaee6dSMilan Broz 22845ebaee6dSMilan Broz /* Choose ivmode, see comments at iv code. */ 22851da177e4SLinus Torvalds if (ivmode == NULL) 22861da177e4SLinus Torvalds cc->iv_gen_ops = NULL; 22871da177e4SLinus Torvalds else if (strcmp(ivmode, "plain") == 0) 22881da177e4SLinus Torvalds cc->iv_gen_ops = &crypt_iv_plain_ops; 228961afef61SMilan Broz else if (strcmp(ivmode, "plain64") == 0) 229061afef61SMilan Broz cc->iv_gen_ops = &crypt_iv_plain64_ops; 22917e3fd855SMilan Broz else if (strcmp(ivmode, "plain64be") == 0) 22927e3fd855SMilan Broz cc->iv_gen_ops = &crypt_iv_plain64be_ops; 22931da177e4SLinus Torvalds else if (strcmp(ivmode, "essiv") == 0) 22941da177e4SLinus Torvalds cc->iv_gen_ops = &crypt_iv_essiv_ops; 229548527fa7SRik Snel else if (strcmp(ivmode, "benbi") == 0) 229648527fa7SRik Snel cc->iv_gen_ops = &crypt_iv_benbi_ops; 229746b47730SLudwig Nussel else if (strcmp(ivmode, "null") == 0) 229846b47730SLudwig Nussel cc->iv_gen_ops = &crypt_iv_null_ops; 229934745785SMilan Broz else if (strcmp(ivmode, "lmk") == 0) { 230034745785SMilan Broz cc->iv_gen_ops = &crypt_iv_lmk_ops; 2301ed04d981SMilan Broz /* 2302ed04d981SMilan Broz * Version 2 and 3 is recognised according 230334745785SMilan Broz * to length of provided multi-key string. 230434745785SMilan Broz * If present (version 3), last key is used as IV seed. 2305ed04d981SMilan Broz * All keys (including IV seed) are always the same size. 
230634745785SMilan Broz */ 2307da31a078SMilan Broz if (cc->key_size % cc->key_parts) { 230834745785SMilan Broz cc->key_parts++; 2309da31a078SMilan Broz cc->key_extra_size = cc->key_size / cc->key_parts; 2310da31a078SMilan Broz } 2311ed04d981SMilan Broz } else if (strcmp(ivmode, "tcw") == 0) { 2312ed04d981SMilan Broz cc->iv_gen_ops = &crypt_iv_tcw_ops; 2313ed04d981SMilan Broz cc->key_parts += 2; /* IV + whitening */ 2314ed04d981SMilan Broz cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; 2315e889f97aSMilan Broz } else if (strcmp(ivmode, "random") == 0) { 2316e889f97aSMilan Broz cc->iv_gen_ops = &crypt_iv_random_ops; 2317e889f97aSMilan Broz /* Need storage space in integrity fields. */ 2318e889f97aSMilan Broz cc->integrity_iv_size = cc->iv_size; 231934745785SMilan Broz } else { 232072d94861SAlasdair G Kergon ti->error = "Invalid IV mode"; 2321e889f97aSMilan Broz return -EINVAL; 23221da177e4SLinus Torvalds } 23231da177e4SLinus Torvalds 2324e889f97aSMilan Broz return 0; 2325e889f97aSMilan Broz } 2326e889f97aSMilan Broz 232733d2f09fSMilan Broz /* 232833d2f09fSMilan Broz * Workaround to parse cipher algorithm from crypto API spec. 232933d2f09fSMilan Broz * The cc->cipher is currently used only in ESSIV. 233033d2f09fSMilan Broz * This should be probably done by crypto-api calls (once available...) 
233133d2f09fSMilan Broz */ 233233d2f09fSMilan Broz static int crypt_ctr_blkdev_cipher(struct crypt_config *cc) 233333d2f09fSMilan Broz { 233433d2f09fSMilan Broz const char *alg_name = NULL; 233533d2f09fSMilan Broz char *start, *end; 233633d2f09fSMilan Broz 233733d2f09fSMilan Broz if (crypt_integrity_aead(cc)) { 233833d2f09fSMilan Broz alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc))); 233933d2f09fSMilan Broz if (!alg_name) 234033d2f09fSMilan Broz return -EINVAL; 234133d2f09fSMilan Broz if (crypt_integrity_hmac(cc)) { 234233d2f09fSMilan Broz alg_name = strchr(alg_name, ','); 234333d2f09fSMilan Broz if (!alg_name) 234433d2f09fSMilan Broz return -EINVAL; 234533d2f09fSMilan Broz } 234633d2f09fSMilan Broz alg_name++; 234733d2f09fSMilan Broz } else { 234833d2f09fSMilan Broz alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc))); 234933d2f09fSMilan Broz if (!alg_name) 235033d2f09fSMilan Broz return -EINVAL; 235133d2f09fSMilan Broz } 235233d2f09fSMilan Broz 235333d2f09fSMilan Broz start = strchr(alg_name, '('); 235433d2f09fSMilan Broz end = strchr(alg_name, ')'); 235533d2f09fSMilan Broz 235633d2f09fSMilan Broz if (!start && !end) { 235733d2f09fSMilan Broz cc->cipher = kstrdup(alg_name, GFP_KERNEL); 235833d2f09fSMilan Broz return cc->cipher ? 0 : -ENOMEM; 235933d2f09fSMilan Broz } 236033d2f09fSMilan Broz 236133d2f09fSMilan Broz if (!start || !end || ++start >= end) 236233d2f09fSMilan Broz return -EINVAL; 236333d2f09fSMilan Broz 236433d2f09fSMilan Broz cc->cipher = kzalloc(end - start + 1, GFP_KERNEL); 236533d2f09fSMilan Broz if (!cc->cipher) 236633d2f09fSMilan Broz return -ENOMEM; 236733d2f09fSMilan Broz 236833d2f09fSMilan Broz strncpy(cc->cipher, start, end - start); 236933d2f09fSMilan Broz 237033d2f09fSMilan Broz return 0; 237133d2f09fSMilan Broz } 237233d2f09fSMilan Broz 237333d2f09fSMilan Broz /* 237433d2f09fSMilan Broz * Workaround to parse HMAC algorithm from AEAD crypto API spec. 
237533d2f09fSMilan Broz * The HMAC is needed to calculate tag size (HMAC digest size). 237633d2f09fSMilan Broz * This should be probably done by crypto-api calls (once available...) 237733d2f09fSMilan Broz */ 237833d2f09fSMilan Broz static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api) 237933d2f09fSMilan Broz { 238033d2f09fSMilan Broz char *start, *end, *mac_alg = NULL; 238133d2f09fSMilan Broz struct crypto_ahash *mac; 238233d2f09fSMilan Broz 238333d2f09fSMilan Broz if (!strstarts(cipher_api, "authenc(")) 238433d2f09fSMilan Broz return 0; 238533d2f09fSMilan Broz 238633d2f09fSMilan Broz start = strchr(cipher_api, '('); 238733d2f09fSMilan Broz end = strchr(cipher_api, ','); 238833d2f09fSMilan Broz if (!start || !end || ++start > end) 238933d2f09fSMilan Broz return -EINVAL; 239033d2f09fSMilan Broz 239133d2f09fSMilan Broz mac_alg = kzalloc(end - start + 1, GFP_KERNEL); 239233d2f09fSMilan Broz if (!mac_alg) 239333d2f09fSMilan Broz return -ENOMEM; 239433d2f09fSMilan Broz strncpy(mac_alg, start, end - start); 239533d2f09fSMilan Broz 239633d2f09fSMilan Broz mac = crypto_alloc_ahash(mac_alg, 0, 0); 239733d2f09fSMilan Broz kfree(mac_alg); 239833d2f09fSMilan Broz 239933d2f09fSMilan Broz if (IS_ERR(mac)) 240033d2f09fSMilan Broz return PTR_ERR(mac); 240133d2f09fSMilan Broz 240233d2f09fSMilan Broz cc->key_mac_size = crypto_ahash_digestsize(mac); 240333d2f09fSMilan Broz crypto_free_ahash(mac); 240433d2f09fSMilan Broz 240533d2f09fSMilan Broz cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL); 240633d2f09fSMilan Broz if (!cc->authenc_key) 240733d2f09fSMilan Broz return -ENOMEM; 240833d2f09fSMilan Broz 240933d2f09fSMilan Broz return 0; 241033d2f09fSMilan Broz } 241133d2f09fSMilan Broz 241233d2f09fSMilan Broz static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key, 241333d2f09fSMilan Broz char **ivmode, char **ivopts) 24141da177e4SLinus Torvalds { 24155ebaee6dSMilan Broz struct crypt_config *cc = ti->private; 
241633d2f09fSMilan Broz char *tmp, *cipher_api; 241733d2f09fSMilan Broz int ret = -EINVAL; 241833d2f09fSMilan Broz 241933d2f09fSMilan Broz cc->tfms_count = 1; 242033d2f09fSMilan Broz 242133d2f09fSMilan Broz /* 242233d2f09fSMilan Broz * New format (capi: prefix) 242333d2f09fSMilan Broz * capi:cipher_api_spec-iv:ivopts 242433d2f09fSMilan Broz */ 242533d2f09fSMilan Broz tmp = &cipher_in[strlen("capi:")]; 24261856b9f7SMilan Broz 24271856b9f7SMilan Broz /* Separate IV options if present, it can contain another '-' in hash name */ 24281856b9f7SMilan Broz *ivopts = strrchr(tmp, ':'); 24291856b9f7SMilan Broz if (*ivopts) { 24301856b9f7SMilan Broz **ivopts = '\0'; 24311856b9f7SMilan Broz (*ivopts)++; 24321856b9f7SMilan Broz } 24331856b9f7SMilan Broz /* Parse IV mode */ 24341856b9f7SMilan Broz *ivmode = strrchr(tmp, '-'); 24351856b9f7SMilan Broz if (*ivmode) { 24361856b9f7SMilan Broz **ivmode = '\0'; 24371856b9f7SMilan Broz (*ivmode)++; 24381856b9f7SMilan Broz } 24391856b9f7SMilan Broz /* The rest is crypto API spec */ 24401856b9f7SMilan Broz cipher_api = tmp; 244133d2f09fSMilan Broz 244233d2f09fSMilan Broz if (*ivmode && !strcmp(*ivmode, "lmk")) 244333d2f09fSMilan Broz cc->tfms_count = 64; 244433d2f09fSMilan Broz 244533d2f09fSMilan Broz cc->key_parts = cc->tfms_count; 244633d2f09fSMilan Broz 244733d2f09fSMilan Broz /* Allocate cipher */ 244833d2f09fSMilan Broz ret = crypt_alloc_tfms(cc, cipher_api); 244933d2f09fSMilan Broz if (ret < 0) { 245033d2f09fSMilan Broz ti->error = "Error allocating crypto tfm"; 245133d2f09fSMilan Broz return ret; 245233d2f09fSMilan Broz } 245333d2f09fSMilan Broz 245433d2f09fSMilan Broz /* Alloc AEAD, can be used only in new format. 
*/ 245533d2f09fSMilan Broz if (crypt_integrity_aead(cc)) { 245633d2f09fSMilan Broz ret = crypt_ctr_auth_cipher(cc, cipher_api); 245733d2f09fSMilan Broz if (ret < 0) { 245833d2f09fSMilan Broz ti->error = "Invalid AEAD cipher spec"; 245933d2f09fSMilan Broz return -ENOMEM; 246033d2f09fSMilan Broz } 246133d2f09fSMilan Broz cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc)); 246233d2f09fSMilan Broz } else 246333d2f09fSMilan Broz cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); 246433d2f09fSMilan Broz 246533d2f09fSMilan Broz ret = crypt_ctr_blkdev_cipher(cc); 246633d2f09fSMilan Broz if (ret < 0) { 246733d2f09fSMilan Broz ti->error = "Cannot allocate cipher string"; 246833d2f09fSMilan Broz return -ENOMEM; 246933d2f09fSMilan Broz } 247033d2f09fSMilan Broz 247133d2f09fSMilan Broz return 0; 247233d2f09fSMilan Broz } 247333d2f09fSMilan Broz 247433d2f09fSMilan Broz static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key, 247533d2f09fSMilan Broz char **ivmode, char **ivopts) 247633d2f09fSMilan Broz { 247733d2f09fSMilan Broz struct crypt_config *cc = ti->private; 247833d2f09fSMilan Broz char *tmp, *cipher, *chainmode, *keycount; 24795ebaee6dSMilan Broz char *cipher_api = NULL; 24805ebaee6dSMilan Broz int ret = -EINVAL; 24815ebaee6dSMilan Broz char dummy; 24825ebaee6dSMilan Broz 248333d2f09fSMilan Broz if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) { 24845ebaee6dSMilan Broz ti->error = "Bad cipher specification"; 24855ebaee6dSMilan Broz return -EINVAL; 24865ebaee6dSMilan Broz } 24875ebaee6dSMilan Broz 24881da177e4SLinus Torvalds /* 24895ebaee6dSMilan Broz * Legacy dm-crypt cipher specification 24905ebaee6dSMilan Broz * cipher[:keycount]-mode-iv:ivopts 24915ebaee6dSMilan Broz */ 24925ebaee6dSMilan Broz tmp = cipher_in; 24935ebaee6dSMilan Broz keycount = strsep(&tmp, "-"); 24945ebaee6dSMilan Broz cipher = strsep(&keycount, ":"); 24955ebaee6dSMilan Broz 249669a8cfcdSMilan Broz if (!keycount) 24975ebaee6dSMilan Broz cc->tfms_count = 1; 
24985ebaee6dSMilan Broz else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 || 24995ebaee6dSMilan Broz !is_power_of_2(cc->tfms_count)) { 25005ebaee6dSMilan Broz ti->error = "Bad cipher key count specification"; 25015ebaee6dSMilan Broz return -EINVAL; 25025ebaee6dSMilan Broz } 250328513fccSMilan Broz cc->key_parts = cc->tfms_count; 25041da177e4SLinus Torvalds 250572d94861SAlasdair G Kergon cc->cipher = kstrdup(cipher, GFP_KERNEL); 250628513fccSMilan Broz if (!cc->cipher) 25071da177e4SLinus Torvalds goto bad_mem; 25081da177e4SLinus Torvalds 2509ddd42edfSMilan Broz chainmode = strsep(&tmp, "-"); 25101856b9f7SMilan Broz *ivmode = strsep(&tmp, ":"); 25111856b9f7SMilan Broz *ivopts = tmp; 2512ddd42edfSMilan Broz 2513ddd42edfSMilan Broz /* 2514ddd42edfSMilan Broz * For compatibility with the original dm-crypt mapping format, if 2515ddd42edfSMilan Broz * only the cipher name is supplied, use cbc-plain. 251628513fccSMilan Broz */ 251733d2f09fSMilan Broz if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) { 2518cabf08e4SMilan Broz chainmode = "cbc"; 251933d2f09fSMilan Broz *ivmode = "plain"; 2520cabf08e4SMilan Broz } 2521cabf08e4SMilan Broz 252233d2f09fSMilan Broz if (strcmp(chainmode, "ecb") && !*ivmode) { 2523c0297721SAndi Kleen ti->error = "IV mechanism required"; 2524c0297721SAndi Kleen return -EINVAL; 2525c0297721SAndi Kleen } 2526c0297721SAndi Kleen 2527cabf08e4SMilan Broz cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL); 25289934a8beSMilan Broz if (!cipher_api) 252928513fccSMilan Broz goto bad_mem; 25309934a8beSMilan Broz 25319934a8beSMilan Broz ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, 2532647c7db1SMikulas Patocka "%s(%s)", chainmode, cipher); 25331da177e4SLinus Torvalds if (ret < 0) { 25341da177e4SLinus Torvalds kfree(cipher_api); 253528513fccSMilan Broz goto bad_mem; 253628513fccSMilan Broz } 253728513fccSMilan Broz 25381da177e4SLinus Torvalds /* Allocate cipher */ 25391da177e4SLinus Torvalds ret = crypt_alloc_tfms(cc, cipher_api); 
25401da177e4SLinus Torvalds if (ret < 0) { 25411da177e4SLinus Torvalds ti->error = "Error allocating crypto tfm"; 254233d2f09fSMilan Broz kfree(cipher_api); 254333d2f09fSMilan Broz return ret; 2544028867acSAlasdair G Kergon } 2545bd86e320SJeffy Chen kfree(cipher_api); 2546647c7db1SMikulas Patocka 254733d2f09fSMilan Broz return 0; 254833d2f09fSMilan Broz bad_mem: 254933d2f09fSMilan Broz ti->error = "Cannot allocate cipher strings"; 255033d2f09fSMilan Broz return -ENOMEM; 255133d2f09fSMilan Broz } 255233d2f09fSMilan Broz 255333d2f09fSMilan Broz static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key) 255433d2f09fSMilan Broz { 255533d2f09fSMilan Broz struct crypt_config *cc = ti->private; 255633d2f09fSMilan Broz char *ivmode = NULL, *ivopts = NULL; 255733d2f09fSMilan Broz int ret; 255833d2f09fSMilan Broz 255933d2f09fSMilan Broz cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL); 256033d2f09fSMilan Broz if (!cc->cipher_string) { 256133d2f09fSMilan Broz ti->error = "Cannot allocate cipher strings"; 256233d2f09fSMilan Broz return -ENOMEM; 256333d2f09fSMilan Broz } 256433d2f09fSMilan Broz 256533d2f09fSMilan Broz if (strstarts(cipher_in, "capi:")) 256633d2f09fSMilan Broz ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts); 256733d2f09fSMilan Broz else 256833d2f09fSMilan Broz ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts); 256933d2f09fSMilan Broz if (ret) 257033d2f09fSMilan Broz return ret; 257133d2f09fSMilan Broz 2572647c7db1SMikulas Patocka /* Initialize IV */ 2573e889f97aSMilan Broz ret = crypt_ctr_ivmode(ti, ivmode); 2574e889f97aSMilan Broz if (ret < 0) 257533d2f09fSMilan Broz return ret; 25761da177e4SLinus Torvalds 2577da31a078SMilan Broz /* Initialize and set key */ 2578da31a078SMilan Broz ret = crypt_set_key(cc, key); 2579da31a078SMilan Broz if (ret < 0) { 2580da31a078SMilan Broz ti->error = "Error decoding and setting key"; 258133d2f09fSMilan Broz return ret; 2582da31a078SMilan Broz } 2583da31a078SMilan Broz 
25841da177e4SLinus Torvalds /* Allocate IV */ 25851da177e4SLinus Torvalds if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { 25861da177e4SLinus Torvalds ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); 25871da177e4SLinus Torvalds if (ret < 0) { 25881da177e4SLinus Torvalds ti->error = "Error creating IV"; 258933d2f09fSMilan Broz return ret; 25901da177e4SLinus Torvalds } 25911da177e4SLinus Torvalds } 25921da177e4SLinus Torvalds 25931da177e4SLinus Torvalds /* Initialize IV (set keys for ESSIV etc) */ 25941da177e4SLinus Torvalds if (cc->iv_gen_ops && cc->iv_gen_ops->init) { 25951da177e4SLinus Torvalds ret = cc->iv_gen_ops->init(cc); 25961da177e4SLinus Torvalds if (ret < 0) { 25971da177e4SLinus Torvalds ti->error = "Error initialising IV"; 25981da177e4SLinus Torvalds return ret; 25991da177e4SLinus Torvalds } 26001da177e4SLinus Torvalds } 26011da177e4SLinus Torvalds 2602dc94902bSOndrej Kozina /* wipe the kernel key payload copy */ 2603dc94902bSOndrej Kozina if (cc->key_string) 2604dc94902bSOndrej Kozina memset(cc->key, 0, cc->key_size * sizeof(u8)); 2605dc94902bSOndrej Kozina 260633d2f09fSMilan Broz return ret; 26071da177e4SLinus Torvalds } 26081da177e4SLinus Torvalds 2609ef43aa38SMilan Broz static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv) 2610ef43aa38SMilan Broz { 2611ef43aa38SMilan Broz struct crypt_config *cc = ti->private; 2612ef43aa38SMilan Broz struct dm_arg_set as; 26135916a22bSEric Biggers static const struct dm_arg _args[] = { 26148f0009a2SMilan Broz {0, 6, "Invalid number of feature args"}, 2615ef43aa38SMilan Broz }; 2616ef43aa38SMilan Broz unsigned int opt_params, val; 2617ef43aa38SMilan Broz const char *opt_string, *sval; 26188f0009a2SMilan Broz char dummy; 2619ef43aa38SMilan Broz int ret; 2620ef43aa38SMilan Broz 2621ef43aa38SMilan Broz /* Optional parameters */ 2622ef43aa38SMilan Broz as.argc = argc; 2623ef43aa38SMilan Broz as.argv = argv; 2624ef43aa38SMilan Broz 2625ef43aa38SMilan Broz ret = dm_read_arg_group(_args, &as, &opt_params, 
&ti->error); 2626ef43aa38SMilan Broz if (ret) 26271da177e4SLinus Torvalds return ret; 26281da177e4SLinus Torvalds 2629ef43aa38SMilan Broz while (opt_params--) { 2630ef43aa38SMilan Broz opt_string = dm_shift_arg(&as); 2631ef43aa38SMilan Broz if (!opt_string) { 2632ef43aa38SMilan Broz ti->error = "Not enough feature arguments"; 2633ef43aa38SMilan Broz return -EINVAL; 2634ef43aa38SMilan Broz } 2635ef43aa38SMilan Broz 2636ef43aa38SMilan Broz if (!strcasecmp(opt_string, "allow_discards")) 2637ef43aa38SMilan Broz ti->num_discard_bios = 1; 2638ef43aa38SMilan Broz 2639ef43aa38SMilan Broz else if (!strcasecmp(opt_string, "same_cpu_crypt")) 2640ef43aa38SMilan Broz set_bit(DM_CRYPT_SAME_CPU, &cc->flags); 2641ef43aa38SMilan Broz 2642ef43aa38SMilan Broz else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) 2643ef43aa38SMilan Broz set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); 2644ef43aa38SMilan Broz else if (sscanf(opt_string, "integrity:%u:", &val) == 1) { 2645ef43aa38SMilan Broz if (val == 0 || val > MAX_TAG_SIZE) { 2646ef43aa38SMilan Broz ti->error = "Invalid integrity arguments"; 2647ef43aa38SMilan Broz return -EINVAL; 2648ef43aa38SMilan Broz } 2649ef43aa38SMilan Broz cc->on_disk_tag_size = val; 2650ef43aa38SMilan Broz sval = strchr(opt_string + strlen("integrity:"), ':') + 1; 2651ef43aa38SMilan Broz if (!strcasecmp(sval, "aead")) { 2652ef43aa38SMilan Broz set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); 2653ef43aa38SMilan Broz } else if (strcasecmp(sval, "none")) { 2654ef43aa38SMilan Broz ti->error = "Unknown integrity profile"; 2655ef43aa38SMilan Broz return -EINVAL; 2656ef43aa38SMilan Broz } 2657ef43aa38SMilan Broz 2658ef43aa38SMilan Broz cc->cipher_auth = kstrdup(sval, GFP_KERNEL); 2659ef43aa38SMilan Broz if (!cc->cipher_auth) 26601da177e4SLinus Torvalds return -ENOMEM; 2661ff3af92bSMikulas Patocka } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) { 26628f0009a2SMilan Broz if (cc->sector_size < (1 << SECTOR_SHIFT) || 
26638f0009a2SMilan Broz cc->sector_size > 4096 || 2664ff3af92bSMikulas Patocka (cc->sector_size & (cc->sector_size - 1))) { 26658f0009a2SMilan Broz ti->error = "Invalid feature value for sector_size"; 26668f0009a2SMilan Broz return -EINVAL; 26678f0009a2SMilan Broz } 2668783874b0SMilan Broz if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) { 2669783874b0SMilan Broz ti->error = "Device size is not multiple of sector_size feature"; 2670783874b0SMilan Broz return -EINVAL; 2671783874b0SMilan Broz } 2672ff3af92bSMikulas Patocka cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; 26738f0009a2SMilan Broz } else if (!strcasecmp(opt_string, "iv_large_sectors")) 26748f0009a2SMilan Broz set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); 26758f0009a2SMilan Broz else { 2676ef43aa38SMilan Broz ti->error = "Invalid feature arguments"; 2677ef43aa38SMilan Broz return -EINVAL; 2678ef43aa38SMilan Broz } 2679ef43aa38SMilan Broz } 2680ef43aa38SMilan Broz 2681ef43aa38SMilan Broz return 0; 26821da177e4SLinus Torvalds } 26831da177e4SLinus Torvalds 26841da177e4SLinus Torvalds /* 26851da177e4SLinus Torvalds * Construct an encryption mapping: 2686c538f6ecSOndrej Kozina * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start> 26871da177e4SLinus Torvalds */ 26881da177e4SLinus Torvalds static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 26891da177e4SLinus Torvalds { 26901da177e4SLinus Torvalds struct crypt_config *cc; 2691ed0302e8SMichał Mirosław const char *devname = dm_table_device_name(ti->table); 2692c538f6ecSOndrej Kozina int key_size; 2693ef43aa38SMilan Broz unsigned int align_mask; 26941da177e4SLinus Torvalds unsigned long long tmpll; 26951da177e4SLinus Torvalds int ret; 2696ef43aa38SMilan Broz size_t iv_size_padding, additional_req_size; 269731998ef1SMikulas Patocka char dummy; 26981da177e4SLinus Torvalds 2699772ae5f5SMilan Broz if (argc < 5) { 27001da177e4SLinus Torvalds ti->error = "Not enough arguments"; 
27011da177e4SLinus Torvalds return -EINVAL; 27021da177e4SLinus Torvalds } 27031da177e4SLinus Torvalds 2704c538f6ecSOndrej Kozina key_size = get_key_size(&argv[1]); 2705c538f6ecSOndrej Kozina if (key_size < 0) { 2706c538f6ecSOndrej Kozina ti->error = "Cannot parse key size"; 2707c538f6ecSOndrej Kozina return -EINVAL; 2708c538f6ecSOndrej Kozina } 27091da177e4SLinus Torvalds 27109c81c99bSZhengyuan Liu cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL); 27111da177e4SLinus Torvalds if (!cc) { 27121da177e4SLinus Torvalds ti->error = "Cannot allocate encryption context"; 27131da177e4SLinus Torvalds return -ENOMEM; 27141da177e4SLinus Torvalds } 27151da177e4SLinus Torvalds cc->key_size = key_size; 27168f0009a2SMilan Broz cc->sector_size = (1 << SECTOR_SHIFT); 2717ff3af92bSMikulas Patocka cc->sector_shift = 0; 27181da177e4SLinus Torvalds 27191da177e4SLinus Torvalds ti->private = cc; 2720ef43aa38SMilan Broz 27215059353dSMikulas Patocka spin_lock(&dm_crypt_clients_lock); 27225059353dSMikulas Patocka dm_crypt_clients_n++; 27235059353dSMikulas Patocka crypt_calculate_pages_per_client(); 27245059353dSMikulas Patocka spin_unlock(&dm_crypt_clients_lock); 27255059353dSMikulas Patocka 27265059353dSMikulas Patocka ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL); 27275059353dSMikulas Patocka if (ret < 0) 27285059353dSMikulas Patocka goto bad; 27295059353dSMikulas Patocka 2730ef43aa38SMilan Broz /* Optional parameters need to be read before cipher constructor */ 2731ef43aa38SMilan Broz if (argc > 5) { 2732ef43aa38SMilan Broz ret = crypt_ctr_optional(ti, argc - 5, &argv[5]); 2733ef43aa38SMilan Broz if (ret) 2734ef43aa38SMilan Broz goto bad; 2735ef43aa38SMilan Broz } 2736ef43aa38SMilan Broz 27371da177e4SLinus Torvalds ret = crypt_ctr_cipher(ti, argv[0], argv[1]); 27381da177e4SLinus Torvalds if (ret < 0) 27391da177e4SLinus Torvalds goto bad; 27401da177e4SLinus Torvalds 274133d2f09fSMilan Broz if (crypt_integrity_aead(cc)) { 2742ef43aa38SMilan Broz cc->dmreq_start = 
sizeof(struct aead_request); 2743ef43aa38SMilan Broz cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc)); 2744ef43aa38SMilan Broz align_mask = crypto_aead_alignmask(any_tfm_aead(cc)); 2745ef43aa38SMilan Broz } else { 2746bbdb23b5SHerbert Xu cc->dmreq_start = sizeof(struct skcipher_request); 2747bbdb23b5SHerbert Xu cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc)); 2748ef43aa38SMilan Broz align_mask = crypto_skcipher_alignmask(any_tfm(cc)); 2749ef43aa38SMilan Broz } 2750d49ec52fSMikulas Patocka cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); 2751d49ec52fSMikulas Patocka 2752ef43aa38SMilan Broz if (align_mask < CRYPTO_MINALIGN) { 2753d49ec52fSMikulas Patocka /* Allocate the padding exactly */ 2754d49ec52fSMikulas Patocka iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) 2755ef43aa38SMilan Broz & align_mask; 2756d49ec52fSMikulas Patocka } else { 2757d49ec52fSMikulas Patocka /* 2758d49ec52fSMikulas Patocka * If the cipher requires greater alignment than kmalloc 2759d49ec52fSMikulas Patocka * alignment, we don't know the exact position of the 2760d49ec52fSMikulas Patocka * initialization vector. We must assume worst case. 2761d49ec52fSMikulas Patocka */ 2762ef43aa38SMilan Broz iv_size_padding = align_mask; 2763d49ec52fSMikulas Patocka } 27641da177e4SLinus Torvalds 2765ef43aa38SMilan Broz /* ...| IV + padding | original IV | original sec. 
number | bio tag offset | */ 2766ef43aa38SMilan Broz additional_req_size = sizeof(struct dm_crypt_request) + 2767ef43aa38SMilan Broz iv_size_padding + cc->iv_size + 2768ef43aa38SMilan Broz cc->iv_size + 2769ef43aa38SMilan Broz sizeof(uint64_t) + 2770ef43aa38SMilan Broz sizeof(unsigned int); 2771ef43aa38SMilan Broz 27726f1c819cSKent Overstreet ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size); 27736f1c819cSKent Overstreet if (ret) { 27741da177e4SLinus Torvalds ti->error = "Cannot allocate crypt request mempool"; 27751da177e4SLinus Torvalds goto bad; 27761da177e4SLinus Torvalds } 27771da177e4SLinus Torvalds 277830187e1dSMike Snitzer cc->per_bio_data_size = ti->per_io_data_size = 2779ef43aa38SMilan Broz ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size, 2780d49ec52fSMikulas Patocka ARCH_KMALLOC_MINALIGN); 2781298a9fa0SMikulas Patocka 27826f1c819cSKent Overstreet ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc); 27836f1c819cSKent Overstreet if (ret) { 27848b004457SMilan Broz ti->error = "Cannot allocate page mempool"; 2785e48d4bbfSMilan Broz goto bad; 27861da177e4SLinus Torvalds } 2787e48d4bbfSMilan Broz 27886f1c819cSKent Overstreet ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS); 27896f1c819cSKent Overstreet if (ret) { 27900c395b0fSMilan Broz ti->error = "Cannot allocate crypt bioset"; 2791cabf08e4SMilan Broz goto bad; 279293e605c2SMilan Broz } 2793cabf08e4SMilan Broz 27947145c241SMikulas Patocka mutex_init(&cc->bio_alloc_lock); 27957145c241SMikulas Patocka 2796cabf08e4SMilan Broz ret = -EINVAL; 27978f0009a2SMilan Broz if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) || 27988f0009a2SMilan Broz (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) { 2799cabf08e4SMilan Broz ti->error = "Invalid iv_offset sector"; 2800cabf08e4SMilan Broz goto bad; 28011da177e4SLinus Torvalds } 2802d2a7ad29SKiyoshi Ueda cc->iv_offset = tmpll; 28031da177e4SLinus Torvalds 
2804e80d1c80SVivek Goyal ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev); 2805e80d1c80SVivek Goyal if (ret) { 28061da177e4SLinus Torvalds ti->error = "Device lookup failed"; 28071da177e4SLinus Torvalds goto bad; 28081da177e4SLinus Torvalds } 28091da177e4SLinus Torvalds 2810e80d1c80SVivek Goyal ret = -EINVAL; 2811ef87bfc2SMilan Broz if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { 28121da177e4SLinus Torvalds ti->error = "Invalid device sector"; 28131da177e4SLinus Torvalds goto bad; 28141da177e4SLinus Torvalds } 28151da177e4SLinus Torvalds cc->start = tmpll; 28161da177e4SLinus Torvalds 281733d2f09fSMilan Broz if (crypt_integrity_aead(cc) || cc->integrity_iv_size) { 2818ef43aa38SMilan Broz ret = crypt_integrity_ctr(cc, ti); 2819772ae5f5SMilan Broz if (ret) 2820772ae5f5SMilan Broz goto bad; 2821772ae5f5SMilan Broz 2822ef43aa38SMilan Broz cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size; 2823ef43aa38SMilan Broz if (!cc->tag_pool_max_sectors) 2824ef43aa38SMilan Broz cc->tag_pool_max_sectors = 1; 2825ef43aa38SMilan Broz 28266f1c819cSKent Overstreet ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS, 2827ef43aa38SMilan Broz cc->tag_pool_max_sectors * cc->on_disk_tag_size); 28286f1c819cSKent Overstreet if (ret) { 2829ef43aa38SMilan Broz ti->error = "Cannot allocate integrity tags mempool"; 2830f3396c58SMikulas Patocka goto bad; 2831f3396c58SMikulas Patocka } 2832772ae5f5SMilan Broz 2833583fe747SMikulas Patocka cc->tag_pool_max_sectors <<= cc->sector_shift; 2834f3396c58SMikulas Patocka } 2835772ae5f5SMilan Broz 28361da177e4SLinus Torvalds ret = -ENOMEM; 2837ed0302e8SMichał Mirosław cc->io_queue = alloc_workqueue("kcryptd_io/%s", 2838ed0302e8SMichał Mirosław WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 2839ed0302e8SMichał Mirosław 1, devname); 28401da177e4SLinus Torvalds if (!cc->io_queue) { 28411da177e4SLinus Torvalds ti->error = "Couldn't create kcryptd io queue"; 28421da177e4SLinus Torvalds 
goto bad; 28431da177e4SLinus Torvalds } 284437af6560SChristophe Saout 2845f3396c58SMikulas Patocka if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) 2846ed0302e8SMichał Mirosław cc->crypt_queue = alloc_workqueue("kcryptd/%s", 2847ed0302e8SMichał Mirosław WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 2848ed0302e8SMichał Mirosław 1, devname); 2849f3396c58SMikulas Patocka else 2850ed0302e8SMichał Mirosław cc->crypt_queue = alloc_workqueue("kcryptd/%s", 2851a1b89132STim Murray WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, 2852ed0302e8SMichał Mirosław num_online_cpus(), devname); 28531da177e4SLinus Torvalds if (!cc->crypt_queue) { 28541da177e4SLinus Torvalds ti->error = "Couldn't create kcryptd queue"; 28551da177e4SLinus Torvalds goto bad; 28561da177e4SLinus Torvalds } 28571da177e4SLinus Torvalds 2858c7329effSMikulas Patocka spin_lock_init(&cc->write_thread_lock); 2859b3c5fd30SMikulas Patocka cc->write_tree = RB_ROOT; 2860dc267621SMikulas Patocka 2861ed0302e8SMichał Mirosław cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname); 2862dc267621SMikulas Patocka if (IS_ERR(cc->write_thread)) { 2863dc267621SMikulas Patocka ret = PTR_ERR(cc->write_thread); 2864dc267621SMikulas Patocka cc->write_thread = NULL; 2865dc267621SMikulas Patocka ti->error = "Couldn't spawn write thread"; 2866dc267621SMikulas Patocka goto bad; 2867dc267621SMikulas Patocka } 2868dc267621SMikulas Patocka wake_up_process(cc->write_thread); 2869dc267621SMikulas Patocka 287055a62eefSAlasdair G Kergon ti->num_flush_bios = 1; 2871983c7db3SMilan Broz 28721da177e4SLinus Torvalds return 0; 28731da177e4SLinus Torvalds 28741da177e4SLinus Torvalds bad: 28751da177e4SLinus Torvalds crypt_dtr(ti); 28761da177e4SLinus Torvalds return ret; 2877647c7db1SMikulas Patocka } 2878647c7db1SMikulas Patocka 28797de3ee57SMikulas Patocka static int crypt_map(struct dm_target *ti, struct bio *bio) 28801da177e4SLinus Torvalds { 28811da177e4SLinus Torvalds struct dm_crypt_io *io; 
288249a8a920SAlasdair G Kergon struct crypt_config *cc = ti->private; 2883647c7db1SMikulas Patocka 2884772ae5f5SMilan Broz /* 288528a8f0d3SMike Christie * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. 288628a8f0d3SMike Christie * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight 2887e6047149SMike Christie * - for REQ_OP_DISCARD caller must use flush if IO ordering matters 2888772ae5f5SMilan Broz */ 28891eff9d32SJens Axboe if (unlikely(bio->bi_opf & REQ_PREFLUSH || 289028a8f0d3SMike Christie bio_op(bio) == REQ_OP_DISCARD)) { 289174d46992SChristoph Hellwig bio_set_dev(bio, cc->dev->bdev); 2892772ae5f5SMilan Broz if (bio_sectors(bio)) 28934f024f37SKent Overstreet bio->bi_iter.bi_sector = cc->start + 28944f024f37SKent Overstreet dm_target_offset(ti, bio->bi_iter.bi_sector); 2895647c7db1SMikulas Patocka return DM_MAPIO_REMAPPED; 2896647c7db1SMikulas Patocka } 28971da177e4SLinus Torvalds 28984e870e94SMikulas Patocka /* 28994e870e94SMikulas Patocka * Check if bio is too large, split as needed. 29004e870e94SMikulas Patocka */ 29014e870e94SMikulas Patocka if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) && 2902ef43aa38SMilan Broz (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size)) 29034e870e94SMikulas Patocka dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT)); 29044e870e94SMikulas Patocka 29058f0009a2SMilan Broz /* 29068f0009a2SMilan Broz * Ensure that bio is a multiple of internal sector encryption size 29078f0009a2SMilan Broz * and is aligned to this size as defined in IO hints. 
29088f0009a2SMilan Broz */ 29098f0009a2SMilan Broz if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0)) 2910846785e6SChristoph Hellwig return DM_MAPIO_KILL; 29118f0009a2SMilan Broz 29128f0009a2SMilan Broz if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1))) 2913846785e6SChristoph Hellwig return DM_MAPIO_KILL; 29148f0009a2SMilan Broz 2915298a9fa0SMikulas Patocka io = dm_per_bio_data(bio, cc->per_bio_data_size); 2916298a9fa0SMikulas Patocka crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); 2917ef43aa38SMilan Broz 2918ef43aa38SMilan Broz if (cc->on_disk_tag_size) { 2919583fe747SMikulas Patocka unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift); 2920ef43aa38SMilan Broz 2921ef43aa38SMilan Broz if (unlikely(tag_len > KMALLOC_MAX_SIZE) || 2922583fe747SMikulas Patocka unlikely(!(io->integrity_metadata = kmalloc(tag_len, 2923ef43aa38SMilan Broz GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) { 2924ef43aa38SMilan Broz if (bio_sectors(bio) > cc->tag_pool_max_sectors) 2925ef43aa38SMilan Broz dm_accept_partial_bio(bio, cc->tag_pool_max_sectors); 29266f1c819cSKent Overstreet io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO); 2927ef43aa38SMilan Broz io->integrity_metadata_from_pool = true; 2928ef43aa38SMilan Broz } 2929ef43aa38SMilan Broz } 2930ef43aa38SMilan Broz 293133d2f09fSMilan Broz if (crypt_integrity_aead(cc)) 2932ef43aa38SMilan Broz io->ctx.r.req_aead = (struct aead_request *)(io + 1); 2933ef43aa38SMilan Broz else 2934ef43aa38SMilan Broz io->ctx.r.req = (struct skcipher_request *)(io + 1); 29351da177e4SLinus Torvalds 293620c82538SMilan Broz if (bio_data_dir(io->base_bio) == READ) { 293720c82538SMilan Broz if (kcryptd_io_read(io, GFP_NOWAIT)) 2938dc267621SMikulas Patocka kcryptd_queue_read(io); 293920c82538SMilan Broz } else 29404ee218cdSAndrew Morton kcryptd_queue_crypt(io); 29414ee218cdSAndrew Morton 29421da177e4SLinus Torvalds return 
DM_MAPIO_SUBMITTED; 29431da177e4SLinus Torvalds } 29441da177e4SLinus Torvalds 2945fd7c092eSMikulas Patocka static void crypt_status(struct dm_target *ti, status_type_t type, 29461f4e0ff0SAlasdair G Kergon unsigned status_flags, char *result, unsigned maxlen) 29471da177e4SLinus Torvalds { 29485ebaee6dSMilan Broz struct crypt_config *cc = ti->private; 2949fd7c092eSMikulas Patocka unsigned i, sz = 0; 2950f3396c58SMikulas Patocka int num_feature_args = 0; 29511da177e4SLinus Torvalds 29521da177e4SLinus Torvalds switch (type) { 29531da177e4SLinus Torvalds case STATUSTYPE_INFO: 29541da177e4SLinus Torvalds result[0] = '\0'; 29551da177e4SLinus Torvalds break; 29561da177e4SLinus Torvalds 29571da177e4SLinus Torvalds case STATUSTYPE_TABLE: 29587dbcd137SMilan Broz DMEMIT("%s ", cc->cipher_string); 29591da177e4SLinus Torvalds 2960c538f6ecSOndrej Kozina if (cc->key_size > 0) { 2961c538f6ecSOndrej Kozina if (cc->key_string) 2962c538f6ecSOndrej Kozina DMEMIT(":%u:%s", cc->key_size, cc->key_string); 2963c538f6ecSOndrej Kozina else 2964fd7c092eSMikulas Patocka for (i = 0; i < cc->key_size; i++) 2965fd7c092eSMikulas Patocka DMEMIT("%02x", cc->key[i]); 2966c538f6ecSOndrej Kozina } else 2967fd7c092eSMikulas Patocka DMEMIT("-"); 29681da177e4SLinus Torvalds 29691da177e4SLinus Torvalds DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, 29701da177e4SLinus Torvalds cc->dev->name, (unsigned long long)cc->start); 2971772ae5f5SMilan Broz 2972f3396c58SMikulas Patocka num_feature_args += !!ti->num_discard_bios; 2973f3396c58SMikulas Patocka num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); 29740f5d8e6eSMikulas Patocka num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); 2975ff3af92bSMikulas Patocka num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); 29768f0009a2SMilan Broz num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); 2977ef43aa38SMilan Broz if (cc->on_disk_tag_size) 2978ef43aa38SMilan Broz num_feature_args++; 2979f3396c58SMikulas 
Patocka if (num_feature_args) { 2980f3396c58SMikulas Patocka DMEMIT(" %d", num_feature_args); 298155a62eefSAlasdair G Kergon if (ti->num_discard_bios) 2982f3396c58SMikulas Patocka DMEMIT(" allow_discards"); 2983f3396c58SMikulas Patocka if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) 2984f3396c58SMikulas Patocka DMEMIT(" same_cpu_crypt"); 29850f5d8e6eSMikulas Patocka if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) 29860f5d8e6eSMikulas Patocka DMEMIT(" submit_from_crypt_cpus"); 2987ef43aa38SMilan Broz if (cc->on_disk_tag_size) 2988ef43aa38SMilan Broz DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth); 29898f0009a2SMilan Broz if (cc->sector_size != (1 << SECTOR_SHIFT)) 29908f0009a2SMilan Broz DMEMIT(" sector_size:%d", cc->sector_size); 29918f0009a2SMilan Broz if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) 29928f0009a2SMilan Broz DMEMIT(" iv_large_sectors"); 2993f3396c58SMikulas Patocka } 2994772ae5f5SMilan Broz 29951da177e4SLinus Torvalds break; 29961da177e4SLinus Torvalds } 29971da177e4SLinus Torvalds } 29981da177e4SLinus Torvalds 2999e48d4bbfSMilan Broz static void crypt_postsuspend(struct dm_target *ti) 3000e48d4bbfSMilan Broz { 3001e48d4bbfSMilan Broz struct crypt_config *cc = ti->private; 3002e48d4bbfSMilan Broz 3003e48d4bbfSMilan Broz set_bit(DM_CRYPT_SUSPENDED, &cc->flags); 3004e48d4bbfSMilan Broz } 3005e48d4bbfSMilan Broz 3006e48d4bbfSMilan Broz static int crypt_preresume(struct dm_target *ti) 3007e48d4bbfSMilan Broz { 3008e48d4bbfSMilan Broz struct crypt_config *cc = ti->private; 3009e48d4bbfSMilan Broz 3010e48d4bbfSMilan Broz if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { 3011e48d4bbfSMilan Broz DMERR("aborting resume - crypt key is not set."); 3012e48d4bbfSMilan Broz return -EAGAIN; 3013e48d4bbfSMilan Broz } 3014e48d4bbfSMilan Broz 3015e48d4bbfSMilan Broz return 0; 3016e48d4bbfSMilan Broz } 3017e48d4bbfSMilan Broz 3018e48d4bbfSMilan Broz static void crypt_resume(struct dm_target *ti) 3019e48d4bbfSMilan Broz { 3020e48d4bbfSMilan Broz 
struct crypt_config *cc = ti->private; 3021e48d4bbfSMilan Broz 3022e48d4bbfSMilan Broz clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); 3023e48d4bbfSMilan Broz } 3024e48d4bbfSMilan Broz 3025e48d4bbfSMilan Broz /* Message interface 3026e48d4bbfSMilan Broz * key set <key> 3027e48d4bbfSMilan Broz * key wipe 3028e48d4bbfSMilan Broz */ 30291eb5fa84SMike Snitzer static int crypt_message(struct dm_target *ti, unsigned argc, char **argv, 30301eb5fa84SMike Snitzer char *result, unsigned maxlen) 3031e48d4bbfSMilan Broz { 3032e48d4bbfSMilan Broz struct crypt_config *cc = ti->private; 3033c538f6ecSOndrej Kozina int key_size, ret = -EINVAL; 3034e48d4bbfSMilan Broz 3035e48d4bbfSMilan Broz if (argc < 2) 3036e48d4bbfSMilan Broz goto error; 3037e48d4bbfSMilan Broz 3038498f0103SMike Snitzer if (!strcasecmp(argv[0], "key")) { 3039e48d4bbfSMilan Broz if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { 3040e48d4bbfSMilan Broz DMWARN("not suspended during key manipulation."); 3041e48d4bbfSMilan Broz return -EINVAL; 3042e48d4bbfSMilan Broz } 3043498f0103SMike Snitzer if (argc == 3 && !strcasecmp(argv[1], "set")) { 3044c538f6ecSOndrej Kozina /* The key size may not be changed. 
*/ 3045c538f6ecSOndrej Kozina key_size = get_key_size(&argv[2]); 3046c538f6ecSOndrej Kozina if (key_size < 0 || cc->key_size != key_size) { 3047c538f6ecSOndrej Kozina memset(argv[2], '0', strlen(argv[2])); 3048c538f6ecSOndrej Kozina return -EINVAL; 3049c538f6ecSOndrej Kozina } 3050c538f6ecSOndrej Kozina 3051542da317SMilan Broz ret = crypt_set_key(cc, argv[2]); 3052542da317SMilan Broz if (ret) 3053542da317SMilan Broz return ret; 3054542da317SMilan Broz if (cc->iv_gen_ops && cc->iv_gen_ops->init) 3055542da317SMilan Broz ret = cc->iv_gen_ops->init(cc); 3056dc94902bSOndrej Kozina /* wipe the kernel key payload copy */ 3057dc94902bSOndrej Kozina if (cc->key_string) 3058dc94902bSOndrej Kozina memset(cc->key, 0, cc->key_size * sizeof(u8)); 3059542da317SMilan Broz return ret; 3060542da317SMilan Broz } 30614a52ffc7SMilan Broz if (argc == 2 && !strcasecmp(argv[1], "wipe")) 3062e48d4bbfSMilan Broz return crypt_wipe_key(cc); 3063e48d4bbfSMilan Broz } 3064e48d4bbfSMilan Broz 3065e48d4bbfSMilan Broz error: 3066e48d4bbfSMilan Broz DMWARN("unrecognised message received."); 3067e48d4bbfSMilan Broz return -EINVAL; 3068e48d4bbfSMilan Broz } 3069e48d4bbfSMilan Broz 3070af4874e0SMike Snitzer static int crypt_iterate_devices(struct dm_target *ti, 3071af4874e0SMike Snitzer iterate_devices_callout_fn fn, void *data) 3072af4874e0SMike Snitzer { 3073af4874e0SMike Snitzer struct crypt_config *cc = ti->private; 3074af4874e0SMike Snitzer 30755dea271bSMike Snitzer return fn(ti, cc->dev, cc->start, ti->len, data); 3076af4874e0SMike Snitzer } 3077af4874e0SMike Snitzer 3078586b286bSMike Snitzer static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) 3079586b286bSMike Snitzer { 30808f0009a2SMilan Broz struct crypt_config *cc = ti->private; 30818f0009a2SMilan Broz 3082586b286bSMike Snitzer /* 3083586b286bSMike Snitzer * Unfortunate constraint that is required to avoid the potential 3084586b286bSMike Snitzer * for exceeding underlying device's max_segments limits -- due to 
3085586b286bSMike Snitzer * crypt_alloc_buffer() possibly allocating pages for the encryption 3086586b286bSMike Snitzer * bio that are not as physically contiguous as the original bio. 3087586b286bSMike Snitzer */ 3088586b286bSMike Snitzer limits->max_segment_size = PAGE_SIZE; 30898f0009a2SMilan Broz 3090bc9e9cf0SMikulas Patocka limits->logical_block_size = 3091bc9e9cf0SMikulas Patocka max_t(unsigned short, limits->logical_block_size, cc->sector_size); 3092bc9e9cf0SMikulas Patocka limits->physical_block_size = 3093bc9e9cf0SMikulas Patocka max_t(unsigned, limits->physical_block_size, cc->sector_size); 3094bc9e9cf0SMikulas Patocka limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); 3095586b286bSMike Snitzer } 3096586b286bSMike Snitzer 30971da177e4SLinus Torvalds static struct target_type crypt_target = { 30981da177e4SLinus Torvalds .name = "crypt", 3099dc94902bSOndrej Kozina .version = {1, 18, 1}, 31001da177e4SLinus Torvalds .module = THIS_MODULE, 31011da177e4SLinus Torvalds .ctr = crypt_ctr, 31021da177e4SLinus Torvalds .dtr = crypt_dtr, 31031da177e4SLinus Torvalds .map = crypt_map, 31041da177e4SLinus Torvalds .status = crypt_status, 3105e48d4bbfSMilan Broz .postsuspend = crypt_postsuspend, 3106e48d4bbfSMilan Broz .preresume = crypt_preresume, 3107e48d4bbfSMilan Broz .resume = crypt_resume, 3108e48d4bbfSMilan Broz .message = crypt_message, 3109af4874e0SMike Snitzer .iterate_devices = crypt_iterate_devices, 3110586b286bSMike Snitzer .io_hints = crypt_io_hints, 31111da177e4SLinus Torvalds }; 31121da177e4SLinus Torvalds 31131da177e4SLinus Torvalds static int __init dm_crypt_init(void) 31141da177e4SLinus Torvalds { 31151da177e4SLinus Torvalds int r; 31161da177e4SLinus Torvalds 31171da177e4SLinus Torvalds r = dm_register_target(&crypt_target); 311894f5e024SMikulas Patocka if (r < 0) 311972d94861SAlasdair G Kergon DMERR("register failed %d", r); 31201da177e4SLinus Torvalds 31211da177e4SLinus Torvalds return r; 31221da177e4SLinus Torvalds } 31231da177e4SLinus 
Torvalds 31241da177e4SLinus Torvalds static void __exit dm_crypt_exit(void) 31251da177e4SLinus Torvalds { 312610d3bd09SMikulas Patocka dm_unregister_target(&crypt_target); 31271da177e4SLinus Torvalds } 31281da177e4SLinus Torvalds 31291da177e4SLinus Torvalds module_init(dm_crypt_init); 31301da177e4SLinus Torvalds module_exit(dm_crypt_exit); 31311da177e4SLinus Torvalds 3132bf14299fSJana Saout MODULE_AUTHOR("Jana Saout <jana@saout.de>"); 31331da177e4SLinus Torvalds MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); 31341da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 3135