// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>

static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;
	u8 *iv = walk->iv;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_encrypt;

	do {
		/* C[i] = E(K, P[i] ^ C[i-1]); the IV buffer carries C[i-1]. */
		crypto_xor(iv, src, bsize);
		fn(tfm, dst, iv);
		memcpy(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;
	u8 *iv = walk->iv;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_encrypt;

	do {
		/* Encrypt in place; the previous ciphertext block becomes the IV. */
		crypto_xor(src, iv, bsize);
		fn(tfm, src, src);
		iv = src;

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

static int crypto_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			err = crypto_cbc_encrypt_inplace(&walk, skcipher);
		else
			err = crypto_cbc_encrypt_segment(&walk, skcipher);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}

static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;
	u8 *iv = walk->iv;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_decrypt;

	do {
		/* P[i] = D(K, C[i]) ^ C[i-1]; src still holds intact ciphertext. */
		fn(tfm, dst, src);
		crypto_xor(dst, iv, bsize);
		iv = src;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}
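/*
 * In-place decryption must run from the last block to the first: CBC
 * needs ciphertext block C[i-1] to recover plaintext P[i], and a forward
 * in-place pass would already have overwritten it. The helper below
 * therefore saves the final ciphertext block as the next IV up front,
 * then walks backwards, XORing each decrypted block with the ciphertext
 * block preceding it.
 */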
static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 last_iv[MAX_CIPHER_BLOCKSIZE];
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_decrypt;

	/* Start of the last block. */
	src += nbytes - (nbytes & (bsize - 1)) - bsize;
	memcpy(last_iv, src, bsize);

	for (;;) {
		fn(tfm, src, src);
		if ((nbytes -= bsize) < bsize)
			break;
		crypto_xor(src, src - bsize, bsize);
		src -= bsize;
	}

	/* The first block chains to the IV carried over from the previous walk step. */
	crypto_xor(src, walk->iv, bsize);
	memcpy(walk->iv, last_iv, bsize);

	return nbytes;
}

static int crypto_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			err = crypto_cbc_decrypt_inplace(&walk, skcipher);
		else
			err = crypto_cbc_decrypt_segment(&walk, skcipher);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}

static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	alg = skcipher_ialg_simple(inst);

	/* CBC requires the block size to be a power of two. */
	err = -EINVAL;
	if (!is_power_of_2(alg->cra_blocksize))
		goto out_free_inst;

	inst->alg.encrypt = crypto_cbc_encrypt;
	inst->alg.decrypt = crypto_cbc_decrypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_free_inst:
		inst->free(inst);
	}

	return err;
}

static struct crypto_template crypto_cbc_tmpl = {
	.name = "cbc",
	.create = crypto_cbc_create,
	.module = THIS_MODULE,
};

static int __init crypto_cbc_module_init(void)
{
	return crypto_register_template(&crypto_cbc_tmpl);
}

static void __exit crypto_cbc_module_exit(void)
{
	crypto_unregister_template(&crypto_cbc_tmpl);
}

subsys_initcall(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cbc");
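/*
 * Usage sketch (not part of this module): a caller instantiates this
 * template by name through the skcipher API, e.g. "cbc(aes)". The
 * outline below is a minimal illustration assuming a 16-byte key and
 * IV; scatterlist setup and most error handling are omitted.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	u8 key[16], iv[16];
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, sg_src, sg_dst, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */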