// SPDX-License-Identifier: GPL-2.0-or-later
/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BLOCK_SIZE 16

struct lrw_tfm_ctx {
	struct crypto_skcipher *child;

	/*
	 * Optimizes multiplying a random (non-incrementing, as at the
	 * start of a new sector) value with key2; we could also have
	 * used 4k optimization tables, or no optimization at all (in
	 * which case we would have to store key2 here).
	 */
	struct gf128mul_64k *table;

	/*
	 * stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
	 * needed for optimized multiplication of incrementing values
	 * with key2
	 */
	be128 mulinc[128];
};

struct lrw_request_ctx {
	be128 t;
	struct skcipher_request subreq;
};

static inline void lrw_setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		lrw_setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
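/*
 * Illustrative sketch (not part of this module): why the mulinc[] table
 * built above works.  When the block index I is incremented, I ^ (I + 1)
 * is a run of exactly k + 1 low one-bits, where k is the number of
 * trailing '1' bits of I.  Multiplication by key2 in GF(2^128)
 * distributes over XOR, so each tweak follows from the previous one:
 *
 *   T(I+1) = key2 * (I+1) = key2 * (I ^ (I ^ (I+1)))
 *          = T(I) ^ key2 * { 0,..,0,1,..,1 } = T(I) ^ mulinc[k]
 *
 * The hypothetical, compiled-out demo below (plain C, 32-bit indices for
 * brevity; LRW_MULINC_DEMO is a made-up guard) checks the bit identity
 * that this table indexing relies on.
 */
#ifdef LRW_MULINC_DEMO
#include <assert.h>

static void lrw_mulinc_demo(void)
{
	u32 i;

	for (i = 0; i < 1024; i++) {
		u32 k = 0;

		while ((i >> k) & 1)	/* count trailing '1' bits of i */
			k++;
		/* i ^ (i + 1) is exactly k + 1 low one-bits */
		assert((i ^ (i + 1)) == (1U << (k + 1)) - 1);
	}
}
#endif /* LRW_MULINC_DEMO */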
/*
 * Returns the number of trailing '1' bits in the words of the counter,
 * which is represented by 4 32-bit words, arranged from least to most
 * significant.  At the same time, increments the counter by one.
 *
 * For example:
 *
 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
 * int i = lrw_next_index(counter);
 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
 */
static int lrw_next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);

		counter[i] = 0;
		res += 32;
	}

	/*
	 * If we get here, the counter was all ones and is wrapping around
	 * to all zeros.  This means we must return index 127, i.e. the one
	 * corresponding to key2*{ 1,...,1 }.
	 */
	return 127;
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the lrw_next_index() calls again.
 */
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&t, &t,
				  &ctx->mulinc[lrw_next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int lrw_xor_tweak_pre(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, false);
}

static int lrw_xor_tweak_post(struct skcipher_request *req)
{
	return lrw_xor_tweak(req, true);
}

static void lrw_crypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct lrw_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = lrw_xor_tweak_post(req);
	}

	skcipher_request_complete(req, err);
}
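/*
 * Illustrative sketch (not part of this module): the xor_tweak pre-pass,
 * the inner ecb(..) request and the xor_tweak post-pass together compute,
 * per block,
 *
 *   C = E_K1(P ^ T) ^ T,   with T = key2 (x) I over GF(2^128),
 *
 * which is the LRW equation from the specification.  A hypothetical
 * single-block version, assuming an opaque one-block primitive
 * encrypt_one() (LRW_ONE_BLOCK_DEMO is a made-up guard):
 */
#ifdef LRW_ONE_BLOCK_DEMO
static void lrw_one_block(be128 *c, const be128 *p, const be128 *t,
			  void (*encrypt_one)(be128 *dst, const be128 *src))
{
	be128 x;

	be128_xor(&x, p, t);	/* PP = P ^ T  (xor_tweak pre-pass)  */
	encrypt_one(&x, &x);	/* CC = E_K1(PP)  (inner ECB step)   */
	be128_xor(c, &x, t);	/* C  = CC ^ T (xor_tweak post-pass) */
}
#endif /* LRW_ONE_BLOCK_DEMO */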
static void lrw_init_crypt(struct skcipher_request *req)
{
	const struct lrw_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
				      req);
	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen, req->iv);

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);
}

static int lrw_encrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_encrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_decrypt(struct skcipher_request *req)
{
	struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	lrw_init_crypt(req);
	return lrw_xor_tweak_pre(req) ?:
		crypto_skcipher_decrypt(subreq) ?:
		lrw_xor_tweak_post(req);
}

static int lrw_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct lrw_request_ctx));

	return 0;
}

static void lrw_exit_tfm(struct crypto_skcipher *tfm)
{
	struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void lrw_free_instance(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn,
					   skcipher_crypto_instance(inst),
					   ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
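	/*
	 * Illustration of the mangling below: a spawn whose cra_name is
	 * "ecb(aes)" has the "ecb(" prefix and ")" suffix stripped to
	 * recover "aes", and this instance's cra_name becomes "lrw(aes)".
	 */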
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(be128) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);

	inst->alg.init = lrw_init_tfm;
	inst->alg.exit = lrw_exit_tfm;

	inst->alg.setkey = lrw_setkey;
	inst->alg.encrypt = lrw_encrypt;
	inst->alg.decrypt = lrw_decrypt;

	inst->free = lrw_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		lrw_free_instance(inst);
	}
	return err;
}

static struct crypto_template lrw_tmpl = {
	.name = "lrw",
	.create = lrw_create,
	.module = THIS_MODULE,
};

static int __init lrw_module_init(void)
{
	return crypto_register_template(&lrw_tmpl);
}

static void __exit lrw_module_exit(void)
{
	crypto_unregister_template(&lrw_tmpl);
}

subsys_initcall(lrw_module_init);
module_exit(lrw_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
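/*
 * Usage sketch (not part of this module): the "lrw" template registered
 * above is instantiated by name, so "lrw(aes)" resolves to this code
 * wrapping "ecb(aes)".  Hypothetical caller-side example with error
 * handling trimmed (LRW_USAGE_DEMO is a made-up guard):
 */
#ifdef LRW_USAGE_DEMO
static int lrw_usage_demo(void)
{
	/* 32-byte AES-256 key1 followed by the 16-byte key2 tweak key */
	static const u8 key[32 + LRW_BLOCK_SIZE];
	struct crypto_skcipher *tfm;
	int err;

	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* lrw_setkey() splits off the trailing LRW_BLOCK_SIZE bytes as key2 */
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));

	crypto_free_skcipher(tfm);
	return err;
}
#endif /* LRW_USAGE_DEMO */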