/* LRW: as defined by Cyril Guyot in
 *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BUFFER_SIZE 128u

#define LRW_BLOCK_SIZE 16

struct priv {
	struct crypto_skcipher *child;

	/*
	 * optimizes multiplying a random (non-incrementing, as at the
	 * start of a new sector) value with key2; we could also have
	 * used 4k optimization tables or no optimization at all, in
	 * which case key2 itself would have to be stored here
	 */
	struct gf128mul_64k *table;

	/*
	 * stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
	 * needed for optimized multiplication of incrementing values
	 * with key2
	 */
	be128 mulinc[128];
};

struct rctx {
	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];

	be128 t;

	be128 *ext;

	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	unsigned int left;

	struct skcipher_request subreq;
};

static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}

static inline void inc(be128 *iv)
{
	be64_add_cpu(&iv->b, 1);
	if (!iv->b)
		be64_add_cpu(&iv->a, 1);
}
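/*
 * Note on the incremental tweak update used in pre_crypt() below: when the
 * block index I is incremented by one, I ^ (I + 1) is a run of trailing 1
 * bits that is one bit longer than the run of trailing 1 bits of I.  For
 * example, I = ...0111 gives I + 1 = ...1000 and I ^ (I + 1) = ...1111.
 * Because multiplication in GF(2^128) distributes over XOR, the next tweak
 * T' = key2 * (I + 1) equals T ^ (key2 * (I ^ (I + 1))), which is exactly
 * T ^ mulinc[get_index128(I)], so no full multiplication is needed per
 * block.
 */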
/* This returns the number of consecutive 1 bits starting from the right,
 * e.g. get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
	int x;
	__be32 *p = (__be32 *) block;

	for (p += 3, x = 0; x < 128; p--, x += 32) {
		u32 val = be32_to_cpup(p);

		if (!~val)
			continue;

		return x + ffz(val);
	}

	return x;
}

static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;

	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}

static int pre_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	be128 *iv;
	bool more;
	int err;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, tfm);

	cryptlen = subreq->cryptlen;
	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, req->iv);

	err = skcipher_walk_virt(&w, subreq, false);
	iv = w.iv;

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			be128_xor(wdst++, &rctx->t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&rctx->t, &rctx->t,
				  &ctx->mulinc[get_index128(iv)]);
			inc(iv);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}
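/*
 * Request processing overview: init_crypt() derives the initial tweak
 * T = key2 * IV and, for requests larger than LRW_BUFFER_SIZE, tries to
 * allocate a bigger (up to PAGE_SIZE) buffer for the saved tweak values.
 * The request is then handled in chunks of subreq->cryptlen by the
 * do_encrypt()/do_decrypt() loops: pre_crypt() XORs T into each source
 * block while saving T, the child cipher (an ECB mode skcipher) transforms
 * the chunk in place at rctx->dst, and post_crypt() XORs the saved tweaks
 * back into the output.
 */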
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = LRW_BUFFER_SIZE;
	if (req->cryptlen > LRW_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);

	return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	if (rctx->ext)
		kzfree(rctx->ext);
}

static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS || err == -EBUSY)
			return err;
	}

	exit_crypt(req);
	return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS || err == -EBUSY)
			return err;
	}

	exit_crypt(req);
	return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}
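/*
 * Per-transform setup: init_tfm() instantiates the underlying cipher from
 * the template's spawn and sizes the request context to hold struct rctx
 * on top of the child's own request size; exit_tfm() releases the child
 * and the key2 multiplication table allocated in setkey().
 */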
static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn, ecb_name, 0,
					   crypto_requires_sync(algt->type,
								algt->mask));
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;
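	/*
	 * Naming note: crypto_inst_setname() above derived both cra_name
	 * and cra_driver_name from the underlying algorithm, which is
	 * expected to be an "ecb(...)" wrapper (anything else is rejected
	 * below).  So that the instance can still be looked up as
	 * "lrw(cipher)", the ecb layer is stripped from cra_name below,
	 * while the driver name keeps the full "lrw(ecb(...))" form.
	 */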
	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_drop_spawn;

		if (ecb_name[len - 1] != ')')
			goto err_drop_spawn;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
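/*
 * Usage note (illustrative): the template is instantiated by name, e.g.
 * crypto_alloc_skcipher("lrw(aes)", 0, 0).  The key passed to setkey() is
 * the child cipher key followed by the 16-byte tweak key (key2), so
 * "lrw(aes)" with a 256-bit AES key takes 32 + 16 = 48 bytes.
 */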