/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

#define LRW_BUFFER_SIZE 128u

struct priv {
        struct crypto_skcipher *child;
        struct lrw_table_ctx table;
};

struct rctx {
        be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];

        be128 t;

        be128 *ext;

        struct scatterlist srcbuf[2];
        struct scatterlist dstbuf[2];
        struct scatterlist *src;
        struct scatterlist *dst;

        unsigned int left;

        struct skcipher_request subreq;
};

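/* The tweak for block number I is T = I * Key2 in GF(2^128), computed with
 * the big-bit-endian (bbe) routines from gf128mul.  setbit128_bbe() below
 * sets a single bit of a be128 in that same bit ordering.
 *
 * lrw_init_table() precomputes, per key:
 *   - a 64k multiplication table for Key2 (gf128mul_init_64k_bbe()), and
 *   - mulinc[k] = Key2 * (the constant with bits 0..k set), i.e. Key2 times
 *     the xor-difference of consecutive block numbers ending in k one bits.
 *
 * This lets pre_crypt() and lrw_crypt() update T for the next block with a
 * single table lookup and XOR (the optimization referred to in the comments
 * further down) instead of a full GF(2^128) multiplication per block.
 */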
static inline void setbit128_bbe(void *b, int bit)
{
        __set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
                         BITS_PER_LONG
#else
                         BITS_PER_BYTE
#endif
                        ), b);
}

int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
        be128 tmp = { 0 };
        int i;

        if (ctx->table)
                gf128mul_free_64k(ctx->table);

        /* initialize multiplication table for Key2 */
        ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
        if (!ctx->table)
                return -ENOMEM;

        /* initialize optimization table */
        for (i = 0; i < 128; i++) {
                setbit128_bbe(&tmp, i);
                ctx->mulinc[i] = tmp;
                gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);

void lrw_free_table(struct lrw_table_ctx *ctx)
{
        if (ctx->table)
                gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

static int setkey(struct crypto_skcipher *parent, const u8 *key,
                  unsigned int keylen)
{
        struct priv *ctx = crypto_skcipher_ctx(parent);
        struct crypto_skcipher *child = ctx->child;
        int err, bsize = LRW_BLOCK_SIZE;
        const u8 *tweak = key + keylen - bsize;

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                         CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(child, key, keylen - bsize);
        crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
                                          CRYPTO_TFM_RES_MASK);
        if (err)
                return err;

        return lrw_init_table(&ctx->table, tweak);
}

static inline void inc(be128 *iv)
{
        be64_add_cpu(&iv->b, 1);
        if (!iv->b)
                be64_add_cpu(&iv->a, 1);
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
        int x;
        __be32 *p = (__be32 *) block;

        for (p += 3, x = 0; x < 128; p--, x += 32) {
                u32 val = be32_to_cpup(p);

                if (!~val)
                        continue;

                return x + ffz(val);
        }

        return x;
}

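/* Large requests are handled in chunks of at most subreq->cryptlen bytes
 * (see init_crypt()): pre_crypt() computes the tweaks for one chunk, saves
 * them in rctx->buf (or the larger rctx->ext buffer) while XORing them into
 * the data on the way from src to dst, the child cipher then processes the
 * chunk in place, and post_crypt() below XORs the saved tweaks back into
 * the output, decrements rctx->left and repositions the dst scatterlist for
 * the next chunk (pre_crypt() does the same for the src scatterlist).
 */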
static int post_crypt(struct skcipher_request *req)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        be128 *buf = rctx->ext ?: rctx->buf;
        struct skcipher_request *subreq;
        const int bs = LRW_BLOCK_SIZE;
        struct skcipher_walk w;
        struct scatterlist *sg;
        unsigned offset;
        int err;

        subreq = &rctx->subreq;
        err = skcipher_walk_virt(&w, subreq, false);

        while (w.nbytes) {
                unsigned int avail = w.nbytes;
                be128 *wdst;

                wdst = w.dst.virt.addr;

                do {
                        be128_xor(wdst, buf++, wdst);
                        wdst++;
                } while ((avail -= bs) >= bs);

                err = skcipher_walk_done(&w, avail);
        }

        rctx->left -= subreq->cryptlen;

        if (err || !rctx->left)
                goto out;

        rctx->dst = rctx->dstbuf;

        scatterwalk_done(&w.out, 0, 1);
        sg = w.out.sg;
        offset = w.out.offset;

        if (rctx->dst != sg) {
                rctx->dst[0] = *sg;
                sg_unmark_end(rctx->dst);
                scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
        }
        rctx->dst[0].length -= offset - sg->offset;
        rctx->dst[0].offset = offset;

out:
        return err;
}

static int pre_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct rctx *rctx = skcipher_request_ctx(req);
        struct priv *ctx = crypto_skcipher_ctx(tfm);
        be128 *buf = rctx->ext ?: rctx->buf;
        struct skcipher_request *subreq;
        const int bs = LRW_BLOCK_SIZE;
        struct skcipher_walk w;
        struct scatterlist *sg;
        unsigned cryptlen;
        unsigned offset;
        be128 *iv;
        bool more;
        int err;

        subreq = &rctx->subreq;
        skcipher_request_set_tfm(subreq, tfm);

        cryptlen = subreq->cryptlen;
        more = rctx->left > cryptlen;
        if (!more)
                cryptlen = rctx->left;

        skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
                                   cryptlen, req->iv);

        err = skcipher_walk_virt(&w, subreq, false);
        iv = w.iv;

        while (w.nbytes) {
                unsigned int avail = w.nbytes;
                be128 *wsrc;
                be128 *wdst;

                wsrc = w.src.virt.addr;
                wdst = w.dst.virt.addr;

                do {
                        *buf++ = rctx->t;
                        be128_xor(wdst++, &rctx->t, wsrc++);

                        /* T <- I*Key2, using the optimization
                         * discussed in the specification */
                        be128_xor(&rctx->t, &rctx->t,
                                  &ctx->table.mulinc[get_index128(iv)]);
                        inc(iv);
                } while ((avail -= bs) >= bs);

                err = skcipher_walk_done(&w, avail);
        }

        skcipher_request_set_tfm(subreq, ctx->child);
        skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
                                   cryptlen, NULL);

        if (err || !more)
                goto out;

        rctx->src = rctx->srcbuf;

        scatterwalk_done(&w.in, 0, 1);
        sg = w.in.sg;
        offset = w.in.offset;

        if (rctx->src != sg) {
                rctx->src[0] = *sg;
                sg_unmark_end(rctx->src);
                scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
        }
        rctx->src[0].length -= offset - sg->offset;
        rctx->src[0].offset = offset;

out:
        return err;
}

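/* init_crypt() prepares the common request state: it points the
 * subrequest's completion at the caller-supplied handler, picks a chunk
 * size (LRW_BUFFER_SIZE by default, or up to one page if a larger tweak
 * buffer can be allocated) and derives the initial tweak T = I * Key2 from
 * the IV.  do_encrypt()/do_decrypt() then loop over
 * pre_crypt() -> child cipher -> post_crypt() until the whole request has
 * been processed, or the child returns -EINPROGRESS/-EBUSY, in which case
 * encrypt_done()/decrypt_done() resume the loop from the completion path.
 */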
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
        struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq;
        gfp_t gfp;

        subreq = &rctx->subreq;
        skcipher_request_set_callback(subreq, req->base.flags, done, req);

        gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                                                           GFP_ATOMIC;
        rctx->ext = NULL;

        subreq->cryptlen = LRW_BUFFER_SIZE;
        if (req->cryptlen > LRW_BUFFER_SIZE) {
                unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

                rctx->ext = kmalloc(n, gfp);
                if (rctx->ext)
                        subreq->cryptlen = n;
        }

        rctx->src = req->src;
        rctx->dst = req->dst;
        rctx->left = req->cryptlen;

        /* calculate first value of T */
        memcpy(&rctx->t, req->iv, sizeof(rctx->t));

        /* T <- I*Key2 */
        gf128mul_64k_bbe(&rctx->t, ctx->table.table);

        return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
        struct rctx *rctx = skcipher_request_ctx(req);

        rctx->left = 0;

        if (rctx->ext)
                kfree(rctx->ext);
}

static int do_encrypt(struct skcipher_request *req, int err)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq;

        subreq = &rctx->subreq;

        while (!err && rctx->left) {
                err = pre_crypt(req) ?:
                      crypto_skcipher_encrypt(subreq) ?:
                      post_crypt(req);

                if (err == -EINPROGRESS || err == -EBUSY)
                        return err;
        }

        exit_crypt(req);
        return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_request *req = areq->data;
        struct skcipher_request *subreq;
        struct rctx *rctx;

        rctx = skcipher_request_ctx(req);

        if (err == -EINPROGRESS) {
                if (rctx->left != req->cryptlen)
                        return;
                goto out;
        }

        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

        err = do_encrypt(req, err ?: post_crypt(req));
        if (rctx->left)
                return;

out:
        skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
        return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq;

        subreq = &rctx->subreq;

        while (!err && rctx->left) {
                err = pre_crypt(req) ?:
                      crypto_skcipher_decrypt(subreq) ?:
                      post_crypt(req);

                if (err == -EINPROGRESS || err == -EBUSY)
                        return err;
        }

        exit_crypt(req);
        return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_request *req = areq->data;
        struct skcipher_request *subreq;
        struct rctx *rctx;

        rctx = skcipher_request_ctx(req);

        if (err == -EINPROGRESS) {
                if (rctx->left != req->cryptlen)
                        return;
                goto out;
        }

        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

        err = do_decrypt(req, err ?: post_crypt(req));
        if (rctx->left)
                return;

out:
        skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
        return do_decrypt(req, init_crypt(req, decrypt_done));
}

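/* lrw_crypt() is the exported synchronous entry point built on the legacy
 * blkcipher walk API.  Callers supply their own tweak buffer and an
 * ECB-style crypt_fn via struct lrw_crypt_req; this path is used by
 * drivers that drive the walk themselves (e.g. the accelerated x86 glue
 * code) rather than by the "lrw" template instance registered below.
 */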
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
              struct scatterlist *ssrc, unsigned int nbytes,
              struct lrw_crypt_req *req)
{
        const unsigned int bsize = LRW_BLOCK_SIZE;
        const unsigned int max_blks = req->tbuflen / bsize;
        struct lrw_table_ctx *ctx = req->table_ctx;
        struct blkcipher_walk walk;
        unsigned int nblocks;
        be128 *iv, *src, *dst, *t;
        be128 *t_buf = req->tbuf;
        int err, i;

        BUG_ON(max_blks < 1);

        blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

        err = blkcipher_walk_virt(desc, &walk);
        nbytes = walk.nbytes;
        if (!nbytes)
                return err;

        nblocks = min(walk.nbytes / bsize, max_blks);
        src = (be128 *)walk.src.virt.addr;
        dst = (be128 *)walk.dst.virt.addr;

        /* calculate first value of T */
        iv = (be128 *)walk.iv;
        t_buf[0] = *iv;

        /* T <- I*Key2 */
        gf128mul_64k_bbe(&t_buf[0], ctx->table);

        i = 0;
        goto first;

        for (;;) {
                do {
                        for (i = 0; i < nblocks; i++) {
                                /* T <- I*Key2, using the optimization
                                 * discussed in the specification */
                                be128_xor(&t_buf[i], t,
                                          &ctx->mulinc[get_index128(iv)]);
                                inc(iv);
first:
                                t = &t_buf[i];

                                /* PP <- T xor P */
                                be128_xor(dst + i, t, src + i);
                        }

                        /* CC <- E(Key1,PP) */
                        req->crypt_fn(req->crypt_ctx, (u8 *)dst,
                                      nblocks * bsize);

                        /* C <- T xor CC */
                        for (i = 0; i < nblocks; i++)
                                be128_xor(dst + i, dst + i, &t_buf[i]);

                        src += nblocks;
                        dst += nblocks;
                        nbytes -= nblocks * bsize;
                        nblocks = min(nbytes / bsize, max_blks);
                } while (nblocks > 0);

                err = blkcipher_walk_done(desc, &walk, nbytes);
                nbytes = walk.nbytes;
                if (!nbytes)
                        break;

                nblocks = min(nbytes / bsize, max_blks);
                src = (be128 *)walk.src.virt.addr;
                dst = (be128 *)walk.dst.virt.addr;
        }

        return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);

static int init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
        struct priv *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;

        crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
                                         sizeof(struct rctx));

        return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
        struct priv *ctx = crypto_skcipher_ctx(tfm);

        lrw_free_table(&ctx->table);
        crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
        crypto_drop_skcipher(skcipher_instance_ctx(inst));
        kfree(inst);
}

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_skcipher_spawn *spawn;
        struct skcipher_instance *inst;
        struct crypto_attr_type *algt;
        struct skcipher_alg *alg;
        const char *cipher_name;
        char ecb_name[CRYPTO_MAX_ALG_NAME];
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
                return -EINVAL;

        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
                return PTR_ERR(cipher_name);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        spawn = skcipher_instance_ctx(inst);

        crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
        err = crypto_grab_skcipher(spawn, cipher_name, 0,
                                   crypto_requires_sync(algt->type,
                                                        algt->mask));
        if (err == -ENOENT) {
                err = -ENAMETOOLONG;
                if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
                             cipher_name) >= CRYPTO_MAX_ALG_NAME)
                        goto err_free_inst;

                err = crypto_grab_skcipher(spawn, ecb_name, 0,
                                           crypto_requires_sync(algt->type,
                                                                algt->mask));
        }

        if (err)
                goto err_free_inst;

        alg = crypto_skcipher_spawn_alg(spawn);

        err = -EINVAL;
        if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
                goto err_drop_spawn;

        if (crypto_skcipher_alg_ivsize(alg))
                goto err_drop_spawn;

        err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
                                  &alg->base);
        if (err)
                goto err_drop_spawn;

        err = -EINVAL;
        cipher_name = alg->base.cra_name;

        /* Alas we screwed up the naming so we have to mangle the
         * cipher name.
         */
        if (!strncmp(cipher_name, "ecb(", 4)) {
                unsigned len;

                len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
                if (len < 2 || len >= sizeof(ecb_name))
                        goto err_drop_spawn;

                if (ecb_name[len - 1] != ')')
                        goto err_drop_spawn;

                ecb_name[len - 1] = 0;

                if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                             "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
                        err = -ENAMETOOLONG;
                        goto err_drop_spawn;
                }
        } else
                goto err_drop_spawn;

        inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.base.cra_priority = alg->base.cra_priority;
        inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
        inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
                                       (__alignof__(u64) - 1);

        inst->alg.ivsize = LRW_BLOCK_SIZE;
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
                                LRW_BLOCK_SIZE;
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
                                LRW_BLOCK_SIZE;

        inst->alg.base.cra_ctxsize = sizeof(struct priv);

        inst->alg.init = init_tfm;
        inst->alg.exit = exit_tfm;

        inst->alg.setkey = setkey;
        inst->alg.encrypt = encrypt;
        inst->alg.decrypt = decrypt;

        inst->free = free;

        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;

out:
        return err;

err_drop_spawn:
        crypto_drop_skcipher(spawn);
err_free_inst:
        kfree(inst);
        goto out;
}

static struct crypto_template crypto_tmpl = {
        .name = "lrw",
        .create = create,
        .module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
        return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
        crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");