/* LRW: as defined by Cyril Guyot in
 *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

#define LRW_BUFFER_SIZE 128u

struct priv {
	struct crypto_skcipher *child;
	struct lrw_table_ctx table;
};

struct rctx {
	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];

	be128 t;

	be128 *ext;

	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	unsigned int left;

	struct skcipher_request subreq;
};

static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
	be128 tmp = { 0 };
	int i;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);

void lrw_free_table(struct lrw_table_ctx *ctx)
{
	if (ctx->table)
		gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	return lrw_init_table(&ctx->table, tweak);
}

static inline void inc(be128 *iv)
{
	be64_add_cpu(&iv->b, 1);
	if (!iv->b)
		be64_add_cpu(&iv->a, 1);
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
	int x;
	__be32 *p = (__be32 *) block;

	for (p += 3, x = 0; x < 128; p--, x += 32) {
		u32 val = be32_to_cpup(p);

		if (!~val)
			continue;

		return x + ffz(val);
	}

	/*
	 * Only reached when every bit of the counter is one, i.e. it is
	 * wrapping from all ones to all zeros.  The corresponding difference
	 * is the all-ones block, so use mulinc[127] (Key2 * { 1, ..., 1 })
	 * instead of indexing past the end of the table.
	 */
	return 127;
}
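
/*
 * The skcipher path below processes a request in chunks: pre_crypt()
 * computes PP = P xor T for each block of the current chunk and records
 * the T values in the request buffer, the child cipher then transforms
 * the chunk in place, and post_crypt() xors the recorded T values back
 * in to produce the final output (C = CC xor T).
 */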

static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;

	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}

static int pre_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	be128 *iv;
	bool more;
	int err;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, tfm);

	cryptlen = subreq->cryptlen;
	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, req->iv);

	err = skcipher_walk_virt(&w, subreq, false);
	iv = w.iv;

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			be128_xor(wdst++, &rctx->t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&rctx->t, &rctx->t,
				  &ctx->table.mulinc[get_index128(iv)]);
			inc(iv);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}
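
/*
 * init_crypt() sets up the per-request state shared by encryption and
 * decryption: the subrequest callback, the buffer holding the T values
 * (the on-stack rctx->buf, or an allocation of up to PAGE_SIZE for
 * larger requests), the amount of data left to process, and the initial
 * tweak T = I * Key2 derived from the IV.
 */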

static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = LRW_BUFFER_SIZE;
	if (req->cryptlen > LRW_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		/* only use the larger chunk size if the allocation succeeded */
		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table.table);

	return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	kfree(rctx->ext);
}

static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);
	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

	skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);
	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

	skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}
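
/*
 * Legacy helper for blkcipher-based users: the caller provides the
 * precomputed table context, a buffer for the T values and a crypt_fn
 * that runs the raw block cipher over a batch of blocks (typically a
 * parallelized ECB implementation, such as the x86 SIMD glue code).
 */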

int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct lrw_crypt_req *req)
{
	const unsigned int bsize = LRW_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct lrw_table_ctx *ctx = req->table_ctx;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *iv, *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(walk.nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)walk.iv;
	t_buf[0] = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&t_buf[0], ctx->table);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				/* T <- I*Key2, using the optimization
				 * discussed in the specification */
				be128_xor(&t_buf[i], t,
					  &ctx->mulinc[get_index128(iv)]);
				inc(iv);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	lrw_free_table(&ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}
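
/*
 * Template instantiation: "lrw(X)" wraps an skcipher spawn for X,
 * falling back to "ecb(X)" when X itself is not available.  When the
 * spawn turns out to be an ecb() wrapper, the instance name is mangled
 * from "lrw(ecb(X))" back to "lrw(X)".
 */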

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn, ecb_name, 0,
					   crypto_requires_sync(algt->type,
								algt->mask));
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_drop_spawn;

		if (ecb_name[len - 1] != ')')
			goto err_drop_spawn;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	}

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");