/* XTS: as defined in IEEE1619/D16
 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 * (sector sizes which are not a multiple of 16 bytes are,
 * however currently unsupported)
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define XTS_BUFFER_SIZE 128u

struct priv {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

struct rctx {
	le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];

	le128 t;

	le128 *ext;

	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	unsigned int left;

	struct skcipher_request subreq;
};

static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}
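/*
 * Data flow implemented by the helpers below, in IEEE 1619 notation
 * (Key1 = data key, Key2 = tweak key; multiplication by alpha is
 * gf128mul_x_ble()):
 *
 *	T_0     = E_Key2(IV)		(init_crypt)
 *	T_(j+1) = T_j * alpha		(pre_crypt)
 *	PP_j    = P_j xor T_j		(pre_crypt, T_j saved in buf/ext)
 *	CC_j    = E_Key1(PP_j)		("ecb(cipher)" sub-request)
 *	C_j     = CC_j xor T_j		(post_crypt)
 *
 * Requests larger than one sub-request buffer are processed chunk by
 * chunk; post_crypt()/pre_crypt() also advance the destination/source
 * scatterlists between chunks.
 */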
static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	le128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			le128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;

	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}

static int pre_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	le128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	bool more;
	int err;

	subreq = &rctx->subreq;
	cryptlen = subreq->cryptlen;

	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, NULL);

	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			le128_xor(wdst++, &rctx->t, wsrc++);
			gf128mul_x_ble(&rctx->t, &rctx->t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}
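/*
 * The tweak blocks consumed by post_crypt() are staged in rctx->buf, which
 * holds XTS_BUFFER_SIZE / 16 == 8 tweaks.  init_crypt() below may instead
 * kmalloc a larger staging buffer (rctx->ext, up to PAGE_SIZE) so that big
 * requests need fewer sub-request round trips; if that allocation fails the
 * code simply falls back to the small fixed buffer.
 */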
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = XTS_BUFFER_SIZE;
	if (req->cryptlen > XTS_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	if (rctx->ext)
		kzfree(rctx->ext);
}

static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}
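/*
 * xts_crypt() is a standalone helper for the legacy blkcipher interface,
 * exported for (typically parallelized) cipher implementations that supply
 * their own tweak and data encryption callbacks via struct xts_crypt_req.
 * Up to req->tbuflen / XTS_BLOCK_SIZE tweaks are precomputed into req->tbuf
 * per pass, so the data callback can process that many blocks at once.
 */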
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct xts_crypt_req *req)
{
	const unsigned int bsize = XTS_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	le128 *src, *dst, *t;
	le128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(nbytes / bsize, max_blks);
	src = (le128 *)walk.src.virt.addr;
	dst = (le128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				gf128mul_x_ble(&t_buf[i], t);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				le128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				le128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		*(le128 *)walk.iv = *t;

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (le128 *)walk.src.virt.addr;
		dst = (le128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}
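/*
 * Template instantiation: "xts(X)" is built from "ecb(X)" for the data path
 * plus a plain "X" cipher for the tweak, so requesting "xts(aes)" grabs
 * "ecb(aes)" here and allocates "aes" in init_tfm().  A minimal, purely
 * illustrative caller would look roughly like:
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, 64);
 *
 * where key is Key1 || Key2 (32 + 32 bytes for AES-256 in XTS) and the
 * 16-byte IV is usually the sector number (the 'plain' IV mentioned in
 * setkey() above).
 */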
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));

	mask = crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK |
				   CRYPTO_ALG_ASYNC);

	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_drop_spawn;

		if (ctx->name[len - 1] != ')')
			goto err_drop_spawn;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(&ctx->spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "xts",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");