// SPDX-License-Identifier: GPL-2.0
/*
 * Adiantum length-preserving encryption mode
 *
 * Copyright 2018 Google LLC
 */

/*
 * Adiantum is a tweakable, length-preserving encryption mode designed for fast
 * and secure disk encryption, especially on CPUs without dedicated crypto
 * instructions.  Adiantum encrypts each sector using the XChaCha12 stream
 * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
 * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
 * 16-byte block.  See the paper for details:
 *
 *	Adiantum: length-preserving encryption for entry-level processors
 *	(https://eprint.iacr.org/2018/720.pdf)
 *
 * For flexibility, this implementation also allows other ciphers:
 *
 *	- Stream cipher: XChaCha12 or XChaCha20
 *	- Block cipher: any with a 128-bit block size and 256-bit key
 *
 * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
 * HPolyC is not supported.  This is because Adiantum is ~20% faster than
 * HPolyC but still provably as secure, and also the ε-∆U hash function of
 * HBSH is formally defined to take two inputs (tweak, message) which makes it
 * difficult to wrap with the crypto_shash API.  Rather, some details need to
 * be handled here.  Nevertheless, if needed in the future, support for other
 * ε-∆U hash functions could be added here.
 */
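
/*
 * Informal outline of encryption, in the notation used by the comments below
 * (P_L = all but the last 16 bytes of the plaintext, P_R = the final 16 bytes,
 * T = the tweak; this is only a sketch, not the normative definition from the
 * paper):
 *
 *	P_M = P_R + H_{K_H}(T, P_L)	(first hash step, addition mod 2^128)
 *	C_M = E_{K_E}(P_M)		(single block cipher invocation)
 *	C_L = P_L XOR XChaCha(K_S, nonce derived from C_M)
 *	C_R = C_M - H_{K_H}(T, C_L)	(second hash step, subtraction mod 2^128)
 *
 * The ciphertext is C_L || C_R.  Decryption performs the same steps in reverse
 * order, with the block cipher decrypting C_M instead.
 */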

#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>

/*
 * Size of the right-hand part of the input data, in bytes; also the block
 * cipher's block size and the size of the hash function's output.
 */
#define BLOCKCIPHER_BLOCK_SIZE		16

/* Size of the block cipher key (K_E) in bytes */
#define BLOCKCIPHER_KEY_SIZE		32

/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE		(POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)

/*
 * The specification allows variable-length tweaks, but Linux's crypto API
 * currently only allows algorithms to support a single length.  The "natural"
 * tweak length for Adiantum is 16, since that fits into one Poly1305 block for
 * the best performance.  But longer tweaks are useful for fscrypt, to avoid
 * needing to derive per-file keys.  So instead we use two blocks, or 32 bytes.
 */
#define TWEAK_SIZE		32

struct adiantum_instance_ctx {
	struct crypto_skcipher_spawn streamcipher_spawn;
	struct crypto_cipher_spawn blockcipher_spawn;
	struct crypto_shash_spawn hash_spawn;
};

struct adiantum_tfm_ctx {
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	struct poly1305_core_key header_hash_key;
};

struct adiantum_request_ctx {

	/*
	 * Buffer for the right-hand part of the data, i.e.
	 *
	 *    P_R => P_M => C_M => C_R when encrypting, or
	 *    C_R => C_M => P_M => P_R when decrypting.
	 *
	 * Also used to build the IV for the stream cipher.
	 */
	union {
		u8 bytes[XCHACHA_IV_SIZE];
		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
	} rbuf;

	bool enc; /* true if encrypting, false if decrypting */

	/*
	 * The result of the Poly1305 ε-∆U hash function applied to
	 * (bulk length, tweak)
	 */
	le128 header_hash;

	/* Sub-requests, must be last */
	union {
		struct shash_desc hash_desc;
		struct skcipher_request streamcipher_req;
	} u;
};

/*
 * Given the XChaCha stream key K_S, derive the block cipher key K_E and the
 * hash key K_H as follows:
 *
 *     K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
 *
 * Note that this denotes using bits from the XChaCha keystream, which here we
 * get indirectly by encrypting a buffer containing all 0's.
 */
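/*
 * Concretely, the first BLOCKCIPHER_KEY_SIZE bytes of keystream become K_E,
 * the next POLY1305_BLOCK_SIZE bytes become the Poly1305 key used for the
 * header hash, and the next NHPOLY1305_KEY_SIZE bytes become the NHPoly1305
 * key used for the bulk hash; the latter two together form K_H.  (This simply
 * restates the layout of derived_keys[] consumed below.)
 */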
static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 iv[XCHACHA_IV_SIZE];
		u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
		struct scatterlist sg;
		struct crypto_wait wait;
		struct skcipher_request req; /* must be last */
	} *data;
	u8 *keyp;
	int err;

	/* Set the stream cipher key (K_S) */
	crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->streamcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
	if (err)
		return err;

	/* Derive the subkeys */
	data = kzalloc(sizeof(*data) +
		       crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->iv[0] = 1;
	sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
	crypto_init_wait(&data->wait);
	skcipher_request_set_tfm(&data->req, tctx->streamcipher);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						  CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
				   sizeof(data->derived_keys), data->iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
	if (err)
		goto out;
	keyp = data->derived_keys;

	/* Set the block cipher key (K_E) */
	crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tctx->blockcipher,
				crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tctx->blockcipher, keyp,
				   BLOCKCIPHER_KEY_SIZE);
	if (err)
		goto out;
	keyp += BLOCKCIPHER_KEY_SIZE;

	/* Set the hash key (K_H) */
	poly1305_core_setkey(&tctx->header_hash_key, keyp);
	keyp += POLY1305_BLOCK_SIZE;

	crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
	keyp += NHPOLY1305_KEY_SIZE;
	WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
	kfree_sensitive(data);
	return err;
}
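
/*
 * The two helpers below implement the addition and subtraction modulo 2^128
 * used by the hash steps, treating 16-byte values as little-endian 128-bit
 * integers with 'b' as the low 64-bit half and 'a' as the high half.  The
 * expression (x + y < x) evaluates to 1 exactly when the low-half addition
 * wraps around (for example x = U64_MAX, y = 1 gives x + y = 0), supplying
 * the carry into the high half; (x - y > x) detects a borrow analogously.
 */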
/* Addition in Z/(2^{128}Z) */
static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x + y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
			   (x + y < x));
}

/* Subtraction in Z/(2^{128}Z) */
static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x - y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
			   (x - y > x));
}

/*
 * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
 * result to rctx->header_hash.  This is the calculation
 *
 *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
 *
 * from the procedure in section 6.4 of the Adiantum paper.  The resulting
 * value is reused in both the first and second hash steps.  Specifically,
 * it's added to the result of an independently keyed ε-∆U hash function (for
 * equal length inputs only) taken over the left-hand part (the "bulk") of the
 * message, to give the overall Adiantum hash of the (tweak, left-hand part)
 * pair.
 */
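/*
 * With TWEAK_SIZE == 32, the Poly1305 input below is exactly three 16-byte
 * blocks: one block containing the bulk length in bits as a little-endian
 * 64-bit value followed by 8 zero bytes of padding, then the two blocks of
 * the tweak.  (This describes only the fixed layout used here; the paper's
 * definition allows other tweak lengths.)
 */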
static void adiantum_hash_header(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct {
		__le64 message_bits;
		__le64 padding;
	} header = {
		.message_bits = cpu_to_le64((u64)bulk_len * 8)
	};
	struct poly1305_state state;

	poly1305_core_init(&state);

	BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key,
			     &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);

	BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
			     TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);

	poly1305_core_emit(&state, NULL, &rctx->header_hash);
}

/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
				 struct scatterlist *sgl, le128 *digest)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct shash_desc *hash_desc = &rctx->u.hash_desc;
	struct sg_mapping_iter miter;
	unsigned int i, n;
	int err;

	hash_desc->tfm = tctx->hash;

	err = crypto_shash_init(hash_desc);
	if (err)
		return err;

	sg_miter_start(&miter, sgl, sg_nents(sgl),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	for (i = 0; i < bulk_len; i += n) {
		sg_miter_next(&miter);
		n = min_t(unsigned int, miter.length, bulk_len - i);
		err = crypto_shash_update(hash_desc, miter.addr, n);
		if (err)
			break;
	}
	sg_miter_stop(&miter);
	if (err)
		return err;

	return crypto_shash_final(hash_desc, (u8 *)digest);
}

/* Continue Adiantum encryption/decryption after the stream cipher step */
static int adiantum_finish(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	le128 digest;
	int err;

	/* If decrypting, decrypt C_M with the block cipher to get P_M */
	if (!rctx->enc)
		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/*
	 * Second hash step
	 *	enc: C_R = C_M - H_{K_H}(T, C_L)
	 *	dec: P_R = P_M - H_{K_H}(T, P_L)
	 */
	err = adiantum_hash_message(req, req->dst, &digest);
	if (err)
		return err;
	le128_add(&digest, &digest, &rctx->header_hash);
	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
				 bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
	return 0;
}

static void adiantum_streamcipher_done(struct crypto_async_request *areq,
				       int err)
{
	struct skcipher_request *req = areq->data;

	if (!err)
		err = adiantum_finish(req);

	skcipher_request_complete(req, err);
}

static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	unsigned int stream_len;
	le128 digest;
	int err;

	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
		return -EINVAL;

	rctx->enc = enc;

	/*
	 * First hash step
	 *	enc: P_M = P_R + H_{K_H}(T, P_L)
	 *	dec: C_M = C_R + H_{K_H}(T, C_L)
	 */
	adiantum_hash_header(req);
	err = adiantum_hash_message(req, req->src, &digest);
	if (err)
		return err;
	le128_add(&digest, &digest, &rctx->header_hash);
	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
				 bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);

	/* If encrypting, encrypt P_M with the block cipher to get C_M */
	if (enc)
		crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/* Initialize the rest of the XChaCha IV (first part is C_M) */
	BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
	BUILD_BUG_ON(XCHACHA_IV_SIZE != 32);	/* nonce || stream position */
	rctx->rbuf.words[4] = cpu_to_le32(1);
	rctx->rbuf.words[5] = 0;
	rctx->rbuf.words[6] = 0;
	rctx->rbuf.words[7] = 0;
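
	/*
	 * Assuming the kernel's XChaCha IV convention (a 24-byte extended
	 * nonce followed by the 64-bit starting stream position), the IV
	 * built above is: bytes 0-15 = C_M, byte 16 = 0x01, and all remaining
	 * bytes zero, i.e. the nonce is C_M || 1 || 0... and the stream
	 * position starts at 0.
	 */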

	/*
	 * XChaCha needs to be done on all the data except the last 16 bytes;
	 * for disk encryption that usually means 4080 or 496 bytes.  But ChaCha
	 * implementations tend to be most efficient when passed a whole number
	 * of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes.
	 * And here it doesn't matter whether the last 16 bytes are written to,
	 * as the second hash step will overwrite them.  Thus, round the XChaCha
	 * length up to the next 64-byte boundary if possible.
	 */
	stream_len = bulk_len;
	if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
		stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);

	skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
	skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
				   req->dst, stream_len, &rctx->rbuf);
	skcipher_request_set_callback(&rctx->u.streamcipher_req,
				      req->base.flags,
				      adiantum_streamcipher_done, req);
	return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
	       adiantum_finish(req);
}

static int adiantum_encrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, true);
}

static int adiantum_decrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, false);
}

static int adiantum_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	unsigned int subreq_size;
	int err;

	streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
	if (IS_ERR(streamcipher))
		return PTR_ERR(streamcipher);

	blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
	if (IS_ERR(blockcipher)) {
		err = PTR_ERR(blockcipher);
		goto err_free_streamcipher;
	}

	hash = crypto_spawn_shash(&ictx->hash_spawn);
	if (IS_ERR(hash)) {
		err = PTR_ERR(hash);
		goto err_free_blockcipher;
	}

	tctx->streamcipher = streamcipher;
	tctx->blockcipher = blockcipher;
	tctx->hash = hash;

	BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
		     sizeof(struct adiantum_request_ctx));
	subreq_size = max(sizeof_field(struct adiantum_request_ctx,
				       u.hash_desc) +
			  crypto_shash_descsize(hash),
			  sizeof_field(struct adiantum_request_ctx,
				       u.streamcipher_req) +
			  crypto_skcipher_reqsize(streamcipher));

	crypto_skcipher_set_reqsize(tfm,
				    offsetof(struct adiantum_request_ctx, u) +
				    subreq_size);
	return 0;

err_free_blockcipher:
	crypto_free_cipher(blockcipher);
err_free_streamcipher:
	crypto_free_skcipher(streamcipher);
	return err;
}

static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(tctx->streamcipher);
	crypto_free_cipher(tctx->blockcipher);
	crypto_free_shash(tctx->hash);
}

static void adiantum_free_instance(struct skcipher_instance *inst)
{
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->streamcipher_spawn);
	crypto_drop_cipher(&ictx->blockcipher_spawn);
	crypto_drop_shash(&ictx->hash_spawn);
	kfree(inst);
}

/*
 * Check for a supported set of inner algorithms.
 * See the comment at the beginning of this file.
 */
static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
					  struct crypto_alg *blockcipher_alg,
					  struct shash_alg *hash_alg)
{
	if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
	    strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
		return false;

	if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
	    blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
		return false;
	if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
		return false;

	if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
		return false;

	return true;
}
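
/*
 * Example (illustrative only): instantiating "adiantum(xchacha12,aes)"
 * produces an algorithm whose cra_name is "adiantum(xchacha12,aes)" and whose
 * cra_driver_name is built from the three inner driver names, e.g.
 * "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)" when the
 * generic implementations are selected.
 */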
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	const char *nhpoly1305_name;
	struct skcipher_instance *inst;
	struct adiantum_instance_ctx *ictx;
	struct skcipher_alg *streamcipher_alg;
	struct crypto_alg *blockcipher_alg;
	struct shash_alg *hash_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	ictx = skcipher_instance_ctx(inst);

	/* Stream cipher, e.g. "xchacha12" */
	err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
				   skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);

	/* Block cipher, e.g. "aes" */
	err = crypto_grab_cipher(&ictx->blockcipher_spawn,
				 skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[2]), 0, mask);
	if (err)
		goto err_free_inst;
	blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);

	/* NHPoly1305 ε-∆U hash function */
	nhpoly1305_name = crypto_attr_alg_name(tb[3]);
	if (nhpoly1305_name == ERR_PTR(-ENOENT))
		nhpoly1305_name = "nhpoly1305";
	err = crypto_grab_shash(&ictx->hash_spawn,
				skcipher_crypto_instance(inst),
				nhpoly1305_name, 0, mask);
	if (err)
		goto err_free_inst;
	hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);

	/* Check the set of algorithms */
	if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
					   hash_alg)) {
		pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
			streamcipher_alg->base.cra_name,
			blockcipher_alg->cra_name, hash_alg->base.cra_name);
		err = -EINVAL;
		goto err_free_inst;
	}

	/* Instance fields */

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
		     blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s,%s)",
		     streamcipher_alg->base.cra_driver_name,
		     blockcipher_alg->cra_driver_name,
		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
	inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
	inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
				       hash_alg->base.cra_alignmask;
	/*
	 * The block cipher is only invoked once per message, so for long
	 * messages (e.g. sectors for disk encryption) its performance doesn't
	 * matter as much as that of the stream cipher and hash function.  Thus,
	 * weigh the block cipher's ->cra_priority less.
	 */
	inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
				       2 * hash_alg->base.cra_priority +
				       blockcipher_alg->cra_priority) / 7;

	inst->alg.setkey = adiantum_setkey;
	inst->alg.encrypt = adiantum_encrypt;
	inst->alg.decrypt = adiantum_decrypt;
	inst->alg.init = adiantum_init_tfm;
	inst->alg.exit = adiantum_exit_tfm;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg);
	inst->alg.ivsize = TWEAK_SIZE;

	inst->free = adiantum_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		adiantum_free_instance(inst);
	}
	return err;
}

/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
static struct crypto_template adiantum_tmpl = {
	.name = "adiantum",
	.create = adiantum_create,
	.module = THIS_MODULE,
};

static int __init adiantum_module_init(void)
{
	return crypto_register_template(&adiantum_tmpl);
}

static void __exit adiantum_module_exit(void)
{
	crypto_unregister_template(&adiantum_tmpl);
}

subsys_initcall(adiantum_module_init);
module_exit(adiantum_module_exit);

MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("adiantum");
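
/*
 * Usage sketch for in-kernel callers (illustrative only; error handling and
 * full request setup omitted):
 *
 *	struct crypto_skcipher *tfm =
 *		crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0, 0);
 *
 *	crypto_skcipher_setkey(tfm, key, 32);	(the 32-byte XChaCha key K_S)
 *
 * Each request then takes a TWEAK_SIZE (32-byte) IV holding the tweak T and a
 * cryptlen of at least BLOCKCIPHER_BLOCK_SIZE bytes, typically one disk
 * sector.
 */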