1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Software async crypto daemon. 4 * 5 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 6 * 7 * Added AEAD support to cryptd. 8 * Authors: Tadeusz Struk (tadeusz.struk@intel.com) 9 * Adrian Hoban <adrian.hoban@intel.com> 10 * Gabriele Paoloni <gabriele.paoloni@intel.com> 11 * Aidan O'Mahony (aidan.o.mahony@intel.com) 12 * Copyright (c) 2010, Intel Corporation. 13 */ 14 15 #include <crypto/internal/hash.h> 16 #include <crypto/internal/aead.h> 17 #include <crypto/internal/skcipher.h> 18 #include <crypto/cryptd.h> 19 #include <linux/refcount.h> 20 #include <linux/err.h> 21 #include <linux/init.h> 22 #include <linux/kernel.h> 23 #include <linux/list.h> 24 #include <linux/module.h> 25 #include <linux/scatterlist.h> 26 #include <linux/sched.h> 27 #include <linux/slab.h> 28 #include <linux/workqueue.h> 29 30 static unsigned int cryptd_max_cpu_qlen = 1000; 31 module_param(cryptd_max_cpu_qlen, uint, 0); 32 MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); 33 34 static struct workqueue_struct *cryptd_wq; 35 36 struct cryptd_cpu_queue { 37 struct crypto_queue queue; 38 struct work_struct work; 39 }; 40 41 struct cryptd_queue { 42 struct cryptd_cpu_queue __percpu *cpu_queue; 43 }; 44 45 struct cryptd_instance_ctx { 46 struct crypto_spawn spawn; 47 struct cryptd_queue *queue; 48 }; 49 50 struct skcipherd_instance_ctx { 51 struct crypto_skcipher_spawn spawn; 52 struct cryptd_queue *queue; 53 }; 54 55 struct hashd_instance_ctx { 56 struct crypto_shash_spawn spawn; 57 struct cryptd_queue *queue; 58 }; 59 60 struct aead_instance_ctx { 61 struct crypto_aead_spawn aead_spawn; 62 struct cryptd_queue *queue; 63 }; 64 65 struct cryptd_skcipher_ctx { 66 refcount_t refcnt; 67 struct crypto_sync_skcipher *child; 68 }; 69 70 struct cryptd_skcipher_request_ctx { 71 crypto_completion_t complete; 72 }; 73 74 struct cryptd_hash_ctx { 75 refcount_t refcnt; 76 struct crypto_shash *child; 77 }; 78 79 struct cryptd_hash_request_ctx { 80 crypto_completion_t complete; 81 struct shash_desc desc; 82 }; 83 84 struct cryptd_aead_ctx { 85 refcount_t refcnt; 86 struct crypto_aead *child; 87 }; 88 89 struct cryptd_aead_request_ctx { 90 crypto_completion_t complete; 91 }; 92 93 static void cryptd_queue_worker(struct work_struct *work); 94 95 static int cryptd_init_queue(struct cryptd_queue *queue, 96 unsigned int max_cpu_qlen) 97 { 98 int cpu; 99 struct cryptd_cpu_queue *cpu_queue; 100 101 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); 102 if (!queue->cpu_queue) 103 return -ENOMEM; 104 for_each_possible_cpu(cpu) { 105 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); 106 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); 107 INIT_WORK(&cpu_queue->work, cryptd_queue_worker); 108 } 109 pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); 110 return 0; 111 } 112 113 static void cryptd_fini_queue(struct cryptd_queue *queue) 114 { 115 int cpu; 116 struct cryptd_cpu_queue *cpu_queue; 117 118 for_each_possible_cpu(cpu) { 119 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); 120 BUG_ON(cpu_queue->queue.qlen); 121 } 122 free_percpu(queue->cpu_queue); 123 } 124 125 static int cryptd_enqueue_request(struct cryptd_queue *queue, 126 struct crypto_async_request *request) 127 { 128 int cpu, err; 129 struct cryptd_cpu_queue *cpu_queue; 130 refcount_t *refcnt; 131 132 cpu = get_cpu(); 133 cpu_queue = this_cpu_ptr(queue->cpu_queue); 134 err = crypto_enqueue_request(&cpu_queue->queue, request); 135 136 refcnt = crypto_tfm_ctx(request->tfm); 137 138 if (err == -ENOSPC) 139 goto out_put_cpu; 140 141 queue_work_on(cpu, cryptd_wq, &cpu_queue->work); 142 143 if (!refcount_read(refcnt)) 144 goto out_put_cpu; 145 146 refcount_inc(refcnt); 147 148 out_put_cpu: 149 put_cpu(); 150 151 return err; 152 } 153 154 /* Called in workqueue context, do one real cryption work (via 155 * req->complete) and reschedule itself if there are more work to 156 * do. */ 157 static void cryptd_queue_worker(struct work_struct *work) 158 { 159 struct cryptd_cpu_queue *cpu_queue; 160 struct crypto_async_request *req, *backlog; 161 162 cpu_queue = container_of(work, struct cryptd_cpu_queue, work); 163 /* 164 * Only handle one request at a time to avoid hogging crypto workqueue. 165 * preempt_disable/enable is used to prevent being preempted by 166 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent 167 * cryptd_enqueue_request() being accessed from software interrupts. 168 */ 169 local_bh_disable(); 170 preempt_disable(); 171 backlog = crypto_get_backlog(&cpu_queue->queue); 172 req = crypto_dequeue_request(&cpu_queue->queue); 173 preempt_enable(); 174 local_bh_enable(); 175 176 if (!req) 177 return; 178 179 if (backlog) 180 backlog->complete(backlog, -EINPROGRESS); 181 req->complete(req, 0); 182 183 if (cpu_queue->queue.qlen) 184 queue_work(cryptd_wq, &cpu_queue->work); 185 } 186 187 static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) 188 { 189 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); 190 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); 191 return ictx->queue; 192 } 193 194 static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, 195 u32 *mask) 196 { 197 struct crypto_attr_type *algt; 198 199 algt = crypto_get_attr_type(tb); 200 if (IS_ERR(algt)) 201 return; 202 203 *type |= algt->type & CRYPTO_ALG_INTERNAL; 204 *mask |= algt->mask & CRYPTO_ALG_INTERNAL; 205 } 206 207 static int cryptd_init_instance(struct crypto_instance *inst, 208 struct crypto_alg *alg) 209 { 210 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, 211 "cryptd(%s)", 212 alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 213 return -ENAMETOOLONG; 214 215 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); 216 217 inst->alg.cra_priority = alg->cra_priority + 50; 218 inst->alg.cra_blocksize = alg->cra_blocksize; 219 inst->alg.cra_alignmask = alg->cra_alignmask; 220 221 return 0; 222 } 223 224 static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, 225 const u8 *key, unsigned int keylen) 226 { 227 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); 228 struct crypto_sync_skcipher *child = ctx->child; 229 230 crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 231 crypto_sync_skcipher_set_flags(child, 232 crypto_skcipher_get_flags(parent) & 233 CRYPTO_TFM_REQ_MASK); 234 return crypto_sync_skcipher_setkey(child, key, keylen); 235 } 236 237 static void cryptd_skcipher_complete(struct skcipher_request *req, int err) 238 { 239 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 240 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 241 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); 242 int refcnt = refcount_read(&ctx->refcnt); 243 244 local_bh_disable(); 245 rctx->complete(&req->base, err); 246 local_bh_enable(); 247 248 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) 249 crypto_free_skcipher(tfm); 250 } 251 252 static void cryptd_skcipher_encrypt(struct crypto_async_request *base, 253 int err) 254 { 255 struct skcipher_request *req = skcipher_request_cast(base); 256 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); 257 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 258 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 259 struct crypto_sync_skcipher *child = ctx->child; 260 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); 261 262 if (unlikely(err == -EINPROGRESS)) 263 goto out; 264 265 skcipher_request_set_sync_tfm(subreq, child); 266 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, 267 NULL, NULL); 268 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, 269 req->iv); 270 271 err = crypto_skcipher_encrypt(subreq); 272 skcipher_request_zero(subreq); 273 274 req->base.complete = rctx->complete; 275 276 out: 277 cryptd_skcipher_complete(req, err); 278 } 279 280 static void cryptd_skcipher_decrypt(struct crypto_async_request *base, 281 int err) 282 { 283 struct skcipher_request *req = skcipher_request_cast(base); 284 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); 285 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 286 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 287 struct crypto_sync_skcipher *child = ctx->child; 288 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); 289 290 if (unlikely(err == -EINPROGRESS)) 291 goto out; 292 293 skcipher_request_set_sync_tfm(subreq, child); 294 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, 295 NULL, NULL); 296 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, 297 req->iv); 298 299 err = crypto_skcipher_decrypt(subreq); 300 skcipher_request_zero(subreq); 301 302 req->base.complete = rctx->complete; 303 304 out: 305 cryptd_skcipher_complete(req, err); 306 } 307 308 static int cryptd_skcipher_enqueue(struct skcipher_request *req, 309 crypto_completion_t compl) 310 { 311 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); 312 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 313 struct cryptd_queue *queue; 314 315 queue = cryptd_get_queue(crypto_skcipher_tfm(tfm)); 316 rctx->complete = req->base.complete; 317 req->base.complete = compl; 318 319 return cryptd_enqueue_request(queue, &req->base); 320 } 321 322 static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req) 323 { 324 return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt); 325 } 326 327 static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req) 328 { 329 return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt); 330 } 331 332 static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm) 333 { 334 struct skcipher_instance *inst = skcipher_alg_instance(tfm); 335 struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst); 336 struct crypto_skcipher_spawn *spawn = &ictx->spawn; 337 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 338 struct crypto_skcipher *cipher; 339 340 cipher = crypto_spawn_skcipher(spawn); 341 if (IS_ERR(cipher)) 342 return PTR_ERR(cipher); 343 344 ctx->child = (struct crypto_sync_skcipher *)cipher; 345 crypto_skcipher_set_reqsize( 346 tfm, sizeof(struct cryptd_skcipher_request_ctx)); 347 return 0; 348 } 349 350 static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) 351 { 352 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 353 354 crypto_free_sync_skcipher(ctx->child); 355 } 356 357 static void cryptd_skcipher_free(struct skcipher_instance *inst) 358 { 359 struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst); 360 361 crypto_drop_skcipher(&ctx->spawn); 362 kfree(inst); 363 } 364 365 static int cryptd_create_skcipher(struct crypto_template *tmpl, 366 struct rtattr **tb, 367 struct cryptd_queue *queue) 368 { 369 struct skcipherd_instance_ctx *ctx; 370 struct skcipher_instance *inst; 371 struct skcipher_alg *alg; 372 u32 type; 373 u32 mask; 374 int err; 375 376 type = 0; 377 mask = CRYPTO_ALG_ASYNC; 378 379 cryptd_check_internal(tb, &type, &mask); 380 381 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 382 if (!inst) 383 return -ENOMEM; 384 385 ctx = skcipher_instance_ctx(inst); 386 ctx->queue = queue; 387 388 err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), 389 crypto_attr_alg_name(tb[1]), type, mask); 390 if (err) 391 goto err_free_inst; 392 393 alg = crypto_spawn_skcipher_alg(&ctx->spawn); 394 err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base); 395 if (err) 396 goto err_free_inst; 397 398 inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | 399 (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); 400 401 inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); 402 inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); 403 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); 404 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); 405 406 inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx); 407 408 inst->alg.init = cryptd_skcipher_init_tfm; 409 inst->alg.exit = cryptd_skcipher_exit_tfm; 410 411 inst->alg.setkey = cryptd_skcipher_setkey; 412 inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue; 413 inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue; 414 415 inst->free = cryptd_skcipher_free; 416 417 err = skcipher_register_instance(tmpl, inst); 418 if (err) { 419 err_free_inst: 420 cryptd_skcipher_free(inst); 421 } 422 return err; 423 } 424 425 static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) 426 { 427 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); 428 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); 429 struct crypto_shash_spawn *spawn = &ictx->spawn; 430 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 431 struct crypto_shash *hash; 432 433 hash = crypto_spawn_shash(spawn); 434 if (IS_ERR(hash)) 435 return PTR_ERR(hash); 436 437 ctx->child = hash; 438 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 439 sizeof(struct cryptd_hash_request_ctx) + 440 crypto_shash_descsize(hash)); 441 return 0; 442 } 443 444 static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) 445 { 446 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 447 448 crypto_free_shash(ctx->child); 449 } 450 451 static int cryptd_hash_setkey(struct crypto_ahash *parent, 452 const u8 *key, unsigned int keylen) 453 { 454 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); 455 struct crypto_shash *child = ctx->child; 456 457 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); 458 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & 459 CRYPTO_TFM_REQ_MASK); 460 return crypto_shash_setkey(child, key, keylen); 461 } 462 463 static int cryptd_hash_enqueue(struct ahash_request *req, 464 crypto_completion_t compl) 465 { 466 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 467 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 468 struct cryptd_queue *queue = 469 cryptd_get_queue(crypto_ahash_tfm(tfm)); 470 471 rctx->complete = req->base.complete; 472 req->base.complete = compl; 473 474 return cryptd_enqueue_request(queue, &req->base); 475 } 476 477 static void cryptd_hash_complete(struct ahash_request *req, int err) 478 { 479 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 480 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); 481 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 482 int refcnt = refcount_read(&ctx->refcnt); 483 484 local_bh_disable(); 485 rctx->complete(&req->base, err); 486 local_bh_enable(); 487 488 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) 489 crypto_free_ahash(tfm); 490 } 491 492 static void cryptd_hash_init(struct crypto_async_request *req_async, int err) 493 { 494 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 495 struct crypto_shash *child = ctx->child; 496 struct ahash_request *req = ahash_request_cast(req_async); 497 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 498 struct shash_desc *desc = &rctx->desc; 499 500 if (unlikely(err == -EINPROGRESS)) 501 goto out; 502 503 desc->tfm = child; 504 505 err = crypto_shash_init(desc); 506 507 req->base.complete = rctx->complete; 508 509 out: 510 cryptd_hash_complete(req, err); 511 } 512 513 static int cryptd_hash_init_enqueue(struct ahash_request *req) 514 { 515 return cryptd_hash_enqueue(req, cryptd_hash_init); 516 } 517 518 static void cryptd_hash_update(struct crypto_async_request *req_async, int err) 519 { 520 struct ahash_request *req = ahash_request_cast(req_async); 521 struct cryptd_hash_request_ctx *rctx; 522 523 rctx = ahash_request_ctx(req); 524 525 if (unlikely(err == -EINPROGRESS)) 526 goto out; 527 528 err = shash_ahash_update(req, &rctx->desc); 529 530 req->base.complete = rctx->complete; 531 532 out: 533 cryptd_hash_complete(req, err); 534 } 535 536 static int cryptd_hash_update_enqueue(struct ahash_request *req) 537 { 538 return cryptd_hash_enqueue(req, cryptd_hash_update); 539 } 540 541 static void cryptd_hash_final(struct crypto_async_request *req_async, int err) 542 { 543 struct ahash_request *req = ahash_request_cast(req_async); 544 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 545 546 if (unlikely(err == -EINPROGRESS)) 547 goto out; 548 549 err = crypto_shash_final(&rctx->desc, req->result); 550 551 req->base.complete = rctx->complete; 552 553 out: 554 cryptd_hash_complete(req, err); 555 } 556 557 static int cryptd_hash_final_enqueue(struct ahash_request *req) 558 { 559 return cryptd_hash_enqueue(req, cryptd_hash_final); 560 } 561 562 static void cryptd_hash_finup(struct crypto_async_request *req_async, int err) 563 { 564 struct ahash_request *req = ahash_request_cast(req_async); 565 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 566 567 if (unlikely(err == -EINPROGRESS)) 568 goto out; 569 570 err = shash_ahash_finup(req, &rctx->desc); 571 572 req->base.complete = rctx->complete; 573 574 out: 575 cryptd_hash_complete(req, err); 576 } 577 578 static int cryptd_hash_finup_enqueue(struct ahash_request *req) 579 { 580 return cryptd_hash_enqueue(req, cryptd_hash_finup); 581 } 582 583 static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) 584 { 585 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 586 struct crypto_shash *child = ctx->child; 587 struct ahash_request *req = ahash_request_cast(req_async); 588 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 589 struct shash_desc *desc = &rctx->desc; 590 591 if (unlikely(err == -EINPROGRESS)) 592 goto out; 593 594 desc->tfm = child; 595 596 err = shash_ahash_digest(req, desc); 597 598 req->base.complete = rctx->complete; 599 600 out: 601 cryptd_hash_complete(req, err); 602 } 603 604 static int cryptd_hash_digest_enqueue(struct ahash_request *req) 605 { 606 return cryptd_hash_enqueue(req, cryptd_hash_digest); 607 } 608 609 static int cryptd_hash_export(struct ahash_request *req, void *out) 610 { 611 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 612 613 return crypto_shash_export(&rctx->desc, out); 614 } 615 616 static int cryptd_hash_import(struct ahash_request *req, const void *in) 617 { 618 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 619 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); 620 struct shash_desc *desc = cryptd_shash_desc(req); 621 622 desc->tfm = ctx->child; 623 624 return crypto_shash_import(desc, in); 625 } 626 627 static void cryptd_hash_free(struct ahash_instance *inst) 628 { 629 struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst); 630 631 crypto_drop_shash(&ctx->spawn); 632 kfree(inst); 633 } 634 635 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, 636 struct cryptd_queue *queue) 637 { 638 struct hashd_instance_ctx *ctx; 639 struct ahash_instance *inst; 640 struct shash_alg *alg; 641 u32 type = 0; 642 u32 mask = 0; 643 int err; 644 645 cryptd_check_internal(tb, &type, &mask); 646 647 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 648 if (!inst) 649 return -ENOMEM; 650 651 ctx = ahash_instance_ctx(inst); 652 ctx->queue = queue; 653 654 err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst), 655 crypto_attr_alg_name(tb[1]), type, mask); 656 if (err) 657 goto err_free_inst; 658 alg = crypto_spawn_shash_alg(&ctx->spawn); 659 660 err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base); 661 if (err) 662 goto err_free_inst; 663 664 inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | 665 (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL | 666 CRYPTO_ALG_OPTIONAL_KEY)); 667 668 inst->alg.halg.digestsize = alg->digestsize; 669 inst->alg.halg.statesize = alg->statesize; 670 inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); 671 672 inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; 673 inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm; 674 675 inst->alg.init = cryptd_hash_init_enqueue; 676 inst->alg.update = cryptd_hash_update_enqueue; 677 inst->alg.final = cryptd_hash_final_enqueue; 678 inst->alg.finup = cryptd_hash_finup_enqueue; 679 inst->alg.export = cryptd_hash_export; 680 inst->alg.import = cryptd_hash_import; 681 if (crypto_shash_alg_has_setkey(alg)) 682 inst->alg.setkey = cryptd_hash_setkey; 683 inst->alg.digest = cryptd_hash_digest_enqueue; 684 685 inst->free = cryptd_hash_free; 686 687 err = ahash_register_instance(tmpl, inst); 688 if (err) { 689 err_free_inst: 690 cryptd_hash_free(inst); 691 } 692 return err; 693 } 694 695 static int cryptd_aead_setkey(struct crypto_aead *parent, 696 const u8 *key, unsigned int keylen) 697 { 698 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent); 699 struct crypto_aead *child = ctx->child; 700 701 return crypto_aead_setkey(child, key, keylen); 702 } 703 704 static int cryptd_aead_setauthsize(struct crypto_aead *parent, 705 unsigned int authsize) 706 { 707 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent); 708 struct crypto_aead *child = ctx->child; 709 710 return crypto_aead_setauthsize(child, authsize); 711 } 712 713 static void cryptd_aead_crypt(struct aead_request *req, 714 struct crypto_aead *child, 715 int err, 716 int (*crypt)(struct aead_request *req)) 717 { 718 struct cryptd_aead_request_ctx *rctx; 719 struct cryptd_aead_ctx *ctx; 720 crypto_completion_t compl; 721 struct crypto_aead *tfm; 722 int refcnt; 723 724 rctx = aead_request_ctx(req); 725 compl = rctx->complete; 726 727 tfm = crypto_aead_reqtfm(req); 728 729 if (unlikely(err == -EINPROGRESS)) 730 goto out; 731 aead_request_set_tfm(req, child); 732 err = crypt( req ); 733 734 out: 735 ctx = crypto_aead_ctx(tfm); 736 refcnt = refcount_read(&ctx->refcnt); 737 738 local_bh_disable(); 739 compl(&req->base, err); 740 local_bh_enable(); 741 742 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) 743 crypto_free_aead(tfm); 744 } 745 746 static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) 747 { 748 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); 749 struct crypto_aead *child = ctx->child; 750 struct aead_request *req; 751 752 req = container_of(areq, struct aead_request, base); 753 cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt); 754 } 755 756 static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) 757 { 758 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); 759 struct crypto_aead *child = ctx->child; 760 struct aead_request *req; 761 762 req = container_of(areq, struct aead_request, base); 763 cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt); 764 } 765 766 static int cryptd_aead_enqueue(struct aead_request *req, 767 crypto_completion_t compl) 768 { 769 struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); 770 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 771 struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); 772 773 rctx->complete = req->base.complete; 774 req->base.complete = compl; 775 return cryptd_enqueue_request(queue, &req->base); 776 } 777 778 static int cryptd_aead_encrypt_enqueue(struct aead_request *req) 779 { 780 return cryptd_aead_enqueue(req, cryptd_aead_encrypt ); 781 } 782 783 static int cryptd_aead_decrypt_enqueue(struct aead_request *req) 784 { 785 return cryptd_aead_enqueue(req, cryptd_aead_decrypt ); 786 } 787 788 static int cryptd_aead_init_tfm(struct crypto_aead *tfm) 789 { 790 struct aead_instance *inst = aead_alg_instance(tfm); 791 struct aead_instance_ctx *ictx = aead_instance_ctx(inst); 792 struct crypto_aead_spawn *spawn = &ictx->aead_spawn; 793 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm); 794 struct crypto_aead *cipher; 795 796 cipher = crypto_spawn_aead(spawn); 797 if (IS_ERR(cipher)) 798 return PTR_ERR(cipher); 799 800 ctx->child = cipher; 801 crypto_aead_set_reqsize( 802 tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx), 803 crypto_aead_reqsize(cipher))); 804 return 0; 805 } 806 807 static void cryptd_aead_exit_tfm(struct crypto_aead *tfm) 808 { 809 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm); 810 crypto_free_aead(ctx->child); 811 } 812 813 static void cryptd_aead_free(struct aead_instance *inst) 814 { 815 struct aead_instance_ctx *ctx = aead_instance_ctx(inst); 816 817 crypto_drop_aead(&ctx->aead_spawn); 818 kfree(inst); 819 } 820 821 static int cryptd_create_aead(struct crypto_template *tmpl, 822 struct rtattr **tb, 823 struct cryptd_queue *queue) 824 { 825 struct aead_instance_ctx *ctx; 826 struct aead_instance *inst; 827 struct aead_alg *alg; 828 u32 type = 0; 829 u32 mask = CRYPTO_ALG_ASYNC; 830 int err; 831 832 cryptd_check_internal(tb, &type, &mask); 833 834 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 835 if (!inst) 836 return -ENOMEM; 837 838 ctx = aead_instance_ctx(inst); 839 ctx->queue = queue; 840 841 err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst), 842 crypto_attr_alg_name(tb[1]), type, mask); 843 if (err) 844 goto err_free_inst; 845 846 alg = crypto_spawn_aead_alg(&ctx->aead_spawn); 847 err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base); 848 if (err) 849 goto err_free_inst; 850 851 inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | 852 (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); 853 inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); 854 855 inst->alg.ivsize = crypto_aead_alg_ivsize(alg); 856 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); 857 858 inst->alg.init = cryptd_aead_init_tfm; 859 inst->alg.exit = cryptd_aead_exit_tfm; 860 inst->alg.setkey = cryptd_aead_setkey; 861 inst->alg.setauthsize = cryptd_aead_setauthsize; 862 inst->alg.encrypt = cryptd_aead_encrypt_enqueue; 863 inst->alg.decrypt = cryptd_aead_decrypt_enqueue; 864 865 inst->free = cryptd_aead_free; 866 867 err = aead_register_instance(tmpl, inst); 868 if (err) { 869 err_free_inst: 870 cryptd_aead_free(inst); 871 } 872 return err; 873 } 874 875 static struct cryptd_queue queue; 876 877 static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) 878 { 879 struct crypto_attr_type *algt; 880 881 algt = crypto_get_attr_type(tb); 882 if (IS_ERR(algt)) 883 return PTR_ERR(algt); 884 885 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { 886 case CRYPTO_ALG_TYPE_SKCIPHER: 887 return cryptd_create_skcipher(tmpl, tb, &queue); 888 case CRYPTO_ALG_TYPE_HASH: 889 return cryptd_create_hash(tmpl, tb, &queue); 890 case CRYPTO_ALG_TYPE_AEAD: 891 return cryptd_create_aead(tmpl, tb, &queue); 892 } 893 894 return -EINVAL; 895 } 896 897 static struct crypto_template cryptd_tmpl = { 898 .name = "cryptd", 899 .create = cryptd_create, 900 .module = THIS_MODULE, 901 }; 902 903 struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, 904 u32 type, u32 mask) 905 { 906 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; 907 struct cryptd_skcipher_ctx *ctx; 908 struct crypto_skcipher *tfm; 909 910 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, 911 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) 912 return ERR_PTR(-EINVAL); 913 914 tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask); 915 if (IS_ERR(tfm)) 916 return ERR_CAST(tfm); 917 918 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { 919 crypto_free_skcipher(tfm); 920 return ERR_PTR(-EINVAL); 921 } 922 923 ctx = crypto_skcipher_ctx(tfm); 924 refcount_set(&ctx->refcnt, 1); 925 926 return container_of(tfm, struct cryptd_skcipher, base); 927 } 928 EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher); 929 930 struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) 931 { 932 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); 933 934 return &ctx->child->base; 935 } 936 EXPORT_SYMBOL_GPL(cryptd_skcipher_child); 937 938 bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm) 939 { 940 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); 941 942 return refcount_read(&ctx->refcnt) - 1; 943 } 944 EXPORT_SYMBOL_GPL(cryptd_skcipher_queued); 945 946 void cryptd_free_skcipher(struct cryptd_skcipher *tfm) 947 { 948 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); 949 950 if (refcount_dec_and_test(&ctx->refcnt)) 951 crypto_free_skcipher(&tfm->base); 952 } 953 EXPORT_SYMBOL_GPL(cryptd_free_skcipher); 954 955 struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, 956 u32 type, u32 mask) 957 { 958 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; 959 struct cryptd_hash_ctx *ctx; 960 struct crypto_ahash *tfm; 961 962 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, 963 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) 964 return ERR_PTR(-EINVAL); 965 tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask); 966 if (IS_ERR(tfm)) 967 return ERR_CAST(tfm); 968 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { 969 crypto_free_ahash(tfm); 970 return ERR_PTR(-EINVAL); 971 } 972 973 ctx = crypto_ahash_ctx(tfm); 974 refcount_set(&ctx->refcnt, 1); 975 976 return __cryptd_ahash_cast(tfm); 977 } 978 EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); 979 980 struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) 981 { 982 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); 983 984 return ctx->child; 985 } 986 EXPORT_SYMBOL_GPL(cryptd_ahash_child); 987 988 struct shash_desc *cryptd_shash_desc(struct ahash_request *req) 989 { 990 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 991 return &rctx->desc; 992 } 993 EXPORT_SYMBOL_GPL(cryptd_shash_desc); 994 995 bool cryptd_ahash_queued(struct cryptd_ahash *tfm) 996 { 997 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); 998 999 return refcount_read(&ctx->refcnt) - 1; 1000 } 1001 EXPORT_SYMBOL_GPL(cryptd_ahash_queued); 1002 1003 void cryptd_free_ahash(struct cryptd_ahash *tfm) 1004 { 1005 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); 1006 1007 if (refcount_dec_and_test(&ctx->refcnt)) 1008 crypto_free_ahash(&tfm->base); 1009 } 1010 EXPORT_SYMBOL_GPL(cryptd_free_ahash); 1011 1012 struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, 1013 u32 type, u32 mask) 1014 { 1015 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; 1016 struct cryptd_aead_ctx *ctx; 1017 struct crypto_aead *tfm; 1018 1019 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, 1020 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) 1021 return ERR_PTR(-EINVAL); 1022 tfm = crypto_alloc_aead(cryptd_alg_name, type, mask); 1023 if (IS_ERR(tfm)) 1024 return ERR_CAST(tfm); 1025 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { 1026 crypto_free_aead(tfm); 1027 return ERR_PTR(-EINVAL); 1028 } 1029 1030 ctx = crypto_aead_ctx(tfm); 1031 refcount_set(&ctx->refcnt, 1); 1032 1033 return __cryptd_aead_cast(tfm); 1034 } 1035 EXPORT_SYMBOL_GPL(cryptd_alloc_aead); 1036 1037 struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm) 1038 { 1039 struct cryptd_aead_ctx *ctx; 1040 ctx = crypto_aead_ctx(&tfm->base); 1041 return ctx->child; 1042 } 1043 EXPORT_SYMBOL_GPL(cryptd_aead_child); 1044 1045 bool cryptd_aead_queued(struct cryptd_aead *tfm) 1046 { 1047 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); 1048 1049 return refcount_read(&ctx->refcnt) - 1; 1050 } 1051 EXPORT_SYMBOL_GPL(cryptd_aead_queued); 1052 1053 void cryptd_free_aead(struct cryptd_aead *tfm) 1054 { 1055 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); 1056 1057 if (refcount_dec_and_test(&ctx->refcnt)) 1058 crypto_free_aead(&tfm->base); 1059 } 1060 EXPORT_SYMBOL_GPL(cryptd_free_aead); 1061 1062 static int __init cryptd_init(void) 1063 { 1064 int err; 1065 1066 cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1067 1); 1068 if (!cryptd_wq) 1069 return -ENOMEM; 1070 1071 err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen); 1072 if (err) 1073 goto err_destroy_wq; 1074 1075 err = crypto_register_template(&cryptd_tmpl); 1076 if (err) 1077 goto err_fini_queue; 1078 1079 return 0; 1080 1081 err_fini_queue: 1082 cryptd_fini_queue(&queue); 1083 err_destroy_wq: 1084 destroy_workqueue(cryptd_wq); 1085 return err; 1086 } 1087 1088 static void __exit cryptd_exit(void) 1089 { 1090 destroy_workqueue(cryptd_wq); 1091 cryptd_fini_queue(&queue); 1092 crypto_unregister_template(&cryptd_tmpl); 1093 } 1094 1095 subsys_initcall(cryptd_init); 1096 module_exit(cryptd_exit); 1097 1098 MODULE_LICENSE("GPL"); 1099 MODULE_DESCRIPTION("Software async crypto daemon"); 1100 MODULE_ALIAS_CRYPTO("cryptd"); 1101