// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

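/*
 * Queue a request on the current CPU's cryptd queue and kick that
 * queue's work item.  Each tfm context starts with a refcount_t, and
 * for transforms obtained through the cryptd_alloc_*() helpers below
 * that refcount is live, so it is bumped here to keep the tfm around
 * until the completion path drops the reference again.  -ENOSPC
 * (queue full, no backlog requested) is returned without scheduling
 * any work.
 */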
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out_put_cpu;

	refcount_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/* Called in workqueue context; does one crypto operation (via
 * req->complete) and reschedules itself if there is more work to
 * do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 * preempt_disable/enable is used to prevent being preempted by
	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
	 * cryptd_enqueue_request() being accessed from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	return crypto_sync_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

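/*
 * Worker-side handlers: the actual encryption/decryption runs
 * synchronously on the (non-async) child skcipher from the cryptd
 * workqueue, after which the caller's completion is invoked.  A call
 * with err == -EINPROGRESS is only the backlog notification; the
 * request is not processed again, the status is simply forwarded.
 */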
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

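/*
 * Build a "cryptd(<driver>)" skcipher instance around the requested
 * algorithm.  The wrapper advertises CRYPTO_ALG_ASYNC and inherits
 * CRYPTO_ALG_INTERNAL from the wrapped algorithm, so internal-only
 * ciphers remain hidden behind their cryptd front end.
 */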
static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

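/*
 * Common completion helper for the hash paths: call the original
 * completion with bottom halves disabled, then drop the reference
 * taken at enqueue time.  Whoever releases the last reference (this
 * path or cryptd_free_ahash()) frees the tfm.
 */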
static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

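/*
 * export/import operate directly on the shash_desc embedded in the
 * request context; they run synchronously and are not routed through
 * the cryptd queue.
 */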
static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
					CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		crypto_drop_shash(&ctx->spawn);
		kfree(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

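/*
 * Run an AEAD operation from the workqueue: the request's tfm is
 * switched to the child AEAD and the child's ->encrypt()/->decrypt()
 * is called synchronously, after which the saved completion is invoked
 * and the enqueue-time reference is dropped.  err == -EINPROGRESS
 * again only forwards the backlog notification.
 */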
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

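/*
 * Exported helper API.  cryptd_alloc_skcipher() wraps an existing
 * (typically CRYPTO_ALG_INTERNAL) skcipher in a "cryptd(...)" instance
 * created by this module.  An illustrative sketch of the calling
 * pattern, not part of this file (the "__xts(aes)" name and the
 * example_tfm identifier are placeholders):
 *
 *	struct cryptd_skcipher *example_tfm;
 *
 *	example_tfm = cryptd_alloc_skcipher("__xts(aes)",
 *					    CRYPTO_ALG_INTERNAL,
 *					    CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(example_tfm))
 *		return PTR_ERR(example_tfm);
 *
 * A wrapper then uses cryptd_skcipher_child(example_tfm) for its
 * synchronous fast path, submits requests on &example_tfm->base when
 * it needs the asynchronous workqueue path (consulting
 * cryptd_skcipher_queued() to preserve ordering), and finally calls
 * cryptd_free_skcipher(example_tfm), which releases the tfm once no
 * queued requests remain.
 */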
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

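/*
 * Equivalent helpers for ahash users, typically drivers that wrap an
 * internal (synchronous) shash behind cryptd.  An illustrative sketch,
 * not part of this file (the "__example-hash" name is a placeholder):
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("__example-hash",
 *				   CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *
 * Such a caller runs the child directly through cryptd_ahash_child()
 * and cryptd_shash_desc() when that is safe in its context, otherwise
 * it submits the ahash_request to &chash->base so the work happens on
 * the cryptd workqueue; cryptd_ahash_queued() lets it check for
 * ordering against already-queued requests, and cryptd_free_ahash()
 * drops the caller's reference on teardown.
 */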
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

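/*
 * Module bring-up: create the cryptd workqueue and the shared per-CPU
 * request queue first, then register the "cryptd" template so that
 * instances can start queueing work.
 */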
static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");