// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out_put_cpu;

	refcount_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}
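
/*
 * Note on the refcount handling above: cryptd_enqueue_request() reads the
 * tfm context as a bare refcount_t pointer, which works only because every
 * cryptd context structure (cryptd_skcipher_ctx, cryptd_hash_ctx,
 * cryptd_aead_ctx) places its refcount_t as the first member.  The extra
 * reference taken here is dropped by the completion paths below once the
 * workqueue worker has run the request, so a tfm released with
 * cryptd_free_*() while requests are still queued stays alive until the
 * last of them completes.
 */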

/*
 * Called in workqueue context: do one real encryption/decryption (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 * preempt_disable/enable is used to prevent being preempted by
	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
	 * cryptd_enqueue_request() being accessed from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	return crypto_sync_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}
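
/*
 * Illustrative sketch of one way the template above can be instantiated:
 * a caller asks the crypto API for "cryptd(<alg>)" and gets back an
 * asynchronous skcipher whose requests are run from the cryptd workqueue.
 * The algorithm name below is only an example and error handling is
 * elided:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cryptd(cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 * Requests issued on such a tfm return -EINPROGRESS and complete through
 * the caller-supplied callback once cryptd_queue_worker() has run them.
 */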

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
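
/*
 * Sketch of one way the resulting hash instance can be reached from the
 * ahash API (the algorithm name is only an example and error handling is
 * elided):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("cryptd(sha256)", 0, 0);
 *
 * The returned tfm is asynchronous: init/update/final/finup/digest enqueue
 * the request on the current CPU's cryptd queue.  The shash state lives in
 * the request context (struct cryptd_hash_request_ctx), which is what lets
 * cryptd_hash_export()/cryptd_hash_import() operate per request.
 */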

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
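
/*
 * Sketch of how the exported helpers above are meant to be combined by a
 * driver that wants to defer work to cryptd.  The algorithm name is only
 * an example and error handling is elided:
 *
 *	struct cryptd_ahash *ctfm = cryptd_alloc_ahash("sha256", 0, 0);
 *
 *	// async path: submit requests on &ctfm->base as a normal ahash
 *	// sync path (already in process context): use the child directly
 *	struct crypto_shash *child = cryptd_ahash_child(ctfm);
 *
 *	// cryptd_ahash_queued(ctfm) reports whether requests are still
 *	// pending; release the caller's reference with:
 *	cryptd_free_ahash(ctfm);
 *
 * cryptd_free_ahash() only drops this caller's reference; if requests are
 * still queued, the underlying tfm is freed by the completion path once
 * the last of them finishes (see cryptd_hash_complete()).
 */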

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");