/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max queue depth");

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

/*
 * The refcnt in the per-transform contexts below is only used by transforms
 * obtained through the cryptd_alloc_*() helpers; it keeps the transform
 * alive until every request queued on it has completed.
 */
struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: do one real encryption/decryption
 * (via req->complete) and reschedule itself if there is more work
 * to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable prevents this worker from being
	 * preempted by cryptd_enqueue_request(); local_bh_disable/enable
	 * prevents races with cryptd_enqueue_request() running in softirq
	 * context on the same CPU.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;
	int err;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent,
				  crypto_sync_skcipher_get_flags(child) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	/*
	 * The spawn was grabbed with CRYPTO_ALG_ASYNC set in the mask and
	 * clear in the type, so the child is always synchronous and the
	 * cast to a sync skcipher is safe.
	 */
	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
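
/*
 * Usage sketch (illustrative only, kept inside a comment so it is not
 * compiled): roughly how an accelerated cipher driver might consume the
 * cryptd_alloc_*() helpers exported above.  The wrapper grabs a
 * "cryptd(...)" instance of its own internal algorithm, runs the
 * synchronous child directly when that is safe, and bounces the request
 * through the cryptd queue otherwise.  The names xyz_ctx, xyz_init,
 * xyz_encrypt, xyz_exit, "__xyz-cbc-aes" and the can_run_inline()
 * predicate are hypothetical; only the cryptd_*() helpers and the generic
 * skcipher calls are real APIs.
 *
 *	struct xyz_ctx {
 *		struct cryptd_skcipher *cryptd_tfm;
 *	};
 *
 *	static int xyz_init(struct crypto_skcipher *tfm)
 *	{
 *		struct xyz_ctx *ctx = crypto_skcipher_ctx(tfm);
 *		struct cryptd_skcipher *cryptd_tfm;
 *
 *		// Ask for the async cryptd wrapper around the driver's
 *		// internal implementation, i.e. "cryptd(__xyz-cbc-aes)".
 *		cryptd_tfm = cryptd_alloc_skcipher("__xyz-cbc-aes",
 *						   CRYPTO_ALG_INTERNAL,
 *						   CRYPTO_ALG_INTERNAL);
 *		if (IS_ERR(cryptd_tfm))
 *			return PTR_ERR(cryptd_tfm);
 *		ctx->cryptd_tfm = cryptd_tfm;
 *		return 0;
 *	}
 *
 *	static int xyz_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct xyz_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		if (can_run_inline())
 *			// Run the synchronous child directly.
 *			skcipher_request_set_tfm(req,
 *				cryptd_skcipher_child(ctx->cryptd_tfm));
 *		else
 *			// Defer to the cryptd per-CPU queue.
 *			skcipher_request_set_tfm(req, &ctx->cryptd_tfm->base);
 *
 *		return crypto_skcipher_encrypt(req);
 *	}
 *
 *	static void xyz_exit(struct crypto_skcipher *tfm)
 *	{
 *		struct xyz_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		cryptd_free_skcipher(ctx->cryptd_tfm);
 *	}
 */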