/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}
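/*
 * Queue a request on the submitting CPU's cryptd queue and schedule the
 * corresponding worker.  If crypto_enqueue_request() returns -ENOSPC the
 * request was not queued and no work is scheduled.  For transforms obtained
 * via the cryptd_alloc_*() helpers (which start the context refcount at 1),
 * each queued request takes an extra reference that the completion path
 * drops again.
 */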
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/* Called in workqueue context: do one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request().  local_bh_disable/enable is
	 * used to prevent cryptd_enqueue_request() from being accessed from
	 * software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}
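/*
 * Legacy ablkcipher/blkcipher path: perform the synchronous blkcipher
 * operation in the cryptd worker, then restore the caller's completion
 * function and invoke it with softirqs disabled.  The reference taken at
 * enqueue time is dropped once the request has really completed.
 */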
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}
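/*
 * Allocate a crypto instance with @head bytes of caller data in front of it
 * and @tail bytes of template context behind it.  cryptd_init_instance()
 * names the instance "cryptd(<driver-name>)" and gives it the underlying
 * algorithm's priority plus 50.
 */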
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;
	int err;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent,
				  crypto_sync_skcipher_get_flags(child) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}
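/*
 * Do the real encryption in the worker: a sync skcipher request for the
 * child transform is set up on the stack, run with CRYPTO_TFM_REQ_MAY_SLEEP
 * and wiped again before the original completion callback is invoked via
 * cryptd_skcipher_complete().
 */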
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}
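/*
 * Instantiate "cryptd(<skcipher>)": grab the underlying skcipher by name and
 * register an asynchronous wrapper whose encrypt/decrypt entry points only
 * enqueue the request; the real work happens later in the cryptd worker.
 */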
static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}
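/*
 * Common hash completion path: invoke the original completion function with
 * softirqs disabled and release the reference (and, on the final put, the
 * transform itself) taken when the request was queued.
 */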
static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
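/*
 * export/import are not deferred to the worker: they operate synchronously,
 * in the caller's context, on the shash_desc kept in the request context.
 */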
static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;
	desc->flags = req->base.flags;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}
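/*
 * Run the child AEAD operation synchronously in the worker.  The request is
 * retargeted at the child transform before calling into it, and the original
 * completion function (saved at enqueue time) is then invoked with softirqs
 * disabled.
 */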
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}
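/*
 * Instantiate "cryptd(<aead>)": like the skcipher case, the wrapper is fully
 * asynchronous and its encrypt/decrypt entry points only queue requests for
 * the per-CPU worker.
 */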
static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
		    CRYPTO_ALG_TYPE_BLKCIPHER)
			return cryptd_create_blkcipher(tmpl, tb, &queue);

		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
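/*
 * The exported helpers below are the programmatic interface for drivers that
 * hand work to cryptd directly instead of instantiating "cryptd(xxx)"
 * through the template: cryptd_alloc_*() allocates a cryptd-wrapped
 * transform and starts its context refcount at 1, *_child() returns the
 * underlying transform, *_queued() reports whether requests are still in
 * flight, and cryptd_free_*() drops the caller's reference.
 *
 * A minimal usage sketch (the algorithm name is only an example and error
 * handling is trimmed):
 *
 *	struct cryptd_ahash *ctfm;
 *
 *	ctfm = cryptd_alloc_ahash("__driver-internal-hash",
 *				  CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_ahash(ctfm);
 */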
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);
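/*
 * True while requests submitted through this transform are still queued for,
 * or being processed by, the cryptd worker, i.e. while the context refcount
 * is above its initial value of 1.
 */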
bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");