/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}
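
/*
 * Queue a request on the current CPU's queue and kick that CPU's
 * worker.  Every cryptd context struct lays out an atomic_t refcount
 * first, so the tfm context can be treated as one here: once the user
 * has taken the initial reference via cryptd_alloc_*(), an extra
 * reference is taken per queued request and dropped again in the
 * completion paths.
 */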
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/* Called in workqueue context, do one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request().  local_bh_disable/enable
	 * is used to prevent cryptd_enqueue_request() being accessed from
	 * software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}
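
/*
 * Run the actual blkcipher operation in process context.  err is
 * -EINPROGRESS when the request is being moved off the backlog, in
 * which case only the completion callback is propagated.
 */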
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}
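
/*
 * For example, wrapping a child with cra_name "cbc(aes)" and
 * cra_driver_name "cbc(aes-generic)" yields an instance with the same
 * cra_name but cra_driver_name "cryptd(cbc(aes-generic))", registered
 * at the child's priority plus 50 so the async version wins lookup.
 */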
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;
	int err;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent,
				  crypto_sync_skcipher_get_flags(child) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}
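
/*
 * The encrypt/decrypt workers below run the synchronous child on a
 * stack-allocated subrequest; the original request's src/dst/iv are
 * passed through unchanged, so no data is copied.
 */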
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}
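
/*
 * Build a "cryptd(...)" skcipher instance around the child algorithm
 * named by tb[1]; the child must be synchronous (!CRYPTO_ALG_ASYNC).
 */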
static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}
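
/*
 * Common completion for all hash work items: call the saved completion
 * with softirqs disabled, then drop the per-request reference taken in
 * cryptd_enqueue_request(), freeing the tfm if the last user reference
 * is already gone.
 */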
static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}
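
/* digest is init + update + final as a single work item, run on the
 * child shash directly against the request's scatterlist. */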
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;
	desc->flags = req->base.flags;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}
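
/*
 * Run the child AEAD operation in process context.  No subrequest is
 * used: the request's tfm is switched to the child, which is safe
 * because cryptd's request context is sized to cover the child's (see
 * cryptd_aead_init_tfm()) and the completion pointer was already saved.
 */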
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}
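
/*
 * Build a "cryptd(...)" AEAD instance; the counterpart of
 * cryptd_create_skcipher() for the aead_instance API.
 */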
static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
		    CRYPTO_ALG_TYPE_BLKCIPHER)
			return cryptd_create_blkcipher(tmpl, tb, &queue);

		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
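
/*
 * Typical use of the cryptd_alloc_*() helpers (sketch; the driver name
 * is hypothetical): an arch driver wraps its CRYPTO_ALG_INTERNAL
 * implementation so it can be driven from any context via the
 * workqueue:
 *
 *	struct cryptd_ahash *hash;
 *
 *	hash = cryptd_alloc_ahash("__driver-hash-example",
 *				  CRYPTO_ALG_INTERNAL,
 *				  CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(hash))
 *		return PTR_ERR(hash);
 *	...
 *	cryptd_free_ahash(hash);
 *
 * The cryptd_*_child() accessors below expose the wrapped synchronous
 * tfm for callers that can run it directly, e.g. when SIMD is usable.
 */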
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);
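
/*
 * Give callers access to the shash_desc embedded in the cryptd request
 * context, e.g. to finish a partially hashed request synchronously on
 * the child tfm.
 */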
struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");