/* xref: /openbmc/linux/crypto/cryptd.c (revision a6377d90) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

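/*
 * Request queueing is per CPU: each possible CPU gets its own crypto_queue
 * plus a work_struct that drains it on the kcrypto_wq workqueue, so the
 * submission path only ever touches the local CPU's queue.
 */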
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

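/*
 * Each per-transform context below starts with a reference count.  It is set
 * to 1 by the cryptd_alloc_*() helpers at the bottom of this file, bumped in
 * cryptd_enqueue_request() while a request is in flight and dropped again on
 * completion, so that cryptd_free_*() can run safely while requests are
 * still queued.  Note that cryptd_enqueue_request() relies on the refcnt
 * being the first member of each of these context structures.
 */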
struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

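/*
 * Queue a request on the current CPU's queue and kick the corresponding work
 * item.  crypto_enqueue_request() normally returns -EINPROGRESS (or -EBUSY
 * for a backlogged request); -ENOSPC means the queue was full and the
 * request was not accepted, in which case neither the work item nor the
 * reference count is touched.
 */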
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: do one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent being
	 * preempted by a task calling cryptd_enqueue_request();
	 * local_bh_disable/enable is used to prevent cryptd_enqueue_request()
	 * from being entered from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

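/*
 * Allocate an instance with @head bytes in front of the crypto_instance and
 * @tail bytes behind it, so that callers can embed the instance in a larger,
 * type-specific structure (e.g. an ahash_instance) with a single allocation.
 */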
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;
	int err;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent,
				  crypto_sync_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

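/*
 * The encrypt/decrypt callbacks below run from cryptd_queue_worker().  They
 * forward the original request's buffers to the synchronous child cipher
 * through an on-stack subrequest and then complete the caller's request,
 * with BHs disabled, via cryptd_skcipher_complete().
 */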
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

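/*
 * The ahash wrapper keeps a shash_desc for the child algorithm in the
 * request context, so the per-request hash state travels with the request
 * and export/import can operate on it directly.
 */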
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

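/*
 * Unlike the skcipher path, the AEAD path reuses the caller's request
 * structure directly: the request is retargeted at the child transform and
 * handed to the child's encrypt/decrypt method.  This is why
 * cryptd_aead_init_tfm() sizes the request context as the maximum of
 * cryptd's own context and the child's reqsize.
 */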
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

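/*
 * A single "cryptd" template serves skciphers, hashes and AEADs; the
 * algorithm type requested by the caller selects the matching create
 * helper.  (Skcipher instances are requested under the legacy
 * CRYPTO_ALG_TYPE_BLKCIPHER type here.)
 */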
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

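/*
 * The exported helpers below are the programming interface for drivers that
 * hand work off to cryptd explicitly (typically drivers whose synchronous
 * implementation cannot run in every context, e.g. SIMD code).  A minimal
 * usage sketch, with an illustrative algorithm name only:
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("__xts-aes-example",
 *				     CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	crypto_skcipher_setkey(&ctfm->base, key, keylen);
 *	...
 *	cryptd_free_skcipher(ctfm);
 *
 * Setting the key on the cryptd transform forwards it to the child via
 * cryptd_skcipher_setkey(); cryptd_skcipher_queued() reports whether
 * requests are still pending, and the refcounting above keeps the transform
 * alive until they have completed.
 */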
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

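/*
 * The same pattern applies to hashes: cryptd_alloc_ahash() returns a
 * "cryptd(...)" ahash whose synchronous child can be reached with
 * cryptd_ahash_child(), and cryptd_shash_desc() exposes the per-request
 * shash_desc for callers that want to finish a hash synchronously.
 */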
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

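/*
 * AEAD flavour of the same interface.  A caller typically wraps an internal
 * implementation, e.g. (illustrative name only):
 *
 *	struct cryptd_aead *ctfm;
 *
 *	ctfm = cryptd_alloc_aead("__gcm-aes-example",
 *				 CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *
 * and then submits requests either directly to cryptd_aead_child(ctfm) or
 * to the cryptd transform itself, depending on the context it is running in.
 */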
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

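/*
 * The per-CPU queues must exist before the template is registered, because a
 * registered template can be instantiated (and requests enqueued)
 * immediately; cryptd_init() therefore sets up the queues first and tears
 * them down again if template registration fails.
 */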
static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");