/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

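/*
 * Upper bound on the number of requests queued per CPU: past this,
 * crypto_enqueue_request() fails with -EBUSY, and only requests marked
 * CRYPTO_TFM_REQ_MAY_BACKLOG are kept on the backlog list.
 */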
#define CRYPTD_MAX_CPU_QLEN 1000

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};
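
/*
 * Each possible CPU gets its own crypto_queue and work item; a request is
 * queued and later serviced on the CPU that submitted it (see
 * cryptd_enqueue_request()), so the queues need no cross-CPU locking.
 */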

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;
	bool may_backlog;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

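	/*
	 * Every cryptd context structure starts with an atomic_t refcnt, so
	 * the tfm context can be treated as a bare refcount here.  The count
	 * is only active (non-zero) for tfms handed out by the
	 * cryptd_alloc_*() helpers below; for those, take one reference per
	 * queued request so the tfm cannot be freed while work is pending.
	 */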
	refcnt = crypto_tfm_ctx(request->tfm);
	may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;

	if (err == -EBUSY && !may_backlog)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context; perform one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable()/preempt_enable() prevent this worker
	 * from being preempted by cryptd_enqueue_request();
	 * local_bh_disable()/local_bh_enable() prevent
	 * cryptd_enqueue_request() from touching the queue from softirq
	 * context.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

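/*
 * CRYPTO_ALG_INTERNAL marks implementations that must not be used directly
 * (for instance SIMD code that assumes the FPU is available).  Carry the
 * flag over from the template parameters so that cryptd can wrap such
 * algorithms while the resulting instance advertises the same restriction.
 */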
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

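/*
 * Layout of the buffer returned by cryptd_alloc_instance():
 *
 *	p -> [ head bytes ][ struct crypto_instance ][ tail bytes ]
 *
 * "head" reserves room in front of the instance (ahash_instance_headroom()
 * for hashes) and "tail" holds the template context, such as struct
 * hashd_instance_ctx.
 */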
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
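
/*
 * The template is instantiated by name through the regular crypto API.
 * A minimal sketch (hypothetical caller, not part of this file):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("cryptd(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	... submit ahash requests; cryptd completes them asynchronously ...
 *	crypto_free_ahash(tfm);
 *
 * cryptd_create() parses the wrapped algorithm name and dispatches on its
 * type, as in the switch statement above.
 */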

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
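
/*
 * A sketch of the intended calling pattern, modelled on SIMD cipher glue
 * code (the algorithm name is illustrative):
 *
 *	struct cryptd_ablkcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni",
 *				       CRYPTO_ALG_INTERNAL,
 *				       CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_ablkcipher(ctfm);
 *
 * When its SIMD context is usable the caller can drive
 * cryptd_ablkcipher_child() synchronously; otherwise it submits through
 * &ctfm->base and the daemon runs the child in process context.
 */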

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
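
/*
 * A sketch of the pattern used by SIMD hash wrappers (the name is
 * illustrative):
 *
 *	struct cryptd_ahash *ctfm;
 *
 *	ctfm = cryptd_alloc_ahash("__ghash-pclmulqdqni",
 *				  CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *
 * On the fast path the caller runs the child shash directly via
 * cryptd_ahash_child() and cryptd_shash_desc(); otherwise it forwards the
 * request to &ctfm->base and lets cryptd do the work asynchronously.
 */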

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
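
/*
 * A sketch of how an AEAD wrapper chooses between the direct and daemon
 * paths, modelled on x86 AES-GCM glue code (illustrative only):
 *
 *	struct cryptd_aead *ctfm = ...;	(set up earlier via cryptd_alloc_aead())
 *
 *	if (irq_fpu_usable())
 *		aead_request_set_tfm(req, cryptd_aead_child(ctfm));
 *	else
 *		aead_request_set_tfm(req, &ctfm->base);
 *	return crypto_aead_encrypt(req);
 */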

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");