/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 1000

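/*
 * Per-CPU state: each possible CPU owns its own request queue and work
 * item.  A request is queued on the CPU that submits it and is later
 * processed on that same CPU from the kcrypto_wq workqueue, so the
 * queues need no cross-CPU locking, only preemption and softirq
 * protection.
 */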
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

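/*
 * Queue the request on the current CPU's queue and kick that CPU's
 * worker.  This relies on the atomic_t refcnt being the first member
 * of every cryptd context structure above, which is why the result of
 * crypto_tfm_ctx() can be read directly as a refcount here.  Once the
 * refcount is in use (non-zero), each queued request takes an extra
 * reference so the tfm cannot go away while requests are in flight.
 * A full queue yields -EBUSY; without CRYPTO_TFM_REQ_MAY_BACKLOG the
 * request was not queued at all in that case.
 */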
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;
	bool may_backlog;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);
	may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;

	if (err == -EBUSY && !may_backlog)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: perform one real crypto operation (via
 * req->complete) and reschedule itself if more requests are queued.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable prevents us from being
	 * preempted by cryptd_enqueue_request(); local_bh_disable/enable
	 * prevents cryptd_enqueue_request() from running in a softirq
	 * on this CPU while we manipulate the queue.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

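/*
 * Propagate the CRYPTO_ALG_INTERNAL bit from the template arguments so
 * that cryptd instances built on top of "internal" (not directly
 * usable) algorithms are themselves marked internal.
 */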
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

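/*
 * Runs in workqueue context: do the actual (synchronous) cipher call
 * on the child blkcipher, restore the caller's completion function and
 * invoke it with bottom halves off.  The tfm reference taken at
 * enqueue time is dropped once the request truly completes (i.e. not
 * for -EINPROGRESS backlog notifications).
 */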
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

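/*
 * Fill in the common fields of a cryptd instance: the driver name
 * becomes "cryptd(<child driver name>)" and the priority is raised by
 * 50 so the async wrapper outranks the algorithm it wraps.
 */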
static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

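/*
 * Common completion tail for all hash operations: call the original
 * completion with bottom halves disabled and release the tfm reference
 * taken at enqueue time once the request has truly finished.
 */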
static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

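/*
 * Runs in workqueue context: redirect the request to the child AEAD,
 * run the real encrypt/decrypt, then complete the request with bottom
 * halves off and drop the enqueue-time tfm reference on final
 * completion.
 */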
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	tfm = crypto_aead_reqtfm(req);
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

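/*
 * Template entry point: dispatch instance creation on the type of the
 * algorithm being wrapped (blkcipher, hash or AEAD).
 */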
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

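/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * wanting "sha256" hashed asynchronously in process context could do
 * roughly the following, assuming "sha256" resolves to a shash:
 *
 *	struct cryptd_ahash *tfm;
 *
 *	tfm = cryptd_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	... submit ahash_request objects against &tfm->base ...
 *	cryptd_free_ahash(tfm);
 */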
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

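/*
 * Usage sketch (hypothetical caller): the AEAD helpers mirror the hash
 * ones.  An accelerated driver typically wraps an internal-only
 * implementation; the algorithm name below is illustrative only:
 *
 *	struct cryptd_aead *tfm;
 *
 *	tfm = cryptd_alloc_aead("__some-internal-gcm", CRYPTO_ALG_INTERNAL,
 *				CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	... use cryptd_aead_child(tfm) for the synchronous fast path ...
 *	cryptd_free_aead(tfm);
 */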
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");