/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

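/*
 * Private stash used by the unaligned-result fallback paths below: the
 * original completion callback, callback data, result pointer and request
 * flags are saved here while the request is temporarily rewritten to hash
 * into the aligned bounce buffer ubuf (see ahash_save_req()).
 */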
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

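/*
 * Map the page the walk currently points at (kmap() for async walks,
 * kmap_atomic() otherwise) and return how many bytes may be processed in
 * this step: at most the remainder of the page, and clamped so that an
 * unaligned offset only advances to the next alignmask boundary.
 */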
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
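
/*
 * A minimal usage sketch, modelled on shash_ahash_update() in shash.c:
 * start a walk over the request's scatterlist, feed each mapped chunk to
 * an update function, and pass its return value back in as the "err"
 * argument of crypto_hash_walk_done():
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = crypto_shash_update(desc, walk.data, nbytes);
 */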

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

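/*
 * The backend's ->setkey() may rely on the key being aligned to the
 * algorithm's alignmask, so an unaligned key is first copied into a
 * temporary aligned buffer; kzfree() wipes the copy afterwards.
 */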
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (err)
		return err;

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

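/*
 * kmalloc()ed memory (and thus priv->ubuf below) is already aligned to
 * crypto_tfm_ctx_alignment(), so only the part of the alignmask beyond
 * that guarantee needs extra slack for PTR_ALIGN() on the result buffer.
 */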
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, so an
	 * explanation is necessary. See include/crypto/hash.h and
	 * include/linux/crypto.h to understand the layout of the
	 * structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), and since
	 * that is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

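/*
 * Common dispatch for final/finup/digest: if the caller's result buffer
 * violates the algorithm's alignmask, detour through the bounce-buffer
 * machinery above, otherwise invoke the operation directly.
 */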
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

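/*
 * digest() starts a hash from scratch, so a keyed algorithm that has not
 * been given a key yet (CRYPTO_TFM_NEED_KEY still set) must be rejected
 * here with -ENOKEY.
 */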
int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

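/*
 * Default ->finup() for drivers that only implement ->update() and
 * ->final(): chain the two calls, continuing through the done1/done2
 * completions above whenever a step finishes asynchronously.
 */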
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

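/*
 * Wire up the per-tfm operation pointers. Algorithms registered through
 * the legacy shash interface are wrapped by crypto_init_shash_ops_async()
 * instead. A keyed algorithm starts out with CRYPTO_TFM_NEED_KEY set
 * unless it is marked CRYPTO_ALG_OPTIONAL_KEY.
 */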
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
			crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
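
/*
 * A minimal usage sketch (error handling abbreviated; "sha256", my_done_cb,
 * my_data, sg, result and nbytes are illustrative only):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_data);
 *	ahash_request_set_crypt(req, sg, result, nbytes);
 *
 *	err = crypto_ahash_digest(req);
 *	(-EINPROGRESS or -EBUSY mean the result arrives via my_done_cb)
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */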

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

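/*
 * Sanity checks shared by algorithm and instance registration: digestsize
 * and statesize are capped at PAGE_SIZE / 8, and a statesize of zero is
 * rejected so that export()/import() always have a usable state blob.
 */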
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");