xref: /openbmc/linux/crypto/ahash.c (revision a72b9869)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

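/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): one way a caller might compute a digest through the ahash API,
 * waiting synchronously for asynchronous drivers.  The algorithm name
 * "sha256", the linear input buffer and the wait-based completion are
 * assumptions made for the example only.
 */
static int __maybe_unused ahash_usage_example(const void *buf,
					      unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleeping wait. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
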
static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

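/*
 * Scatterlist walk helpers.  hash_walk_next() maps the current page with
 * kmap_local_page() and returns the number of bytes the caller may hash,
 * limited to the end of the page and, for misaligned offsets, to the next
 * alignmask boundary.  crypto_hash_walk_done() is called afterwards to
 * unmap the page and advance to the next page or scatterlist entry.
 */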
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_local_page(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	kunmap_local(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

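/*
 * If the caller's key is not aligned to the algorithm's alignment mask,
 * copy it into a freshly allocated, suitably aligned buffer before calling
 * the driver's ->setkey(), and wipe the copy with kfree_sensitive() when
 * done.
 */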
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

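/*
 * Keyed hashes (algorithms with a ->setkey() and without
 * CRYPTO_ALG_OPTIONAL_KEY) carry the CRYPTO_TFM_NEED_KEY flag until a key
 * has been set successfully; crypto_ahash_digest() refuses to run while
 * the flag is set.
 */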
static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

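/*
 * ahash_save_req() builds a private subrequest whose result buffer is
 * aligned to the algorithm's alignment mask, optionally carrying over the
 * current hash state via export/import.  The subrequest is stored in
 * req->priv and is torn down by ahash_restore_req(), which copies the
 * digest back into the caller's result buffer on success.
 */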
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
			  bool has_state)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request *subreq;
	unsigned int subreq_size;
	unsigned int reqsize;
	u8 *result;
	gfp_t gfp;
	u32 flags;

	subreq_size = sizeof(*subreq);
	reqsize = crypto_ahash_reqsize(tfm);
	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
	subreq_size += reqsize;
	subreq_size += ds;
	subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);

	flags = ahash_request_flags(req);
	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	subreq = kmalloc(subreq_size, gfp);
	if (!subreq)
		return -ENOMEM;

	ahash_request_set_tfm(subreq, tfm);
	ahash_request_set_callback(subreq, flags, cplt, req);

	result = (u8 *)(subreq + 1) + reqsize;
	result = PTR_ALIGN(result, alignmask + 1);

	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);

	if (has_state) {
		void *state;

		state = kmalloc(crypto_ahash_statesize(tfm), gfp);
		if (!state) {
			kfree(subreq);
			return -ENOMEM;
		}

		crypto_ahash_export(req, state);
		crypto_ahash_import(subreq, state);
		kfree_sensitive(state);
	}

	req->priv = subreq;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request *subreq = req->priv;

	if (!err)
		memcpy(req->result, subreq->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	req->priv = NULL;

	kfree_sensitive(subreq);
}

static void ahash_op_unaligned_done(void *data, int err)
{
	struct ahash_request *areq = data;

	if (err == -EINPROGRESS)
		goto out;

	/* First copy the digest from the subrequest back into req->result. */
	ahash_restore_req(areq, err);

out:
	/* Complete the ORIGINAL request. */
	ahash_request_complete(areq, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *),
			      bool has_state)
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
	if (err)
		return err;

	err = op(req->priv);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

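/*
 * Dispatch helper: run the operation directly when the caller's result
 * buffer already satisfies the algorithm's alignment mask, otherwise go
 * through the unaligned path built on a temporary subrequest.
 */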
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *),
			   bool has_state)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op, has_state);

	return op(req);
}

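/*
 * Exported entry points.  crypto_ahash_final(), crypto_ahash_finup() and
 * crypto_ahash_digest() wrap the per-transform callbacks with the
 * alignment dispatch above and with crypto statistics accounting; digest
 * additionally refuses to run on a keyed hash that has not been given a
 * key yet.
 */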
int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final, true);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup, true);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest, false);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

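/*
 * Default ->finup() for drivers that only provide ->update() and
 * ->final(): ahash_def_finup() runs update on a saved subrequest and then
 * final, with the _done1/_done2 callbacks handling the case where either
 * step completes asynchronously.
 */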
static void ahash_def_finup_done2(void *data, int err)
{
	struct ahash_request *areq = data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	struct ahash_request *subreq = req->priv;

	if (err)
		goto out;

	subreq->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(subreq);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
	struct ahash_request *areq = data;
	struct ahash_request *subreq;

	if (err == -EINPROGRESS)
		goto out;

	subreq = areq->priv;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = ahash_def_finup_finish1(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	ahash_request_complete(areq, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1, true);
	if (err)
		return err;

	err = tfm->update(req->priv);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

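/*
 * Transform setup.  If the algorithm behind this tfm is actually a
 * synchronous shash (its cra_type is not crypto_ahash_type), it is wrapped
 * through the async shash glue; otherwise the ahash_alg callbacks are
 * copied into the tfm and the optional ->init_tfm()/->exit_tfm() hooks are
 * honoured.
 */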
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

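/*
 * A tfm backed by an shash only needs room for a pointer to the wrapped
 * shash transform; native ahash algorithms use the generic context size.
 */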
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

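/*
 * Lookup helpers: crypto_grab_ahash() binds a template instance to an
 * ahash spawn, crypto_alloc_ahash() allocates a transform for an ordinary
 * user, and crypto_has_ahash() only checks whether an algorithm exists.
 */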
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

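/*
 * Registration.  ahash_prepare_alg() sanity-checks the digest and state
 * sizes against HASH_MAX_DIGESTSIZE/HASH_MAX_STATESIZE and stamps the
 * algorithm as an ahash before it is registered with the crypto core.
 */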
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

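/*
 * Report whether a hash algorithm is keyed (e.g. HMAC), looking through to
 * the underlying shash when the algorithm is not a native ahash.
 */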
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");