// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The "may sleep" test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
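
/*
 * Typical consumption of the walk API, modelled on shash_ahash_update()
 * in crypto/shash.c (a minimal sketch; my_process() is a hypothetical
 * per-driver helper returning 0 or a negative errno):
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = my_process(ctx, walk.data, nbytes);
 *
 *	return nbytes;
 *
 * Each iteration exposes one mapped, alignment-trimmed chunk at
 * walk.data; crypto_hash_walk_done() unmaps it and advances to the next
 * chunk, returning 0 once the request is exhausted or propagating the
 * error passed in.
 */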

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
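
/*
 * The only difference from crypto_hash_walk_first() is the
 * CRYPTO_ALG_ASYNC walk flag, which makes hash_walk_next() use kmap()
 * instead of kmap_atomic(), so the caller may sleep while a chunk is
 * mapped.
 */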

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
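
/*
 * Keyed-hash setup from the caller's side (a minimal sketch; the
 * "hmac(sha256)" name and the key/keylen variables are illustrative):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *
 * On failure CRYPTO_TFM_NEED_KEY is (re)set, so subsequent digest
 * requests fail with -ENOKEY until a valid key is accepted.
 */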

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look like this:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the ADJUSTED request's private data (what was req->priv). */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request; see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct crypto_async_request *req" here is in fact the
	 * "req.base" of the ADJUSTED request from ahash_op_unaligned();
	 * since its ->data was set to point back to the request itself,
	 * "areq" above is the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}
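
/*
 * Note: the unaligned path above costs an extra allocation plus a
 * memcpy of the digest. Callers can avoid it by supplying a result
 * buffer that satisfies the algorithm's alignmask; kmalloc() memory is
 * aligned to at least ARCH_KMALLOC_MINALIGN, which is typically enough.
 */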

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
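
/*
 * One-shot digest from the caller's perspective, using the crypto_wait
 * helpers (a minimal sketch; "sha256", sg, out and buf_len are
 * illustrative, and cleanup/error handling is omitted):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, out, buf_len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 * crypto_ahash_digest() may return -EINPROGRESS or -EBUSY for async
 * implementations; crypto_wait_req() folds that into a synchronous
 * error code.
 */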

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}
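
/*
 * Default finup: drivers that do not supply ->finup get this fallback
 * (see crypto_ahash_init_tfm() below), which chains ->update and
 * ->final while preserving asynchronous completion at either step,
 * hence the done1/done2 pair above.
 */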

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
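
/*
 * Allocation sketch: the type/mask pair selects implementation flags.
 * For instance, a caller that cannot tolerate asynchronous completion
 * can mask out async implementations (illustrative only):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_ahash(tfm);
 */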

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
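
/*
 * Driver-side registration sketch (all names and field values are
 * illustrative; real drivers also set cra_ctxsize, halg.statesize,
 * export/import, etc.):
 *
 *	static struct ahash_alg my_sha256_alg = {
 *		.init	= my_sha256_init,
 *		.update	= my_sha256_update,
 *		.final	= my_sha256_final,
 *		.digest	= my_sha256_digest,
 *		.halg = {
 *			.digestsize = 32,
 *			.base = {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydev",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = 64,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha256_alg);
 *
 * Note that ahash_prepare_alg() above rejects a zero statesize, so
 * halg.statesize must be set even if the driver never migrates state.
 */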

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");