/* xref: /openbmc/linux/crypto/ahash.c (revision be709d48) */
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

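/*
 * Overview: a crypto_ahash transform is allocated with
 * crypto_alloc_ahash(), keyed (where required) with
 * crypto_ahash_setkey(), and driven through ahash_request objects whose
 * completion is signalled via the callback installed with
 * ahash_request_set_callback().  A usage sketch follows
 * crypto_ahash_digest() below.
 */
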
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

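/*
 * Per-request private state used when the API must bounce an unaligned
 * result buffer: the original completion callback, callback data, result
 * pointer and request flags are saved here, and ubuf provides the
 * aligned scratch buffer that temporarily replaces req->result (see
 * ahash_save_req() and ahash_restore_req() below).
 */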
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The "may sleep" test only makes sense for sync users;
		 * async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
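
/*
 * Typical use of the walk interface (a sketch, modelled on the loop in
 * shash_ahash_update()): process each mapped chunk and feed the result
 * back into crypto_hash_walk_done() until it returns zero or a negative
 * error:
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = crypto_shash_update(desc, walk.data, nbytes);
 */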

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
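
/*
 * crypto_ahash_walk_first() differs from crypto_hash_walk_first() only
 * in setting CRYPTO_ALG_ASYNC in walk->flags, which makes the walk
 * helpers map pages with kmap()/kunmap() instead of
 * kmap_atomic()/kunmap_atomic(), so the caller may sleep between steps.
 */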

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
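
/*
 * A keyed ahash (e.g. "hmac(sha256)") without CRYPTO_ALG_OPTIONAL_KEY
 * must be keyed before use; until crypto_ahash_setkey() succeeds,
 * crypto_ahash_digest() fails with -ENOKEY.  Sketch:
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (err)
 *		return err;
 */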

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, so an
	 * explanation is necessary. See include/crypto/hash.h and
	 * include/linux/crypto.h to understand the layout of structures
	 * used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can
	 * store the result in an aligned buffer. We will call the modified
	 * request an ADJUSTED request.
	 *
	 * The adjusted request will look like this:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          field is for internal use by the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its contents!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct crypto_async_request *req" here is in fact the
	 * "req.base" of the ADJUSTED request from ahash_op_unaligned();
	 * since its ->data was set to point back to the request itself,
	 * "areq" is that same ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
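
/*
 * Usage sketch (informal, not part of this file; error handling is
 * abbreviated and "data"/"datalen" are caller-supplied): a one-shot
 * digest over a linear buffer, waiting for asynchronous completion with
 * crypto_wait_req().  32 is the SHA-256 digest size.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 digest[32];
 *	int err;
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_crypt(req, &sg, digest, datalen);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */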
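
/*
 * Default finup implementation for drivers that only provide final():
 * ahash_def_finup() saves the request with ahash_save_req(), issues
 * update(), and ahash_def_finup_finish1() then chains final(); the
 * done1/done2 callbacks below handle the asynchronous completions of
 * the two steps and restore the original request at the end.
 */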
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	return 0;
}

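/*
 * Algorithms whose cra_type is not &crypto_ahash_type are synchronous
 * shash algorithms exposed through the ahash API; their transform
 * context only holds a pointer to the underlying struct crypto_shash
 * (set up by crypto_init_shash_ops_async() above), hence the smaller
 * extsize.
 */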
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
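
/*
 * Registration sketch (hypothetical driver code, not part of this
 * file): drivers typically register an array of ahash algorithms at
 * probe time and unregister it on removal:
 *
 *	static struct ahash_alg my_ahash_algs[] = { ... };
 *
 *	err = crypto_register_ahashes(my_ahash_algs,
 *				      ARRAY_SIZE(my_ahash_algs));
 *	...
 *	crypto_unregister_ahashes(my_ahash_algs,
 *				  ARRAY_SIZE(my_ahash_algs));
 */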

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");