/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

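/*
 * Private bounce state for requests whose result buffer is not aligned for
 * the underlying algorithm: the caller's completion callback, callback data
 * and result pointer are stashed here while the request is rewritten, and
 * ubuf provides suitably aligned scratch space for the digest.
 */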
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
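
/*
 * Typical caller-side walk loop, a minimal sketch (process() stands in for
 * a hypothetical per-driver block handler; it is not part of this file):
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes, err;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, err))
 *		err = process(walk.data, nbytes);
 *
 * crypto_hash_walk_done() returns the size of the next chunk to process,
 * 0 once the walk is complete, or a negative errno propagated from the
 * previous step.
 */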

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
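
/*
 * crypto_ahash_walk_first() differs from crypto_hash_walk_first() only in
 * setting CRYPTO_ALG_ASYNC in walk->flags, which makes hash_walk_next()
 * use kmap() instead of kmap_atomic() so the walk may sleep. The
 * BUILD_BUG_ON above guards against that bit ever colliding with the
 * request flags copied through CRYPTO_TFM_REQ_MASK.
 */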

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}
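
/*
 * Worked example of the sizing above, assuming alignmask = 7 (8-byte
 * alignment) and keylen = 20: absize = 27, so wherever kmalloc() places
 * the buffer, rounding it up to the next 8-byte boundary costs at most 7
 * bytes and still leaves at least 20 usable bytes for the key copy.
 */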

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
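
/*
 * ahash_align_buffer_size() adds only the slack that the allocation cannot
 * already guarantee: the ubuf[] member of struct ahash_request_priv is
 * declared CRYPTO_MINALIGN_ATTR, so mask bits below
 * crypto_tfm_ctx_alignment() are satisfied for free and are masked out.
 */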

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look like this:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the private bounce state of the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	/*
	 * The request is still in flight (-EINPROGRESS may also be the
	 * backlog notification); the final completion callback will copy
	 * the result and restore the request.
	 */
	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct crypto_async_request *req" here is in fact the
	 * "req.base" from the ADJUSTED request from ahash_op_unaligned(),
	 * thus as req->data is a pointer to self, it is also the ADJUSTED
	 * "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

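/*
 * The three exported entry points below all route through
 * crypto_ahash_op(): an aligned result buffer goes straight to the
 * algorithm, while a misaligned one takes the bounce-buffer path above.
 */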
int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
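
/*
 * Minimal caller sketch for a one-shot digest (error handling omitted;
 * "sha256", my_done and my_ctx are placeholders chosen for the example):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, my_ctx);
 *	ahash_request_set_crypt(req, sg, result, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * A return of -EINPROGRESS (or -EBUSY for a backlogged request) means the
 * result arrives later through my_done().
 */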

static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

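/*
 * Default finup: drive the algorithm's update() then final() as two async
 * steps. done1 fires if the update stage completes asynchronously and
 * chains into final(); done2 fires when final() completes and copies the
 * digest back via finish2. The synchronous path takes the same
 * finish1 -> finish2 route directly.
 */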
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

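/*
 * An algorithm whose cra_type is not crypto_ahash_type is a synchronous
 * shash being exposed through the ahash interface: init_tfm above hands it
 * to crypto_init_shash_ops_async(), and the transform context then only
 * needs to hold a pointer to the wrapped shash, as sized below.
 */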
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

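/*
 * Note for callers: crypto_alloc_tfm() returns ERR_PTR() on failure, so
 * the result of crypto_alloc_ahash() must be checked with IS_ERR() rather
 * than against NULL.
 */
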
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

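/*
 * Registration sketch for a driver-provided algorithm. The my_* names are
 * placeholders for this example only; the field layout is what
 * ahash_prepare_alg() above validates (note the nonzero statesize):
 *
 *	static struct ahash_alg my_alg = {
 *		.init   = my_init,
 *		.update = my_update,
 *		.final  = my_final,
 *		.digest = my_digest,
 *		.halg = {
 *			.digestsize = 32,
 *			.statesize  = sizeof(struct my_state),
 *			.base = {
 *				.cra_name        = "sha256",
 *				.cra_driver_name = "sha256-mydrv",
 *				.cra_blocksize   = 64,
 *				.cra_ctxsize     = sizeof(struct my_ctx),
 *				.cra_module      = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_alg);
 */
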
int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");