/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

/*
 * Stash of the original request state (result buffer, completion callback
 * and callback data) while an unaligned operation runs against an aligned
 * bounce buffer; see ahash_save_req() below.
 */
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	/*
	 * ahash walks may sleep and so use kmap(); plain hash walks run
	 * in atomic context and must use kmap_atomic().
	 */
	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The "may sleep" test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

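/*
 * Example (illustrative sketch, not compiled here): a driver typically
 * consumes a request with the walk API as a loop, where example_process()
 * is a hypothetical stand-in for whatever hashes walk.data.  Each
 * iteration sees one mapped, alignment-trimmed chunk of the source
 * scatterlist:
 *
 *	struct crypto_hash_walk walk;
 *	int err = 0;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, err))
 *		err = example_process(walk.data, nbytes);
 *
 *	return err;
 */
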
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	/*
	 * CRYPTO_ALG_ASYNC is reused here as a private walk flag meaning
	 * "may sleep"; the BUILD_BUG_ON guarantees it cannot collide with
	 * the request flags preserved above.
	 */
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

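/*
 * Example (sketch, assuming a keyed tfm such as an hmac instance was
 * allocated elsewhere): callers need not align the key themselves, since
 * a misaligned key is bounced through a temporary buffer above.
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (err)
 *		goto out_free_tfm;
 */
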
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

/*
 * Size of a buffer that can hold len bytes at alignment 'mask', given
 * that the allocation itself is already aligned to
 * crypto_tfm_ctx_alignment().
 */
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}


static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the private data of the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct crypto_async_request *req" here is in fact the
	 * "req.base" of the ADJUSTED request from ahash_op_unaligned();
	 * since base.data was set to point back at the request itself,
	 * req->data is also the ADJUSTED "req".
	 */

	/* First copy req->result into the original result buffer. */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

/*
 * If the caller's result buffer does not satisfy the transform's
 * alignment mask, bounce the operation through an aligned buffer;
 * otherwise run it directly.
 */
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

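/*
 * Example (illustrative sketch, not compiled here): one common way to
 * drive crypto_ahash_digest() synchronously from process context is to
 * wait on a completion signalled by the request callback.  The callback
 * name is an assumption of this sketch:
 *
 *	static void example_done(struct crypto_async_request *req, int err)
 *	{
 *		if (err == -EINPROGRESS)
 *			return;
 *		complete(req->data);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   example_done, &done);
 *	ahash_request_set_crypt(req, sg, result, nbytes);
 *	err = crypto_ahash_digest(req);
 *	if (err == -EINPROGRESS || err == -EBUSY)
 *		wait_for_completion(&done);
 *	ahash_request_free(req);
 *
 * Real code would also propagate the error delivered to the callback
 * rather than discarding it.
 */
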
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

/*
 * Default finup implementation for transforms that only provide separate
 * update() and final(): chain the two while preserving the caller's
 * request state via ahash_save_req().
 */
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	/* A synchronous shash algorithm is wrapped in async ops instead. */
	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

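/*
 * Example (sketch): allocation returns an ERR_PTR() on failure, so check
 * with IS_ERR() rather than for NULL.  The algorithm name here is just an
 * illustration:
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_ahash(tfm);
 */
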
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	/* Sanity-check the advertised digest and export state sizes. */
	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

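/*
 * Example (hedged skeleton, all "example_*" names are hypothetical): a
 * driver fills in a struct ahash_alg and registers it, typically from its
 * probe routine.  Note that .finup may be left NULL, in which case
 * crypto_ahash_init_tfm() falls back to ahash_def_finup(), and that
 * .statesize must be non-zero to pass ahash_prepare_alg():
 *
 *	static struct ahash_alg example_alg = {
 *		.init	= example_init,
 *		.update	= example_update,
 *		.final	= example_final,
 *		.digest	= example_digest,
 *		.halg	= {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct example_state),
 *			.base	= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-example",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&example_alg);
 */
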
int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");