/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

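/*
 * Per-request bookkeeping for the unaligned-result paths below: the
 * caller's completion callback, callback data and result pointer are
 * saved here while the result is redirected into the aligned bounce
 * buffer ubuf[].
 */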
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

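/*
 * Map the current page of the walk and return the number of bytes that
 * may be hashed from it.  If the offset is misaligned with respect to
 * the algorithm's alignmask, only the bytes up to the next alignment
 * boundary are offered, so that subsequent chunks start from an
 * aligned address.
 */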
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

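/*
 * Advance the walk after the caller has consumed the bytes returned by
 * the previous step.  A positive return value is the size of the next
 * chunk to hash, zero means the walk is complete, and a negative value
 * propagates the caller's error.  When the current entry started at a
 * misaligned offset, the first chunk ends at the alignment boundary
 * and the remainder of the page is handed out here, reusing the same
 * mapping, before moving to the next page or scatterlist entry.
 */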
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

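/*
 * Begin walking the scatterlist of an ahash request.  Returns the size
 * of the first chunk, or zero for an empty request.  A driver consumes
 * the data with a loop of the following shape (this mirrors the shash
 * wrappers in crypto/shash.c), where process() stands for the driver's
 * update step and returns zero on success or a negative error code:
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = process(walk.data, nbytes);
 */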
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags;

	return hash_walk_new_entry(walk);
}

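/*
 * Slow path for crypto_ahash_setkey(): the key is copied into a
 * freshly allocated buffer, aligned up to the algorithm's alignmask,
 * before being handed to the setkey operation.  The copy is zeroed on
 * free so no key material is left behind.
 */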
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned int len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

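/*
 * The three functions below implement the unaligned-result fallback
 * used by crypto_ahash_op(): the digest is first written into the
 * aligned bounce buffer allocated in ahash_op_unaligned() and copied
 * back into the caller's buffer once the operation has completed,
 * either synchronously in the finish helper or from the done callback.
 */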
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_op_unaligned_finish(areq, err);

	complete(data, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;
	int err;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_op_unaligned_done;
	req->base.data = req;
	req->priv = priv;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

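/*
 * Common entry point for final/finup/digest: take the fast path when
 * the caller's result buffer already satisfies the algorithm's
 * alignment requirement, otherwise bounce through an aligned buffer.
 */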
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

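/*
 * Usage sketch (illustrative only, not part of this file): computing a
 * digest of a kernel buffer through the asynchronous interface, waiting
 * for completion in the style of tcrypt.  The names example_result,
 * example_done, buf, len and out are hypothetical, and error handling
 * is abbreviated.
 *
 *	struct example_result {
 *		struct completion completion;
 *		int err;
 *	};
 *
 *	static void example_done(struct crypto_async_request *req, int err)
 *	{
 *		struct example_result *res = req->data;
 *
 *		if (err == -EINPROGRESS)
 *			return;
 *		res->err = err;
 *		complete(&res->completion);
 *	}
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct example_result res;
 *	struct scatterlist sg;
 *
 *	init_completion(&res.completion);
 *	sg_init_one(&sg, buf, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   example_done, &res);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *
 *	err = crypto_ahash_digest(req);
 *	if (err == -EINPROGRESS || err == -EBUSY) {
 *		wait_for_completion(&res.completion);
 *		err = res.err;
 *	}
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */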
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_def_finup_finish2(areq, err);

	complete(data, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	err = ahash_def_finup_finish1(areq, err);

	complete(data, err);
}

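/*
 * Default finup implementation for algorithms that only provide
 * update() and final(): run the two steps back to back, using the
 * done1/done2 callbacks above to chain them when the driver completes
 * asynchronously.  The result is staged in an aligned bounce buffer,
 * as in ahash_op_unaligned().
 */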
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_def_finup_done1;
	req->base.data = req;
	req->priv = priv;

	return ahash_def_finup_finish1(req, tfm->update(req));
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

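/*
 * Transform initialisation: unimplemented operations are pointed at
 * stubs returning -ENOSYS, and algorithms registered through the shash
 * interface are transparently wrapped by crypto_init_shash_ops_async()
 * so that they can be driven through the ahash API as well.
 */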
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

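/**
 * crypto_alloc_ahash - allocate an asynchronous hash transform
 * @alg_name: name of the algorithm, e.g. "sha1" or "hmac(sha256)"
 * @type: algorithm type and flags to match
 * @mask: which bits of @type are significant
 *
 * Returns the transform handle on success or an ERR_PTR() on failure;
 * release it with crypto_free_ahash().
 */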
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

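/*
 * Common registration checks: reject implausibly large digest or state
 * sizes (capped at PAGE_SIZE / 8) and stamp the algorithm as an ahash
 * before it is added to the crypto core.
 */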
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");