xref: /openbmc/linux/crypto/ahash.c (revision 9d56dd3b083a3bec56e9da35ce07baca81030b03)
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include "internal.h"

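/*
 * Per-request bookkeeping used when a result buffer has to be bounced
 * through aligned scratch space: the caller's completion callback,
 * callback data and result pointer are saved here, the digest is
 * written into the aligned @ubuf area, and it is copied back to
 * @result once the operation completes.
 */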
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

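/*
 * Map the current page of the walk and return the number of bytes the
 * caller may process from it.  If the starting offset is not aligned
 * to the transform's alignment mask, only the bytes needed to reach
 * the next aligned boundary are handed out.
 */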
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = crypto_kmap(walk->pg, 0);
	walk->data += offset;

	if (offset & alignmask)
		nbytes = alignmask + 1 - (offset & alignmask);

	walk->entrylen -= nbytes;
	return nbytes;
}

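/*
 * Load the next scatterlist entry into the walk state, clamping its
 * length to the number of bytes still requested, and map its first
 * chunk.
 */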
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

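/*
 * Finish the current step of a hash walk.  @err is the result of
 * processing the previous chunk.  If unprocessed bytes remain in the
 * current entry at an unaligned offset, the walk is re-aligned within
 * the same page and the size of the next chunk is returned.
 * Otherwise the page is unmapped and the walk advances to the next
 * page or scatterlist entry.  Returns the number of bytes available
 * next, zero when the walk is complete, or a negative error code.
 */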
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset += alignmask - 1;
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	crypto_kunmap(walk->data, 0);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

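/*
 * Start walking the source scatterlist of an ahash request.  Returns
 * the size of the first chunk to process, or zero if the request
 * covers no data.
 */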
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

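/*
 * As crypto_hash_walk_first(), but driven by the legacy hash_desc
 * interface used by the old synchronous hash API.
 */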
int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags;

	return hash_walk_new_entry(walk);
}

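/*
 * Slow path for crypto_ahash_setkey(): copy the key into a temporary
 * buffer aligned to the transform's alignment mask, call ->setkey on
 * the copy, then zero and free it.
 */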
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

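/*
 * Set the key for a keyed hash, bouncing through an aligned copy when
 * the caller's key buffer does not satisfy the alignment mask.
 */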
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

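/*
 * Result bounce buffering for callers whose result pointer is not
 * aligned to the transform's alignment mask: the original completion
 * callback, callback data and result pointer are stashed in an
 * ahash_request_priv, the operation writes its digest into the aligned
 * scratch area, and ahash_op_unaligned_finish() copies the digest back
 * and frees the scratch once the operation has completed (either
 * synchronously or via the ahash_op_unaligned_done() callback).
 */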
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_op_unaligned_finish(areq, err);

	complete(data, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;
	int err;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_op_unaligned_done;
	req->base.data = req;
	req->priv = priv;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

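/*
 * Common entry point for final/finup/digest: call the operation
 * directly when the result buffer is aligned, otherwise go through the
 * bounce-buffer path above.
 */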
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

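/*
 * Default ->finup implementation for drivers that only provide
 * ->update and ->final: run the update, then chain into ->final
 * (resuming from ahash_def_finup_done1() if the update completes
 * asynchronously).  The digest is always written into an aligned
 * scratch buffer and copied back to the caller's result buffer on
 * completion.
 */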
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_def_finup_finish2(areq, err);

	complete(data, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	err = ahash_def_finup_finish1(areq, err);

	complete(data, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_def_finup_done1;
	req->base.data = req;
	req->priv = priv;

	return ahash_def_finup_finish1(req, tfm->update(req));
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

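/*
 * Transform construction: install the default (-ENOSYS)
 * setkey/export/import handlers, then either wire up the operations
 * from the ahash_alg or, if the underlying algorithm is a synchronous
 * shash, wrap it via crypto_init_shash_ops_async().  A missing ->finup
 * falls back to ahash_def_finup().
 */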
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

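/*
 * Allocate an ahash transform by algorithm name.  A typical caller
 * looks roughly like the following (illustrative sketch only; my_done
 * and my_ctx are caller-supplied, and error handling is omitted):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */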
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

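/*
 * Common preparation for algorithm and instance registration: reject
 * implausibly large digest or state sizes and mark the algorithm as an
 * ahash.
 */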
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");