xref: /openbmc/linux/crypto/ahash.c (revision ef0579b6)
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

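/*
 * Scatterlist walk helpers: map one page of the source scatterlist at a
 * time and hand the caller a virtually contiguous chunk whose length also
 * respects the transform's alignment mask.
 */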
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

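/*
 * Finish the current walk step: unmap the page and either continue within
 * the same scatterlist entry, move on to the next entry, or return 0 once
 * all of req->nbytes has been handed out.  A non-zero @err aborts the walk
 * and is passed back to the caller.
 */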
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

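/*
 * Begin walking req->src.  A typical consumer loop looks roughly like the
 * sketch below (illustrative only, not code from this file; process_block()
 * is a placeholder and error handling is simplified):
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, err))
 *		err = process_block(walk.data, nbytes);
 */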
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

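/*
 * Same as crypto_hash_walk_first(), but the walk is flagged as asynchronous
 * so that pages are mapped with kmap() rather than kmap_atomic(), allowing
 * the caller to sleep between walk steps.
 */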
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

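/*
 * The key passed in by the user is not aligned to the transform's alignment
 * mask, so bounce it through a freshly allocated, aligned buffer before
 * calling ->setkey().  The buffer is zeroed on free so no key material is
 * left behind.
 */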
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

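/*
 * Stash the caller's result pointer and completion callback in a private
 * structure and point the request at an aligned bounce buffer and at @cplt
 * instead, so the operation can run even though req->result is misaligned.
 * ahash_restore_req() undoes this and copies the digest back on success.
 */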
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its contents!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	ahash_restore_req(req, err);

	return err;
}

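/*
 * Common entry point for final/finup/digest: run the operation directly if
 * the caller's result buffer is already sufficiently aligned, otherwise go
 * through the bounce-buffer machinery above.
 */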
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
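
/*
 * Typical caller usage of the one-shot digest interface (an illustrative
 * sketch only, not code from this file; the asynchronous -EINPROGRESS/-EBUSY
 * completion path and error handling are omitted for brevity):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_ahash_digest(req);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */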

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

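/*
 * Default ->finup() for algorithms that only provide ->update() and
 * ->final(): run the two steps back to back, using the save/restore helpers
 * above so the sequence also works when either step completes asynchronously.
 */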
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

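/*
 * Per-tfm initialisation: populate the crypto_ahash ops from the ahash_alg,
 * falling back to the synchronous shash wrapper when the underlying
 * algorithm is not a native ahash, and substituting stubs for the optional
 * setkey/export/import hooks when the algorithm does not provide them.
 */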
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

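/*
 * Sanity-check an algorithm before registration: the digest and exported
 * state must fit comfortably within a page (and the state size must be
 * non-zero), and the type bits are forced to CRYPTO_ALG_TYPE_AHASH.
 */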
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
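
/*
 * How a driver typically registers an asynchronous hash (an illustrative
 * sketch; the foo_* names, context structures and priority are placeholders,
 * not taken from this file):
 *
 *	static struct ahash_alg foo_sha256_alg = {
 *		.init	= foo_sha256_init,
 *		.update	= foo_sha256_update,
 *		.final	= foo_sha256_final,
 *		.digest	= foo_sha256_digest,
 *		.halg	= {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct foo_sha256_state),
 *			.base	= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-foo",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
 *						   CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_ctxsize	 = sizeof(struct foo_sha256_ctx),
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&foo_sha256_alg);
 */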

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");