xref: /openbmc/linux/drivers/crypto/marvell/cesa/hash.c (revision 0c3dc787)
1655ff1a1SSrujanaChalla // SPDX-License-Identifier: GPL-2.0-only
2655ff1a1SSrujanaChalla /*
3655ff1a1SSrujanaChalla  * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
4655ff1a1SSrujanaChalla  *
5655ff1a1SSrujanaChalla  * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6655ff1a1SSrujanaChalla  * Author: Arnaud Ebalard <arno@natisbad.org>
7655ff1a1SSrujanaChalla  *
8655ff1a1SSrujanaChalla  * This work is based on an initial version written by
9655ff1a1SSrujanaChalla  * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
10655ff1a1SSrujanaChalla  */
11655ff1a1SSrujanaChalla 
12655ff1a1SSrujanaChalla #include <crypto/hmac.h>
13655ff1a1SSrujanaChalla #include <crypto/md5.h>
14655ff1a1SSrujanaChalla #include <crypto/sha.h>
150c3dc787SHerbert Xu #include <linux/device.h>
160c3dc787SHerbert Xu #include <linux/dma-mapping.h>
17655ff1a1SSrujanaChalla 
18655ff1a1SSrujanaChalla #include "cesa.h"
19655ff1a1SSrujanaChalla 
/*
 * Iterator state used while building the TDMA descriptor chain for a
 * hash request: 'base' tracks overall progress through the request,
 * 'src' walks the source scatterlist.
 */
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};
24655ff1a1SSrujanaChalla 
25655ff1a1SSrujanaChalla static inline void
26655ff1a1SSrujanaChalla mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
27655ff1a1SSrujanaChalla 			    struct ahash_request *req)
28655ff1a1SSrujanaChalla {
29655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
30655ff1a1SSrujanaChalla 	unsigned int len = req->nbytes + creq->cache_ptr;
31655ff1a1SSrujanaChalla 
32655ff1a1SSrujanaChalla 	if (!creq->last_req)
33655ff1a1SSrujanaChalla 		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
34655ff1a1SSrujanaChalla 
35655ff1a1SSrujanaChalla 	mv_cesa_req_dma_iter_init(&iter->base, len);
36655ff1a1SSrujanaChalla 	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
37655ff1a1SSrujanaChalla 	iter->src.op_offset = creq->cache_ptr;
38655ff1a1SSrujanaChalla }
39655ff1a1SSrujanaChalla 
40655ff1a1SSrujanaChalla static inline bool
41655ff1a1SSrujanaChalla mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
42655ff1a1SSrujanaChalla {
43655ff1a1SSrujanaChalla 	iter->src.op_offset = 0;
44655ff1a1SSrujanaChalla 
45655ff1a1SSrujanaChalla 	return mv_cesa_req_dma_iter_next_op(&iter->base);
46655ff1a1SSrujanaChalla }
47655ff1a1SSrujanaChalla 
48655ff1a1SSrujanaChalla static inline int
49655ff1a1SSrujanaChalla mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
50655ff1a1SSrujanaChalla {
51655ff1a1SSrujanaChalla 	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
52655ff1a1SSrujanaChalla 				    &req->cache_dma);
53655ff1a1SSrujanaChalla 	if (!req->cache)
54655ff1a1SSrujanaChalla 		return -ENOMEM;
55655ff1a1SSrujanaChalla 
56655ff1a1SSrujanaChalla 	return 0;
57655ff1a1SSrujanaChalla }
58655ff1a1SSrujanaChalla 
59655ff1a1SSrujanaChalla static inline void
60655ff1a1SSrujanaChalla mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
61655ff1a1SSrujanaChalla {
62655ff1a1SSrujanaChalla 	if (!req->cache)
63655ff1a1SSrujanaChalla 		return;
64655ff1a1SSrujanaChalla 
65655ff1a1SSrujanaChalla 	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
66655ff1a1SSrujanaChalla 		      req->cache_dma);
67655ff1a1SSrujanaChalla }
68655ff1a1SSrujanaChalla 
69655ff1a1SSrujanaChalla static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
70655ff1a1SSrujanaChalla 					   gfp_t flags)
71655ff1a1SSrujanaChalla {
72655ff1a1SSrujanaChalla 	if (req->padding)
73655ff1a1SSrujanaChalla 		return 0;
74655ff1a1SSrujanaChalla 
75655ff1a1SSrujanaChalla 	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
76655ff1a1SSrujanaChalla 				      &req->padding_dma);
77655ff1a1SSrujanaChalla 	if (!req->padding)
78655ff1a1SSrujanaChalla 		return -ENOMEM;
79655ff1a1SSrujanaChalla 
80655ff1a1SSrujanaChalla 	return 0;
81655ff1a1SSrujanaChalla }
82655ff1a1SSrujanaChalla 
/* Return the padding buffer to its pool, if one was allocated. */
static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	/* Reset the pointer so the helper can safely be called twice. */
	req->padding = NULL;
}
92655ff1a1SSrujanaChalla 
93655ff1a1SSrujanaChalla static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
94655ff1a1SSrujanaChalla {
95655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
96655ff1a1SSrujanaChalla 
97655ff1a1SSrujanaChalla 	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
98655ff1a1SSrujanaChalla }
99655ff1a1SSrujanaChalla 
100655ff1a1SSrujanaChalla static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
101655ff1a1SSrujanaChalla {
102655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
103655ff1a1SSrujanaChalla 
104655ff1a1SSrujanaChalla 	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
105655ff1a1SSrujanaChalla 	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
106655ff1a1SSrujanaChalla 	mv_cesa_dma_cleanup(&creq->base);
107655ff1a1SSrujanaChalla }
108655ff1a1SSrujanaChalla 
109655ff1a1SSrujanaChalla static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
110655ff1a1SSrujanaChalla {
111655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
112655ff1a1SSrujanaChalla 
113655ff1a1SSrujanaChalla 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
114655ff1a1SSrujanaChalla 		mv_cesa_ahash_dma_cleanup(req);
115655ff1a1SSrujanaChalla }
116655ff1a1SSrujanaChalla 
117655ff1a1SSrujanaChalla static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
118655ff1a1SSrujanaChalla {
119655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
120655ff1a1SSrujanaChalla 
121655ff1a1SSrujanaChalla 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
122655ff1a1SSrujanaChalla 		mv_cesa_ahash_dma_last_cleanup(req);
123655ff1a1SSrujanaChalla }
124655ff1a1SSrujanaChalla 
125655ff1a1SSrujanaChalla static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
126655ff1a1SSrujanaChalla {
127655ff1a1SSrujanaChalla 	unsigned int index, padlen;
128655ff1a1SSrujanaChalla 
129655ff1a1SSrujanaChalla 	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
130655ff1a1SSrujanaChalla 	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);
131655ff1a1SSrujanaChalla 
132655ff1a1SSrujanaChalla 	return padlen;
133655ff1a1SSrujanaChalla }
134655ff1a1SSrujanaChalla 
135655ff1a1SSrujanaChalla static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
136655ff1a1SSrujanaChalla {
137655ff1a1SSrujanaChalla 	unsigned int padlen;
138655ff1a1SSrujanaChalla 
139655ff1a1SSrujanaChalla 	buf[0] = 0x80;
140655ff1a1SSrujanaChalla 	/* Pad out to 56 mod 64 */
141655ff1a1SSrujanaChalla 	padlen = mv_cesa_ahash_pad_len(creq);
142655ff1a1SSrujanaChalla 	memset(buf + 1, 0, padlen - 1);
143655ff1a1SSrujanaChalla 
144655ff1a1SSrujanaChalla 	if (creq->algo_le) {
145655ff1a1SSrujanaChalla 		__le64 bits = cpu_to_le64(creq->len << 3);
146655ff1a1SSrujanaChalla 
147655ff1a1SSrujanaChalla 		memcpy(buf + padlen, &bits, sizeof(bits));
148655ff1a1SSrujanaChalla 	} else {
149655ff1a1SSrujanaChalla 		__be64 bits = cpu_to_be64(creq->len << 3);
150655ff1a1SSrujanaChalla 
151655ff1a1SSrujanaChalla 		memcpy(buf + padlen, &bits, sizeof(bits));
152655ff1a1SSrujanaChalla 	}
153655ff1a1SSrujanaChalla 
154655ff1a1SSrujanaChalla 	return padlen + 8;
155655ff1a1SSrujanaChalla }
156655ff1a1SSrujanaChalla 
/*
 * Standard (PIO) mode processing step: copy the next chunk of data into
 * the engine SRAM, fix up the operation descriptor (fragment mode, data
 * length, padding) and kick the accelerator.  Called repeatedly until
 * mv_cesa_ahash_std_process() reports completion.
 */
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t  len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	/* On the first step, load the current hash state into IVDIG regs. */
	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	/* Leftover bytes cached from the previous update go in first. */
	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	/* Process as much as fits in the SRAM payload area. */
	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	/*
	 * Unless this is the final request, only whole hash blocks may be
	 * handed to the engine; the remainder is stashed in the cache.
	 */
	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	/* Copy the new data from the scatterlist into SRAM. */
	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	/*
	 * If all remaining data fits and the total length is within what
	 * the engine can track, this step can produce the final digest.
	 */
	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			/* The engine pads for us: just set the total length. */
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			/*
			 * The engine cannot finish the hash itself: generate
			 * the MD5/SHA padding manually and feed it as data.
			 */
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				/*
				 * Not enough room for the trailer: truncate
				 * to a block boundary and keep the tail in
				 * the cache for the next step.
				 */
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			/* The manually-padded block is not a final fragment. */
			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	/* Any step after the first one continues as a middle fragment. */
	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	/* Unmask the completion interrupt and start the accelerator. */
	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
257655ff1a1SSrujanaChalla 
258655ff1a1SSrujanaChalla static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
259655ff1a1SSrujanaChalla {
260655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
261655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
262655ff1a1SSrujanaChalla 
263655ff1a1SSrujanaChalla 	if (sreq->offset < (req->nbytes - creq->cache_ptr))
264655ff1a1SSrujanaChalla 		return -EINPROGRESS;
265655ff1a1SSrujanaChalla 
266655ff1a1SSrujanaChalla 	return 0;
267655ff1a1SSrujanaChalla }
268655ff1a1SSrujanaChalla 
269655ff1a1SSrujanaChalla static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
270655ff1a1SSrujanaChalla {
271655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
272655ff1a1SSrujanaChalla 	struct mv_cesa_req *basereq = &creq->base;
273655ff1a1SSrujanaChalla 
274655ff1a1SSrujanaChalla 	mv_cesa_dma_prepare(basereq, basereq->engine);
275655ff1a1SSrujanaChalla }
276655ff1a1SSrujanaChalla 
277655ff1a1SSrujanaChalla static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
278655ff1a1SSrujanaChalla {
279655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
280655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
281655ff1a1SSrujanaChalla 
282655ff1a1SSrujanaChalla 	sreq->offset = 0;
283655ff1a1SSrujanaChalla }
284655ff1a1SSrujanaChalla 
/*
 * DMA mode processing step: restore the hash state in hardware if the
 * chain requires it, then launch the TDMA descriptor chain.
 */
static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	/* Hand the descriptor chain over to the engine. */
	mv_cesa_dma_step(base);
}
303655ff1a1SSrujanaChalla 
304655ff1a1SSrujanaChalla static void mv_cesa_ahash_step(struct crypto_async_request *req)
305655ff1a1SSrujanaChalla {
306655ff1a1SSrujanaChalla 	struct ahash_request *ahashreq = ahash_request_cast(req);
307655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
308655ff1a1SSrujanaChalla 
309655ff1a1SSrujanaChalla 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
310655ff1a1SSrujanaChalla 		mv_cesa_ahash_dma_step(ahashreq);
311655ff1a1SSrujanaChalla 	else
312655ff1a1SSrujanaChalla 		mv_cesa_ahash_std_step(ahashreq);
313655ff1a1SSrujanaChalla }
314655ff1a1SSrujanaChalla 
315655ff1a1SSrujanaChalla static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
316655ff1a1SSrujanaChalla {
317655ff1a1SSrujanaChalla 	struct ahash_request *ahashreq = ahash_request_cast(req);
318655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
319655ff1a1SSrujanaChalla 
320655ff1a1SSrujanaChalla 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
321655ff1a1SSrujanaChalla 		return mv_cesa_dma_process(&creq->base, status);
322655ff1a1SSrujanaChalla 
323655ff1a1SSrujanaChalla 	return mv_cesa_ahash_std_process(ahashreq, status);
324655ff1a1SSrujanaChalla }
325655ff1a1SSrujanaChalla 
/*
 * Completion handler: fetch the (intermediate or final) digest from the
 * engine, update creq->state and, for the last request, write the digest
 * into the caller's result buffer in the algorithm's byte order.
 */
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	/*
	 * DMA chains that end with a RESULT descriptor already have the
	 * digest copied into the last op context; read it from there.
	 */
	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		/*
		 * NOTE(review): data is __le32, so cpu_to_le32() here looks
		 * like it should be le32_to_cpu(); both are the same byte
		 * swap, so no functional difference — verify annotations.
		 */
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_le32(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		/* Otherwise the digest lives in the IVDIG registers. */
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	/* This request no longer contributes to the engine's load. */
	atomic_sub(ahashreq->nbytes, &engine->load);
}
375655ff1a1SSrujanaChalla 
376655ff1a1SSrujanaChalla static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
377655ff1a1SSrujanaChalla 				  struct mv_cesa_engine *engine)
378655ff1a1SSrujanaChalla {
379655ff1a1SSrujanaChalla 	struct ahash_request *ahashreq = ahash_request_cast(req);
380655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
381655ff1a1SSrujanaChalla 
382655ff1a1SSrujanaChalla 	creq->base.engine = engine;
383655ff1a1SSrujanaChalla 
384655ff1a1SSrujanaChalla 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
385655ff1a1SSrujanaChalla 		mv_cesa_ahash_dma_prepare(ahashreq);
386655ff1a1SSrujanaChalla 	else
387655ff1a1SSrujanaChalla 		mv_cesa_ahash_std_prepare(ahashreq);
388655ff1a1SSrujanaChalla }
389655ff1a1SSrujanaChalla 
/*
 * Per-request cleanup: release backend resources and, if some trailing
 * input bytes were not consumed by the engine, save them in the cache
 * for the next update call.
 */
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	/*
	 * Copy the trailing bytes into the cache; done after cleanup so
	 * the source scatterlist is no longer DMA-mapped.
	 */
	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}
406655ff1a1SSrujanaChalla 
/* Backend callbacks invoked by the CESA request queue manager. */
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};
413655ff1a1SSrujanaChalla 
/*
 * Initialize the per-request context for a new hash operation.
 * @algo_le selects the byte order of the bit-length trailer written
 * during padding (little endian for MD5, big endian for SHA).
 */
static void mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	/* A fresh request starts as a MAC-only, first-fragment operation. */
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}
431655ff1a1SSrujanaChalla 
/* Transform init: hook up the CESA request ops and reserve ctx space. */
static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	/* Reserve per-request space for struct mv_cesa_ahash_req. */
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}
442655ff1a1SSrujanaChalla 
443655ff1a1SSrujanaChalla static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
444655ff1a1SSrujanaChalla {
445655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
446655ff1a1SSrujanaChalla 	bool cached = false;
447655ff1a1SSrujanaChalla 
448655ff1a1SSrujanaChalla 	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
449655ff1a1SSrujanaChalla 	    !creq->last_req) {
450655ff1a1SSrujanaChalla 		cached = true;
451655ff1a1SSrujanaChalla 
452655ff1a1SSrujanaChalla 		if (!req->nbytes)
453655ff1a1SSrujanaChalla 			return cached;
454655ff1a1SSrujanaChalla 
455655ff1a1SSrujanaChalla 		sg_pcopy_to_buffer(req->src, creq->src_nents,
456655ff1a1SSrujanaChalla 				   creq->cache + creq->cache_ptr,
457655ff1a1SSrujanaChalla 				   req->nbytes, 0);
458655ff1a1SSrujanaChalla 
459655ff1a1SSrujanaChalla 		creq->cache_ptr += req->nbytes;
460655ff1a1SSrujanaChalla 	}
461655ff1a1SSrujanaChalla 
462655ff1a1SSrujanaChalla 	return cached;
463655ff1a1SSrujanaChalla }
464655ff1a1SSrujanaChalla 
/*
 * Append an operation descriptor covering @frag_len bytes of data staged
 * in SRAM, plus a dummy descriptor to launch it.  After the first
 * fragment has been emitted, the template is switched to mid-fragment
 * mode for subsequent calls.  Returns the op context or an ERR_PTR.
 */
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}
492655ff1a1SSrujanaChalla 
493655ff1a1SSrujanaChalla static int
494655ff1a1SSrujanaChalla mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
495655ff1a1SSrujanaChalla 			    struct mv_cesa_ahash_req *creq,
496655ff1a1SSrujanaChalla 			    gfp_t flags)
497655ff1a1SSrujanaChalla {
498655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
499655ff1a1SSrujanaChalla 	int ret;
500655ff1a1SSrujanaChalla 
501655ff1a1SSrujanaChalla 	if (!creq->cache_ptr)
502655ff1a1SSrujanaChalla 		return 0;
503655ff1a1SSrujanaChalla 
504655ff1a1SSrujanaChalla 	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
505655ff1a1SSrujanaChalla 	if (ret)
506655ff1a1SSrujanaChalla 		return ret;
507655ff1a1SSrujanaChalla 
508655ff1a1SSrujanaChalla 	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
509655ff1a1SSrujanaChalla 
510655ff1a1SSrujanaChalla 	return mv_cesa_dma_add_data_transfer(chain,
511655ff1a1SSrujanaChalla 					     CESA_SA_DATA_SRAM_OFFSET,
512655ff1a1SSrujanaChalla 					     ahashdreq->cache_dma,
513655ff1a1SSrujanaChalla 					     creq->cache_ptr,
514655ff1a1SSrujanaChalla 					     CESA_TDMA_DST_IN_SRAM,
515655ff1a1SSrujanaChalla 					     flags);
516655ff1a1SSrujanaChalla }
517655ff1a1SSrujanaChalla 
/*
 * Emit the final operation(s) of a DMA hash chain.  Either the engine
 * finishes the hash itself (short requests), or the MD5/SHA padding is
 * generated manually and fed as one or two extra mid fragments when it
 * does not fit alongside the remaining data.
 */
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		/*
		 * Append a result descriptor so the digest is copied out
		 * of the SA context when the chain completes.
		 */
		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	/* Fit as much of the trailer as possible into the current block. */
	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		/* The whole trailer fitted: we are done. */
		if (len == trailerlen)
			return op;

		padoff += len;
	}

	/* Process the remainder of the trailer in one more operation. */
	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
600655ff1a1SSrujanaChalla 
/*
 * Build the TDMA descriptor chain for a (partial or final) hash request:
 * map the source scatterlist, replay the cached left-over bytes, stream
 * the new data block by block with an operation descriptor between full
 * SRAM blocks, then append the closing operation.  Returns 0 on success
 * or a negative errno; on failure the partially built chain and the DMA
 * mapping are torn down.
 */
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	/*
	 * A non-first fragment continues a previous partial hash, so the
	 * engine's IVDIG registers must be seeded before this chain runs.
	 */
	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			/* dma_map_sg() returns 0 mapped entries on failure. */
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation.  Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	/*
	 * Keep any bytes that did not make it into the chain in the
	 * software cache for the next update; a final request consumes
	 * everything.
	 */
	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
734655ff1a1SSrujanaChalla 
735655ff1a1SSrujanaChalla static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
736655ff1a1SSrujanaChalla {
737655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
738655ff1a1SSrujanaChalla 
739655ff1a1SSrujanaChalla 	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
740655ff1a1SSrujanaChalla 	if (creq->src_nents < 0) {
741655ff1a1SSrujanaChalla 		dev_err(cesa_dev->dev, "Invalid number of src SG");
742655ff1a1SSrujanaChalla 		return creq->src_nents;
743655ff1a1SSrujanaChalla 	}
744655ff1a1SSrujanaChalla 
745655ff1a1SSrujanaChalla 	*cached = mv_cesa_ahash_cache_req(req);
746655ff1a1SSrujanaChalla 
747655ff1a1SSrujanaChalla 	if (*cached)
748655ff1a1SSrujanaChalla 		return 0;
749655ff1a1SSrujanaChalla 
750655ff1a1SSrujanaChalla 	if (cesa_dev->caps->has_tdma)
751655ff1a1SSrujanaChalla 		return mv_cesa_ahash_dma_req_init(req);
752655ff1a1SSrujanaChalla 	else
753655ff1a1SSrujanaChalla 		return 0;
754655ff1a1SSrujanaChalla }
755655ff1a1SSrujanaChalla 
756655ff1a1SSrujanaChalla static int mv_cesa_ahash_queue_req(struct ahash_request *req)
757655ff1a1SSrujanaChalla {
758655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
759655ff1a1SSrujanaChalla 	struct mv_cesa_engine *engine;
760655ff1a1SSrujanaChalla 	bool cached = false;
761655ff1a1SSrujanaChalla 	int ret;
762655ff1a1SSrujanaChalla 
763655ff1a1SSrujanaChalla 	ret = mv_cesa_ahash_req_init(req, &cached);
764655ff1a1SSrujanaChalla 	if (ret)
765655ff1a1SSrujanaChalla 		return ret;
766655ff1a1SSrujanaChalla 
767655ff1a1SSrujanaChalla 	if (cached)
768655ff1a1SSrujanaChalla 		return 0;
769655ff1a1SSrujanaChalla 
770655ff1a1SSrujanaChalla 	engine = mv_cesa_select_engine(req->nbytes);
771655ff1a1SSrujanaChalla 	mv_cesa_ahash_prepare(&req->base, engine);
772655ff1a1SSrujanaChalla 
773655ff1a1SSrujanaChalla 	ret = mv_cesa_queue_req(&req->base, &creq->base);
774655ff1a1SSrujanaChalla 
775655ff1a1SSrujanaChalla 	if (mv_cesa_req_needs_cleanup(&req->base, ret))
776655ff1a1SSrujanaChalla 		mv_cesa_ahash_cleanup(req);
777655ff1a1SSrujanaChalla 
778655ff1a1SSrujanaChalla 	return ret;
779655ff1a1SSrujanaChalla }
780655ff1a1SSrujanaChalla 
781655ff1a1SSrujanaChalla static int mv_cesa_ahash_update(struct ahash_request *req)
782655ff1a1SSrujanaChalla {
783655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
784655ff1a1SSrujanaChalla 
785655ff1a1SSrujanaChalla 	creq->len += req->nbytes;
786655ff1a1SSrujanaChalla 
787655ff1a1SSrujanaChalla 	return mv_cesa_ahash_queue_req(req);
788655ff1a1SSrujanaChalla }
789655ff1a1SSrujanaChalla 
790655ff1a1SSrujanaChalla static int mv_cesa_ahash_final(struct ahash_request *req)
791655ff1a1SSrujanaChalla {
792655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
793655ff1a1SSrujanaChalla 	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
794655ff1a1SSrujanaChalla 
795655ff1a1SSrujanaChalla 	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
796655ff1a1SSrujanaChalla 	creq->last_req = true;
797655ff1a1SSrujanaChalla 	req->nbytes = 0;
798655ff1a1SSrujanaChalla 
799655ff1a1SSrujanaChalla 	return mv_cesa_ahash_queue_req(req);
800655ff1a1SSrujanaChalla }
801655ff1a1SSrujanaChalla 
802655ff1a1SSrujanaChalla static int mv_cesa_ahash_finup(struct ahash_request *req)
803655ff1a1SSrujanaChalla {
804655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
805655ff1a1SSrujanaChalla 	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
806655ff1a1SSrujanaChalla 
807655ff1a1SSrujanaChalla 	creq->len += req->nbytes;
808655ff1a1SSrujanaChalla 	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
809655ff1a1SSrujanaChalla 	creq->last_req = true;
810655ff1a1SSrujanaChalla 
811655ff1a1SSrujanaChalla 	return mv_cesa_ahash_queue_req(req);
812655ff1a1SSrujanaChalla }
813655ff1a1SSrujanaChalla 
814655ff1a1SSrujanaChalla static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
815655ff1a1SSrujanaChalla 				u64 *len, void *cache)
816655ff1a1SSrujanaChalla {
817655ff1a1SSrujanaChalla 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
818655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
819655ff1a1SSrujanaChalla 	unsigned int digsize = crypto_ahash_digestsize(ahash);
820655ff1a1SSrujanaChalla 	unsigned int blocksize;
821655ff1a1SSrujanaChalla 
822655ff1a1SSrujanaChalla 	blocksize = crypto_ahash_blocksize(ahash);
823655ff1a1SSrujanaChalla 
824655ff1a1SSrujanaChalla 	*len = creq->len;
825655ff1a1SSrujanaChalla 	memcpy(hash, creq->state, digsize);
826655ff1a1SSrujanaChalla 	memset(cache, 0, blocksize);
827655ff1a1SSrujanaChalla 	memcpy(cache, creq->cache, creq->cache_ptr);
828655ff1a1SSrujanaChalla 
829655ff1a1SSrujanaChalla 	return 0;
830655ff1a1SSrujanaChalla }
831655ff1a1SSrujanaChalla 
/*
 * Common import helper: reset the request, restore the intermediate
 * digest, total length and the partial-block cache from a previously
 * exported state.
 */
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	/* Re-run init to get a clean request state before restoring. */
	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	/*
	 * At least one full block was already hashed, so the next
	 * operation continues an ongoing hash (mid fragment).
	 */
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	/* do_div() divides @len in place and returns the remainder. */
	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
865655ff1a1SSrujanaChalla 
866655ff1a1SSrujanaChalla static int mv_cesa_md5_init(struct ahash_request *req)
867655ff1a1SSrujanaChalla {
868655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
869655ff1a1SSrujanaChalla 	struct mv_cesa_op_ctx tmpl = { };
870655ff1a1SSrujanaChalla 
871655ff1a1SSrujanaChalla 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
872655ff1a1SSrujanaChalla 
873655ff1a1SSrujanaChalla 	mv_cesa_ahash_init(req, &tmpl, true);
874655ff1a1SSrujanaChalla 
875655ff1a1SSrujanaChalla 	creq->state[0] = MD5_H0;
876655ff1a1SSrujanaChalla 	creq->state[1] = MD5_H1;
877655ff1a1SSrujanaChalla 	creq->state[2] = MD5_H2;
878655ff1a1SSrujanaChalla 	creq->state[3] = MD5_H3;
879655ff1a1SSrujanaChalla 
880655ff1a1SSrujanaChalla 	return 0;
881655ff1a1SSrujanaChalla }
882655ff1a1SSrujanaChalla 
883655ff1a1SSrujanaChalla static int mv_cesa_md5_export(struct ahash_request *req, void *out)
884655ff1a1SSrujanaChalla {
885655ff1a1SSrujanaChalla 	struct md5_state *out_state = out;
886655ff1a1SSrujanaChalla 
887655ff1a1SSrujanaChalla 	return mv_cesa_ahash_export(req, out_state->hash,
888655ff1a1SSrujanaChalla 				    &out_state->byte_count, out_state->block);
889655ff1a1SSrujanaChalla }
890655ff1a1SSrujanaChalla 
891655ff1a1SSrujanaChalla static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
892655ff1a1SSrujanaChalla {
893655ff1a1SSrujanaChalla 	const struct md5_state *in_state = in;
894655ff1a1SSrujanaChalla 
895655ff1a1SSrujanaChalla 	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
896655ff1a1SSrujanaChalla 				    in_state->block);
897655ff1a1SSrujanaChalla }
898655ff1a1SSrujanaChalla 
/* One-shot MD5 digest: init followed by finup. */
static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret = mv_cesa_md5_init(req);

	return ret ? ret : mv_cesa_ahash_finup(req);
}
909655ff1a1SSrujanaChalla 
/* MD5 ahash algorithm descriptor registered for the CESA engine. */
struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
935655ff1a1SSrujanaChalla 
936655ff1a1SSrujanaChalla static int mv_cesa_sha1_init(struct ahash_request *req)
937655ff1a1SSrujanaChalla {
938655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
939655ff1a1SSrujanaChalla 	struct mv_cesa_op_ctx tmpl = { };
940655ff1a1SSrujanaChalla 
941655ff1a1SSrujanaChalla 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
942655ff1a1SSrujanaChalla 
943655ff1a1SSrujanaChalla 	mv_cesa_ahash_init(req, &tmpl, false);
944655ff1a1SSrujanaChalla 
945655ff1a1SSrujanaChalla 	creq->state[0] = SHA1_H0;
946655ff1a1SSrujanaChalla 	creq->state[1] = SHA1_H1;
947655ff1a1SSrujanaChalla 	creq->state[2] = SHA1_H2;
948655ff1a1SSrujanaChalla 	creq->state[3] = SHA1_H3;
949655ff1a1SSrujanaChalla 	creq->state[4] = SHA1_H4;
950655ff1a1SSrujanaChalla 
951655ff1a1SSrujanaChalla 	return 0;
952655ff1a1SSrujanaChalla }
953655ff1a1SSrujanaChalla 
954655ff1a1SSrujanaChalla static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
955655ff1a1SSrujanaChalla {
956655ff1a1SSrujanaChalla 	struct sha1_state *out_state = out;
957655ff1a1SSrujanaChalla 
958655ff1a1SSrujanaChalla 	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
959655ff1a1SSrujanaChalla 				    out_state->buffer);
960655ff1a1SSrujanaChalla }
961655ff1a1SSrujanaChalla 
962655ff1a1SSrujanaChalla static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
963655ff1a1SSrujanaChalla {
964655ff1a1SSrujanaChalla 	const struct sha1_state *in_state = in;
965655ff1a1SSrujanaChalla 
966655ff1a1SSrujanaChalla 	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
967655ff1a1SSrujanaChalla 				    in_state->buffer);
968655ff1a1SSrujanaChalla }
969655ff1a1SSrujanaChalla 
/* One-shot SHA1 digest: init followed by finup. */
static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret = mv_cesa_sha1_init(req);

	return ret ? ret : mv_cesa_ahash_finup(req);
}
980655ff1a1SSrujanaChalla 
/* SHA1 ahash algorithm descriptor registered for the CESA engine. */
struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
1006655ff1a1SSrujanaChalla 
1007655ff1a1SSrujanaChalla static int mv_cesa_sha256_init(struct ahash_request *req)
1008655ff1a1SSrujanaChalla {
1009655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
1010655ff1a1SSrujanaChalla 	struct mv_cesa_op_ctx tmpl = { };
1011655ff1a1SSrujanaChalla 
1012655ff1a1SSrujanaChalla 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
1013655ff1a1SSrujanaChalla 
1014655ff1a1SSrujanaChalla 	mv_cesa_ahash_init(req, &tmpl, false);
1015655ff1a1SSrujanaChalla 
1016655ff1a1SSrujanaChalla 	creq->state[0] = SHA256_H0;
1017655ff1a1SSrujanaChalla 	creq->state[1] = SHA256_H1;
1018655ff1a1SSrujanaChalla 	creq->state[2] = SHA256_H2;
1019655ff1a1SSrujanaChalla 	creq->state[3] = SHA256_H3;
1020655ff1a1SSrujanaChalla 	creq->state[4] = SHA256_H4;
1021655ff1a1SSrujanaChalla 	creq->state[5] = SHA256_H5;
1022655ff1a1SSrujanaChalla 	creq->state[6] = SHA256_H6;
1023655ff1a1SSrujanaChalla 	creq->state[7] = SHA256_H7;
1024655ff1a1SSrujanaChalla 
1025655ff1a1SSrujanaChalla 	return 0;
1026655ff1a1SSrujanaChalla }
1027655ff1a1SSrujanaChalla 
/* One-shot SHA256 digest: init followed by finup. */
static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret = mv_cesa_sha256_init(req);

	return ret ? ret : mv_cesa_ahash_finup(req);
}
1038655ff1a1SSrujanaChalla 
1039655ff1a1SSrujanaChalla static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
1040655ff1a1SSrujanaChalla {
1041655ff1a1SSrujanaChalla 	struct sha256_state *out_state = out;
1042655ff1a1SSrujanaChalla 
1043655ff1a1SSrujanaChalla 	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
1044655ff1a1SSrujanaChalla 				    out_state->buf);
1045655ff1a1SSrujanaChalla }
1046655ff1a1SSrujanaChalla 
1047655ff1a1SSrujanaChalla static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
1048655ff1a1SSrujanaChalla {
1049655ff1a1SSrujanaChalla 	const struct sha256_state *in_state = in;
1050655ff1a1SSrujanaChalla 
1051655ff1a1SSrujanaChalla 	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
1052655ff1a1SSrujanaChalla 				    in_state->buf);
1053655ff1a1SSrujanaChalla }
1054655ff1a1SSrujanaChalla 
/* SHA256 ahash algorithm descriptor registered for the CESA engine. */
struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
1080655ff1a1SSrujanaChalla 
/*
 * On-stack completion context used by the HMAC setkey helpers to wait
 * for an asynchronous hash request: the callback stores the final
 * status in @error and signals @completion.
 */
struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};
1085655ff1a1SSrujanaChalla 
1086655ff1a1SSrujanaChalla static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
1087655ff1a1SSrujanaChalla 					int error)
1088655ff1a1SSrujanaChalla {
1089655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_result *result = req->data;
1090655ff1a1SSrujanaChalla 
1091655ff1a1SSrujanaChalla 	if (error == -EINPROGRESS)
1092655ff1a1SSrujanaChalla 		return;
1093655ff1a1SSrujanaChalla 
1094655ff1a1SSrujanaChalla 	result->error = error;
1095655ff1a1SSrujanaChalla 	complete(&result->completion);
1096655ff1a1SSrujanaChalla }
1097655ff1a1SSrujanaChalla 
1098655ff1a1SSrujanaChalla static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
1099655ff1a1SSrujanaChalla 				       void *state, unsigned int blocksize)
1100655ff1a1SSrujanaChalla {
1101655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_result result;
1102655ff1a1SSrujanaChalla 	struct scatterlist sg;
1103655ff1a1SSrujanaChalla 	int ret;
1104655ff1a1SSrujanaChalla 
1105655ff1a1SSrujanaChalla 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1106655ff1a1SSrujanaChalla 				   mv_cesa_hmac_ahash_complete, &result);
1107655ff1a1SSrujanaChalla 	sg_init_one(&sg, pad, blocksize);
1108655ff1a1SSrujanaChalla 	ahash_request_set_crypt(req, &sg, pad, blocksize);
1109655ff1a1SSrujanaChalla 	init_completion(&result.completion);
1110655ff1a1SSrujanaChalla 
1111655ff1a1SSrujanaChalla 	ret = crypto_ahash_init(req);
1112655ff1a1SSrujanaChalla 	if (ret)
1113655ff1a1SSrujanaChalla 		return ret;
1114655ff1a1SSrujanaChalla 
1115655ff1a1SSrujanaChalla 	ret = crypto_ahash_update(req);
1116655ff1a1SSrujanaChalla 	if (ret && ret != -EINPROGRESS)
1117655ff1a1SSrujanaChalla 		return ret;
1118655ff1a1SSrujanaChalla 
1119655ff1a1SSrujanaChalla 	wait_for_completion_interruptible(&result.completion);
1120655ff1a1SSrujanaChalla 	if (result.error)
1121655ff1a1SSrujanaChalla 		return result.error;
1122655ff1a1SSrujanaChalla 
1123655ff1a1SSrujanaChalla 	ret = crypto_ahash_export(req, state);
1124655ff1a1SSrujanaChalla 	if (ret)
1125655ff1a1SSrujanaChalla 		return ret;
1126655ff1a1SSrujanaChalla 
1127655ff1a1SSrujanaChalla 	return 0;
1128655ff1a1SSrujanaChalla }
1129655ff1a1SSrujanaChalla 
1130655ff1a1SSrujanaChalla static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
1131655ff1a1SSrujanaChalla 				  const u8 *key, unsigned int keylen,
1132655ff1a1SSrujanaChalla 				  u8 *ipad, u8 *opad,
1133655ff1a1SSrujanaChalla 				  unsigned int blocksize)
1134655ff1a1SSrujanaChalla {
1135655ff1a1SSrujanaChalla 	struct mv_cesa_ahash_result result;
1136655ff1a1SSrujanaChalla 	struct scatterlist sg;
1137655ff1a1SSrujanaChalla 	int ret;
1138655ff1a1SSrujanaChalla 	int i;
1139655ff1a1SSrujanaChalla 
1140655ff1a1SSrujanaChalla 	if (keylen <= blocksize) {
1141655ff1a1SSrujanaChalla 		memcpy(ipad, key, keylen);
1142655ff1a1SSrujanaChalla 	} else {
1143655ff1a1SSrujanaChalla 		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);
1144655ff1a1SSrujanaChalla 
1145655ff1a1SSrujanaChalla 		if (!keydup)
1146655ff1a1SSrujanaChalla 			return -ENOMEM;
1147655ff1a1SSrujanaChalla 
1148655ff1a1SSrujanaChalla 		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1149655ff1a1SSrujanaChalla 					   mv_cesa_hmac_ahash_complete,
1150655ff1a1SSrujanaChalla 					   &result);
1151655ff1a1SSrujanaChalla 		sg_init_one(&sg, keydup, keylen);
1152655ff1a1SSrujanaChalla 		ahash_request_set_crypt(req, &sg, ipad, keylen);
1153655ff1a1SSrujanaChalla 		init_completion(&result.completion);
1154655ff1a1SSrujanaChalla 
1155655ff1a1SSrujanaChalla 		ret = crypto_ahash_digest(req);
1156655ff1a1SSrujanaChalla 		if (ret == -EINPROGRESS) {
1157655ff1a1SSrujanaChalla 			wait_for_completion_interruptible(&result.completion);
1158655ff1a1SSrujanaChalla 			ret = result.error;
1159655ff1a1SSrujanaChalla 		}
1160655ff1a1SSrujanaChalla 
1161655ff1a1SSrujanaChalla 		/* Set the memory region to 0 to avoid any leak. */
1162453431a5SWaiman Long 		kfree_sensitive(keydup);
1163655ff1a1SSrujanaChalla 
1164655ff1a1SSrujanaChalla 		if (ret)
1165655ff1a1SSrujanaChalla 			return ret;
1166655ff1a1SSrujanaChalla 
1167655ff1a1SSrujanaChalla 		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1168655ff1a1SSrujanaChalla 	}
1169655ff1a1SSrujanaChalla 
1170655ff1a1SSrujanaChalla 	memset(ipad + keylen, 0, blocksize - keylen);
1171655ff1a1SSrujanaChalla 	memcpy(opad, ipad, blocksize);
1172655ff1a1SSrujanaChalla 
1173655ff1a1SSrujanaChalla 	for (i = 0; i < blocksize; i++) {
1174655ff1a1SSrujanaChalla 		ipad[i] ^= HMAC_IPAD_VALUE;
1175655ff1a1SSrujanaChalla 		opad[i] ^= HMAC_OPAD_VALUE;
1176655ff1a1SSrujanaChalla 	}
1177655ff1a1SSrujanaChalla 
1178655ff1a1SSrujanaChalla 	return 0;
1179655ff1a1SSrujanaChalla }
1180655ff1a1SSrujanaChalla 
/*
 * Compute the HMAC inner (@istate) and outer (@ostate) intermediate
 * states for @key using the software implementation named by
 * @hash_alg_name.  Allocates a temporary tfm/request plus a single
 * buffer holding both pads; all resources are released via the goto
 * cleanup chain.  Returns 0 or a negative errno.
 */
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* One allocation for both pads: ipad first, opad right after. */
	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
1233655ff1a1SSrujanaChalla 
1234655ff1a1SSrujanaChalla static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
1235655ff1a1SSrujanaChalla {
1236655ff1a1SSrujanaChalla 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);
1237655ff1a1SSrujanaChalla 
1238655ff1a1SSrujanaChalla 	ctx->base.ops = &mv_cesa_ahash_req_ops;
1239655ff1a1SSrujanaChalla 
1240655ff1a1SSrujanaChalla 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1241655ff1a1SSrujanaChalla 				 sizeof(struct mv_cesa_ahash_req));
1242655ff1a1SSrujanaChalla 	return 0;
1243655ff1a1SSrujanaChalla }
1244655ff1a1SSrujanaChalla 
1245655ff1a1SSrujanaChalla static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
1246655ff1a1SSrujanaChalla {
1247655ff1a1SSrujanaChalla 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1248655ff1a1SSrujanaChalla 	struct mv_cesa_op_ctx tmpl = { };
1249655ff1a1SSrujanaChalla 
1250655ff1a1SSrujanaChalla 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
1251655ff1a1SSrujanaChalla 	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1252655ff1a1SSrujanaChalla 
1253655ff1a1SSrujanaChalla 	mv_cesa_ahash_init(req, &tmpl, true);
1254655ff1a1SSrujanaChalla 
1255655ff1a1SSrujanaChalla 	return 0;
1256655ff1a1SSrujanaChalla }
1257655ff1a1SSrujanaChalla 
1258655ff1a1SSrujanaChalla static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
1259655ff1a1SSrujanaChalla 				    unsigned int keylen)
1260655ff1a1SSrujanaChalla {
1261655ff1a1SSrujanaChalla 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1262655ff1a1SSrujanaChalla 	struct md5_state istate, ostate;
1263655ff1a1SSrujanaChalla 	int ret, i;
1264655ff1a1SSrujanaChalla 
1265655ff1a1SSrujanaChalla 	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
1266655ff1a1SSrujanaChalla 	if (ret)
1267655ff1a1SSrujanaChalla 		return ret;
1268655ff1a1SSrujanaChalla 
1269655ff1a1SSrujanaChalla 	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
1270655ff1a1SSrujanaChalla 		ctx->iv[i] = be32_to_cpu(istate.hash[i]);
1271655ff1a1SSrujanaChalla 
1272655ff1a1SSrujanaChalla 	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
1273655ff1a1SSrujanaChalla 		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);
1274655ff1a1SSrujanaChalla 
1275655ff1a1SSrujanaChalla 	return 0;
1276655ff1a1SSrujanaChalla }
1277655ff1a1SSrujanaChalla 
/* One-shot HMAC-MD5: init the request, then hash and finalize. */
static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret = mv_cesa_ahmac_md5_init(req);

	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
1288655ff1a1SSrujanaChalla 
/*
 * hmac(md5) ahash algorithm backed by the CESA engine.  ->setkey
 * precomputes the inner/outer MD5 states and stores them in the tfm
 * context; ->init loads them into the hardware operation template.
 */
struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
1315655ff1a1SSrujanaChalla 
1316655ff1a1SSrujanaChalla static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
1317655ff1a1SSrujanaChalla {
1318655ff1a1SSrujanaChalla 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1319655ff1a1SSrujanaChalla 	struct mv_cesa_op_ctx tmpl = { };
1320655ff1a1SSrujanaChalla 
1321655ff1a1SSrujanaChalla 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
1322655ff1a1SSrujanaChalla 	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1323655ff1a1SSrujanaChalla 
1324655ff1a1SSrujanaChalla 	mv_cesa_ahash_init(req, &tmpl, false);
1325655ff1a1SSrujanaChalla 
1326655ff1a1SSrujanaChalla 	return 0;
1327655ff1a1SSrujanaChalla }
1328655ff1a1SSrujanaChalla 
1329655ff1a1SSrujanaChalla static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
1330655ff1a1SSrujanaChalla 				     unsigned int keylen)
1331655ff1a1SSrujanaChalla {
1332655ff1a1SSrujanaChalla 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1333655ff1a1SSrujanaChalla 	struct sha1_state istate, ostate;
1334655ff1a1SSrujanaChalla 	int ret, i;
1335655ff1a1SSrujanaChalla 
1336655ff1a1SSrujanaChalla 	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
1337655ff1a1SSrujanaChalla 	if (ret)
1338655ff1a1SSrujanaChalla 		return ret;
1339655ff1a1SSrujanaChalla 
1340655ff1a1SSrujanaChalla 	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
1341655ff1a1SSrujanaChalla 		ctx->iv[i] = be32_to_cpu(istate.state[i]);
1342655ff1a1SSrujanaChalla 
1343655ff1a1SSrujanaChalla 	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
1344655ff1a1SSrujanaChalla 		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
1345655ff1a1SSrujanaChalla 
1346655ff1a1SSrujanaChalla 	return 0;
1347655ff1a1SSrujanaChalla }
1348655ff1a1SSrujanaChalla 
/* One-shot HMAC-SHA1: init the request, then hash and finalize. */
static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret = mv_cesa_ahmac_sha1_init(req);

	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
1359655ff1a1SSrujanaChalla 
/*
 * hmac(sha1) ahash algorithm backed by the CESA engine.  ->setkey
 * precomputes the inner/outer SHA1 states and stores them in the tfm
 * context; ->init loads them into the hardware operation template.
 */
struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
1386655ff1a1SSrujanaChalla 
1387655ff1a1SSrujanaChalla static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
1388655ff1a1SSrujanaChalla 				       unsigned int keylen)
1389655ff1a1SSrujanaChalla {
1390655ff1a1SSrujanaChalla 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1391655ff1a1SSrujanaChalla 	struct sha256_state istate, ostate;
1392655ff1a1SSrujanaChalla 	int ret, i;
1393655ff1a1SSrujanaChalla 
1394655ff1a1SSrujanaChalla 	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
1395655ff1a1SSrujanaChalla 	if (ret)
1396655ff1a1SSrujanaChalla 		return ret;
1397655ff1a1SSrujanaChalla 
1398655ff1a1SSrujanaChalla 	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
1399655ff1a1SSrujanaChalla 		ctx->iv[i] = be32_to_cpu(istate.state[i]);
1400655ff1a1SSrujanaChalla 
1401655ff1a1SSrujanaChalla 	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
1402655ff1a1SSrujanaChalla 		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
1403655ff1a1SSrujanaChalla 
1404655ff1a1SSrujanaChalla 	return 0;
1405655ff1a1SSrujanaChalla }
1406655ff1a1SSrujanaChalla 
1407655ff1a1SSrujanaChalla static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
1408655ff1a1SSrujanaChalla {
1409655ff1a1SSrujanaChalla 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1410655ff1a1SSrujanaChalla 	struct mv_cesa_op_ctx tmpl = { };
1411655ff1a1SSrujanaChalla 
1412655ff1a1SSrujanaChalla 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
1413655ff1a1SSrujanaChalla 	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1414655ff1a1SSrujanaChalla 
1415655ff1a1SSrujanaChalla 	mv_cesa_ahash_init(req, &tmpl, false);
1416655ff1a1SSrujanaChalla 
1417655ff1a1SSrujanaChalla 	return 0;
1418655ff1a1SSrujanaChalla }
1419655ff1a1SSrujanaChalla 
/* One-shot HMAC-SHA256: init the request, then hash and finalize. */
static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret = mv_cesa_ahmac_sha256_init(req);

	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
1430655ff1a1SSrujanaChalla 
/*
 * hmac(sha256) ahash algorithm backed by the CESA engine.  ->setkey
 * precomputes the inner/outer SHA256 states and stores them in the tfm
 * context; ->init loads them into the hardware operation template.
 */
struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
1457