xref: /openbmc/linux/drivers/crypto/inside-secure/safexcel_hash.c (revision e33bbe69149b802c0c77bfb822685772f85388ca)
1 /*
2  * Copyright (C) 2017 Marvell
3  *
4  * Antoine Tenart <antoine.tenart@free-electrons.com>
5  *
6  * This file is licensed under the terms of the GNU General Public
7  * License version 2. This program is licensed "as is" without any
8  * warranty of any kind, whether express or implied.
9  */
10 
11 #include <crypto/hmac.h>
12 #include <crypto/sha.h>
13 #include <linux/device.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dmapool.h>
16 
17 #include "safexcel.h"
18 
19 struct safexcel_ahash_ctx {
20 	struct safexcel_context base;
21 	struct safexcel_crypto_priv *priv;
22 
23 	u32 alg;
24 
25 	u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
26 	u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
27 };
28 
29 struct safexcel_ahash_req {
30 	bool last_req;
31 	bool finish;
32 	bool hmac;
33 	bool needs_inv;
34 
35 	int nents;
36 	dma_addr_t result_dma;
37 
38 	u32 digest;
39 
40 	u8 state_sz;    /* expected state size, only set once */
41 	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
42 
43 	u64 len;
44 	u64 processed;
45 
46 	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
47 	dma_addr_t cache_dma;
48 	unsigned int cache_sz;
49 
50 	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
51 };
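
/*
 * Summary added for clarity: 'len' counts every byte accepted through
 * update(), while 'processed' counts the bytes already handed to the engine.
 * The difference lives either in 'cache' (data carried over from previous
 * requests) or in the current request's scatterlist; 'cache_next' stages the
 * partial block that will be carried into the following send() call.
 */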
52 
53 struct safexcel_ahash_export_state {
54 	u64 len;
55 	u64 processed;
56 
57 	u32 digest;
58 
59 	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
60 	u8 cache[SHA256_BLOCK_SIZE];
61 };
62 
63 static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
64 				u32 input_length, u32 result_length)
65 {
66 	struct safexcel_token *token =
67 		(struct safexcel_token *)cdesc->control_data.token;
68 
69 	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
70 	token[0].packet_length = input_length;
71 	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
72 	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
73 
74 	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
75 	token[1].packet_length = result_length;
76 	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
77 			EIP197_TOKEN_STAT_LAST_PACKET;
78 	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
79 				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
80 }
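
/*
 * Illustrative example (added, not part of the original comments): for a
 * SHA-1 final request covering 64 bytes of data, the token program built
 * above is
 *
 *   token[0]: DIRECTION, packet_length = 64 -> hash the incoming data
 *   token[1]: INSERT,    packet_length = 20 -> append the 20-byte digest
 *
 * i.e. input_length is the number of bytes fed to the hash engine and
 * result_length is the digest size written back through the result
 * descriptor.
 */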
81 
82 static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
83 				     struct safexcel_ahash_req *req,
84 				     struct safexcel_command_desc *cdesc,
85 				     unsigned int digestsize,
86 				     unsigned int blocksize)
87 {
88 	int i;
89 
90 	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
91 	cdesc->control_data.control0 |= ctx->alg;
92 	cdesc->control_data.control0 |= req->digest;
93 
94 	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
95 		if (req->processed) {
96 			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
97 				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
98 			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
99 				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
100 				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
101 
102 			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
103 		} else {
104 			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
105 		}
106 
107 		if (!req->finish)
108 			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
109 
110 		/*
111 		 * Copy the input digest if needed, and set up the context
112 		 * fields. Do this now as we need it to set up the first command
113 		 * descriptor.
114 		 */
115 		if (req->processed) {
116 			for (i = 0; i < digestsize / sizeof(u32); i++)
117 				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
118 
119 			if (req->finish)
120 				ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
121 		}
122 	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
123 		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
124 
125 		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
126 		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
127 		       ctx->opad, req->state_sz);
128 	}
129 }
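
/*
 * Reference note (added): the context record programmed above has two
 * layouts.  With a precomputed digest it holds the intermediate hash state
 * words and, on the final block, the number of blocks already processed
 * (the digest count).  In HMAC mode it holds the precomputed inner digest
 * (ipad) followed by the outer digest (opad), which is why the control size
 * is twice the state size.
 */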
130 
131 static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
132 				      struct crypto_async_request *async,
133 				      bool *should_complete, int *ret)
134 {
135 	struct safexcel_result_desc *rdesc;
136 	struct ahash_request *areq = ahash_request_cast(async);
137 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
138 	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
139 	int cache_len;
140 
141 	*ret = 0;
142 
143 	spin_lock_bh(&priv->ring[ring].egress_lock);
144 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
145 	if (IS_ERR(rdesc)) {
146 		dev_err(priv->dev,
147 			"hash: result: could not retrieve the result descriptor\n");
148 		*ret = PTR_ERR(rdesc);
149 	} else if (rdesc->result_data.error_code) {
150 		dev_err(priv->dev,
151 			"hash: result: result descriptor error (%d)\n",
152 			rdesc->result_data.error_code);
153 		*ret = -EINVAL;
154 	}
155 
156 	safexcel_complete(priv, ring);
157 	spin_unlock_bh(&priv->ring[ring].egress_lock);
158 
159 	if (sreq->nents) {
160 		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
161 		sreq->nents = 0;
162 	}
163 
164 	if (sreq->result_dma) {
165 		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
166 				 DMA_FROM_DEVICE);
167 		sreq->result_dma = 0;
168 	}
169 
170 	if (sreq->cache_dma) {
171 		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
172 				 DMA_TO_DEVICE);
173 		sreq->cache_dma = 0;
174 	}
175 
176 	if (sreq->finish)
177 		memcpy(areq->result, sreq->state,
178 		       crypto_ahash_digestsize(ahash));
179 
180 	cache_len = sreq->len - sreq->processed;
181 	if (cache_len)
182 		memcpy(sreq->cache, sreq->cache_next, cache_len);
183 
184 	*should_complete = true;
185 
186 	return 1;
187 }
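
/*
 * Added note: on completion the source scatterlist, the result buffer and
 * the cached block are unmapped, the digest is copied to areq->result when
 * this was a final request, and any bytes staged in cache_next during send()
 * become the cache for the next request.
 */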
188 
189 static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
190 				   struct safexcel_request *request,
191 				   int *commands, int *results)
192 {
193 	struct ahash_request *areq = ahash_request_cast(async);
194 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
195 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
196 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
197 	struct safexcel_crypto_priv *priv = ctx->priv;
198 	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
199 	struct safexcel_result_desc *rdesc;
200 	struct scatterlist *sg;
201 	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
202 
203 	queued = len = req->len - req->processed;
204 	if (queued <= crypto_ahash_blocksize(ahash))
205 		cache_len = queued;
206 	else
207 		cache_len = queued - areq->nbytes;
208 
209 	if (!req->last_req) {
210 		/* If this is not the last request and the queued data is not
211 		 * block-aligned, cache the remainder for the next send() call.
212 		 */
213 		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
214 		if (!extra)
215 			/* If this is not the last request and the queued data
216 			 * is a multiple of the block size, cache the last block for now.
217 			 */
218 			extra = crypto_ahash_blocksize(ahash);
219 
220 		if (extra) {
221 			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
222 					   req->cache_next, extra,
223 					   areq->nbytes - extra);
224 
225 			queued -= extra;
226 			len -= extra;
227 
228 			if (!queued) {
229 				*commands = 0;
230 				*results = 0;
231 				return 0;
232 			}
233 		}
234 	}
235 
236 	spin_lock_bh(&priv->ring[ring].egress_lock);
237 
238 	/* Add a command descriptor for the cached data, if any */
239 	if (cache_len) {
240 		req->cache_dma = dma_map_single(priv->dev, req->cache,
241 						cache_len, DMA_TO_DEVICE);
242 		if (dma_mapping_error(priv->dev, req->cache_dma)) {
243 			spin_unlock_bh(&priv->ring[ring].egress_lock);
244 			return -EINVAL;
245 		}
246 
247 		req->cache_sz = cache_len;
248 		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
249 						 (cache_len == len),
250 						 req->cache_dma, cache_len, len,
251 						 ctx->base.ctxr_dma);
252 		if (IS_ERR(first_cdesc)) {
253 			ret = PTR_ERR(first_cdesc);
254 			goto unmap_cache;
255 		}
256 		n_cdesc++;
257 
258 		queued -= cache_len;
259 		if (!queued)
260 			goto send_command;
261 	}
262 
263 	/* Now handle the current ahash request buffer(s) */
264 	req->nents = dma_map_sg(priv->dev, areq->src,
265 				sg_nents_for_len(areq->src, areq->nbytes),
266 				DMA_TO_DEVICE);
267 	if (!req->nents) {
268 		ret = -ENOMEM;
269 		goto cdesc_rollback;
270 	}
271 
272 	for_each_sg(areq->src, sg, req->nents, i) {
273 		int sglen = sg_dma_len(sg);
274 
275 		/* Do not overflow the request */
276 		if (queued - sglen < 0)
277 			sglen = queued;
278 
279 		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
280 					   !(queued - sglen), sg_dma_address(sg),
281 					   sglen, len, ctx->base.ctxr_dma);
282 		if (IS_ERR(cdesc)) {
283 			ret = PTR_ERR(cdesc);
284 			goto unmap_sg;
285 		}
286 		n_cdesc++;
287 
288 		if (n_cdesc == 1)
289 			first_cdesc = cdesc;
290 
291 		queued -= sglen;
292 		if (!queued)
293 			break;
294 	}
295 
296 send_command:
297 	/* Set up the context options */
298 	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
299 				 crypto_ahash_blocksize(ahash));
300 
301 	/* Add the token */
302 	safexcel_hash_token(first_cdesc, len, req->state_sz);
303 
304 	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
305 					 DMA_FROM_DEVICE);
306 	if (dma_mapping_error(priv->dev, req->result_dma)) {
307 		ret = -EINVAL;
308 		goto unmap_sg;
309 	}
310 
311 	/* Add a result descriptor */
312 	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
313 				   req->state_sz);
314 	if (IS_ERR(rdesc)) {
315 		ret = PTR_ERR(rdesc);
316 		goto unmap_result;
317 	}
318 
319 	spin_unlock_bh(&priv->ring[ring].egress_lock);
320 
321 	req->processed += len;
322 	request->req = &areq->base;
323 
324 	*commands = n_cdesc;
325 	*results = 1;
326 	return 0;
327 
328 unmap_result:
329 	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
330 			 DMA_FROM_DEVICE);
331 unmap_sg:
332 	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
333 cdesc_rollback:
334 	for (i = 0; i < n_cdesc; i++)
335 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
336 unmap_cache:
337 	if (req->cache_dma) {
338 		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
339 				 DMA_TO_DEVICE);
340 		req->cache_sz = 0;
341 	}
342 
343 	spin_unlock_bh(&priv->ring[ring].egress_lock);
344 	return ret;
345 }
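
/*
 * Worked example for the arithmetic above (illustrative values only): with a
 * 64-byte block size, assume req->len = 200, req->processed = 64 and
 * areq->nbytes = 120, so 16 bytes already sit in req->cache.  Then
 * queued = len = 136 and cache_len = 136 - 120 = 16.  As this is not the
 * last request, extra = 136 & 63 = 8, so the last 8 bytes of the scatterlist
 * go into cache_next and queued and len drop to 128.  The engine is given
 * the 16 cached bytes plus 112 bytes from the scatterlist, i.e. exactly two
 * full blocks, while 8 bytes are carried over to the next send().
 */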
346 
347 static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
348 {
349 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
350 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
351 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
352 	unsigned int state_w_sz = req->state_sz / sizeof(u32);
353 	int i;
354 
355 	for (i = 0; i < state_w_sz; i++)
356 		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
357 			return true;
358 
359 	if (ctx->base.ctxr->data[state_w_sz] !=
360 	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
361 		return true;
362 
363 	return false;
364 }
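
/*
 * Added note: the check above compares the software copy of the hash state
 * (and the processed block count) against what is currently stored in the
 * context record.  If they differ, the engine's cached context is stale and
 * must be invalidated before the record can be reused for this request.
 */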
365 
366 static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
367 				      int ring,
368 				      struct crypto_async_request *async,
369 				      bool *should_complete, int *ret)
370 {
371 	struct safexcel_result_desc *rdesc;
372 	struct ahash_request *areq = ahash_request_cast(async);
373 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
374 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
375 	int enq_ret;
376 
377 	*ret = 0;
378 
379 	spin_lock_bh(&priv->ring[ring].egress_lock);
380 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
381 	if (IS_ERR(rdesc)) {
382 		dev_err(priv->dev,
383 			"hash: invalidate: could not retrieve the result descriptor\n");
384 		*ret = PTR_ERR(rdesc);
385 	} else if (rdesc->result_data.error_code) {
386 		dev_err(priv->dev,
387 			"hash: invalidate: result descriptor error (%d)\n",
388 			rdesc->result_data.error_code);
389 		*ret = -EINVAL;
390 	}
391 
392 	safexcel_complete(priv, ring);
393 	spin_unlock_bh(&priv->ring[ring].egress_lock);
394 
395 	if (ctx->base.exit_inv) {
396 		dma_pool_free(priv->context_pool, ctx->base.ctxr,
397 			      ctx->base.ctxr_dma);
398 
399 		*should_complete = true;
400 		return 1;
401 	}
402 
403 	ring = safexcel_select_ring(priv);
404 	ctx->base.ring = ring;
405 
406 	spin_lock_bh(&priv->ring[ring].queue_lock);
407 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
408 	spin_unlock_bh(&priv->ring[ring].queue_lock);
409 
410 	if (enq_ret != -EINPROGRESS)
411 		*ret = enq_ret;
412 
413 	queue_work(priv->ring[ring].workqueue,
414 		   &priv->ring[ring].work_data.work);
415 
416 	*should_complete = false;
417 
418 	return 1;
419 }
420 
421 static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
422 				  struct crypto_async_request *async,
423 				  bool *should_complete, int *ret)
424 {
425 	struct ahash_request *areq = ahash_request_cast(async);
426 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
427 	int err;
428 
429 	BUG_ON(priv->version == EIP97 && req->needs_inv);
430 
431 	if (req->needs_inv) {
432 		req->needs_inv = false;
433 		err = safexcel_handle_inv_result(priv, ring, async,
434 						 should_complete, ret);
435 	} else {
436 		err = safexcel_handle_req_result(priv, ring, async,
437 						 should_complete, ret);
438 	}
439 
440 	return err;
441 }
442 
443 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
444 				   int ring, struct safexcel_request *request,
445 				   int *commands, int *results)
446 {
447 	struct ahash_request *areq = ahash_request_cast(async);
448 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
449 	int ret;
450 
451 	ret = safexcel_invalidate_cache(async, ctx->priv,
452 					ctx->base.ctxr_dma, ring, request);
453 	if (unlikely(ret))
454 		return ret;
455 
456 	*commands = 1;
457 	*results = 1;
458 
459 	return 0;
460 }
461 
462 static int safexcel_ahash_send(struct crypto_async_request *async,
463 			       int ring, struct safexcel_request *request,
464 			       int *commands, int *results)
465 {
466 	struct ahash_request *areq = ahash_request_cast(async);
467 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
468 	int ret;
469 
470 	if (req->needs_inv)
471 		ret = safexcel_ahash_send_inv(async, ring, request,
472 					      commands, results);
473 	else
474 		ret = safexcel_ahash_send_req(async, ring, request,
475 					      commands, results);
476 	return ret;
477 }
478 
479 static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
480 {
481 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
482 	struct safexcel_crypto_priv *priv = ctx->priv;
483 	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
484 	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
485 	struct safexcel_inv_result result = {};
486 	int ring = ctx->base.ring;
487 
488 	memset(req, 0, sizeof(struct ahash_request));
489 
490 	/* create invalidation request */
491 	init_completion(&result.completion);
492 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
493 				   safexcel_inv_complete, &result);
494 
495 	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
496 	ctx = crypto_tfm_ctx(req->base.tfm);
497 	ctx->base.exit_inv = true;
498 	rctx->needs_inv = true;
499 
500 	spin_lock_bh(&priv->ring[ring].queue_lock);
501 	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
502 	spin_unlock_bh(&priv->ring[ring].queue_lock);
503 
504 	queue_work(priv->ring[ring].workqueue,
505 		   &priv->ring[ring].work_data.work);
506 
507 	wait_for_completion(&result.completion);
508 
509 	if (result.error) {
510 		dev_warn(priv->dev, "hash: completion error (%d)\n",
511 			 result.error);
512 		return result.error;
513 	}
514 
515 	return 0;
516 }
517 
518 /* safexcel_ahash_cache: cache data until at least one request can be sent to
519  * the engine, i.e. when at least one full block of data is in the pipe.
520  */
521 static int safexcel_ahash_cache(struct ahash_request *areq)
522 {
523 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
524 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
525 	int queued, cache_len;
526 
527 	/* cache_len: everything accepted by the driver but not sent yet,
528 	 * tot sz handled by update() - last req sz - tot sz handled by send()
529 	 */
530 	cache_len = req->len - areq->nbytes - req->processed;
531 	/* queued: everything accepted by the driver which will be handled by
532 	 * the next send() calls.
533 	 * tot sz handled by update() - tot sz handled by send()
534 	 */
535 	queued = req->len - req->processed;
536 
537 	/*
538 	 * If there aren't enough bytes to proceed (less than a full
539 	 * block), cache the data until we have enough.
540 	 */
541 	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
542 		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
543 				   req->cache + cache_len,
544 				   areq->nbytes, 0);
545 		return areq->nbytes;
546 	}
547 
548 	/* We couldn't cache all the data */
549 	return -E2BIG;
550 }
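
/*
 * Worked example for the bookkeeping above (illustrative values only): with
 * a 64-byte block size, assume update() has just raised req->len to 100,
 * req->processed is 64 and the current request carries areq->nbytes = 20.
 * Then cache_len = 100 - 20 - 64 = 16 and queued = 36.  Since 16 + 20 <= 64,
 * the 20 new bytes are appended to req->cache at offset 16 and the function
 * returns 20: nothing is sent to the engine yet.
 */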
551 
552 static int safexcel_ahash_enqueue(struct ahash_request *areq)
553 {
554 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
555 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
556 	struct safexcel_crypto_priv *priv = ctx->priv;
557 	int ret, ring;
558 
559 	req->needs_inv = false;
560 
561 	if (ctx->base.ctxr) {
562 		if (priv->version == EIP197 &&
563 		    !ctx->base.needs_inv && req->processed &&
564 		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
565 			/* We're still setting needs_inv here, even though it is
566 			 * cleared right away, because the needs_inv flag can be
567 			 * set in other functions and we want to keep the same
568 			 * logic.
569 			 */
570 			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
571 
572 		if (ctx->base.needs_inv) {
573 			ctx->base.needs_inv = false;
574 			req->needs_inv = true;
575 		}
576 	} else {
577 		ctx->base.ring = safexcel_select_ring(priv);
578 		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
579 						 EIP197_GFP_FLAGS(areq->base),
580 						 &ctx->base.ctxr_dma);
581 		if (!ctx->base.ctxr)
582 			return -ENOMEM;
583 	}
584 
585 	ring = ctx->base.ring;
586 
587 	spin_lock_bh(&priv->ring[ring].queue_lock);
588 	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
589 	spin_unlock_bh(&priv->ring[ring].queue_lock);
590 
591 	queue_work(priv->ring[ring].workqueue,
592 		   &priv->ring[ring].work_data.work);
593 
594 	return ret;
595 }
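
/*
 * Added note: the first request on a transform allocates the context record
 * from the DMA pool and binds the context to a ring; later requests keep
 * using that ring.  On EIP197 the engine may still hold a cached copy of the
 * record, so when data has already been processed the request is flagged for
 * invalidation whenever safexcel_ahash_needs_inv_get() detects that the
 * cached copy diverged from the software state.
 */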
596 
597 static int safexcel_ahash_update(struct ahash_request *areq)
598 {
599 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
600 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
601 
602 	/* If the request is 0 length, do nothing */
603 	if (!areq->nbytes)
604 		return 0;
605 
606 	req->len += areq->nbytes;
607 
608 	safexcel_ahash_cache(areq);
609 
610 	/*
611 	 * We don't do partial updates when performing an HMAC request.
612 	 * Everything will be handled by the final() call.
613 	 */
614 	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
615 		return 0;
616 
617 	if (req->hmac)
618 		return safexcel_ahash_enqueue(areq);
619 
620 	if (!req->last_req &&
621 	    req->len - req->processed > crypto_ahash_blocksize(ahash))
622 		return safexcel_ahash_enqueue(areq);
623 
624 	return 0;
625 }
626 
627 static int safexcel_ahash_final(struct ahash_request *areq)
628 {
629 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
630 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
631 
632 	req->last_req = true;
633 	req->finish = true;
634 
635 	/* If this is an overall zero-length request */
636 	if (!(req->len + areq->nbytes)) {
637 		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
638 			memcpy(areq->result, sha1_zero_message_hash,
639 			       SHA1_DIGEST_SIZE);
640 		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
641 			memcpy(areq->result, sha224_zero_message_hash,
642 			       SHA224_DIGEST_SIZE);
643 		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
644 			memcpy(areq->result, sha256_zero_message_hash,
645 			       SHA256_DIGEST_SIZE);
646 
647 		return 0;
648 	}
649 
650 	return safexcel_ahash_enqueue(areq);
651 }
652 
653 static int safexcel_ahash_finup(struct ahash_request *areq)
654 {
655 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
656 
657 	req->last_req = true;
658 	req->finish = true;
659 
660 	safexcel_ahash_update(areq);
661 	return safexcel_ahash_final(areq);
662 }
663 
664 static int safexcel_ahash_export(struct ahash_request *areq, void *out)
665 {
666 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
667 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
668 	struct safexcel_ahash_export_state *export = out;
669 
670 	export->len = req->len;
671 	export->processed = req->processed;
672 
673 	export->digest = req->digest;
674 
675 	memcpy(export->state, req->state, req->state_sz);
676 	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
677 
678 	return 0;
679 }
680 
681 static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
682 {
683 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
684 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
685 	const struct safexcel_ahash_export_state *export = in;
686 	int ret;
687 
688 	ret = crypto_ahash_init(areq);
689 	if (ret)
690 		return ret;
691 
692 	req->len = export->len;
693 	req->processed = export->processed;
694 
695 	req->digest = export->digest;
696 
697 	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
698 	memcpy(req->state, export->state, req->state_sz);
699 
700 	return 0;
701 }
702 
703 static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
704 {
705 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
706 	struct safexcel_alg_template *tmpl =
707 		container_of(__crypto_ahash_alg(tfm->__crt_alg),
708 			     struct safexcel_alg_template, alg.ahash);
709 
710 	ctx->priv = tmpl->priv;
711 	ctx->base.send = safexcel_ahash_send;
712 	ctx->base.handle_result = safexcel_handle_result;
713 
714 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
715 				 sizeof(struct safexcel_ahash_req));
716 	return 0;
717 }
718 
719 static int safexcel_sha1_init(struct ahash_request *areq)
720 {
721 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
722 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
723 
724 	memset(req, 0, sizeof(*req));
725 
726 	req->state[0] = SHA1_H0;
727 	req->state[1] = SHA1_H1;
728 	req->state[2] = SHA1_H2;
729 	req->state[3] = SHA1_H3;
730 	req->state[4] = SHA1_H4;
731 
732 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
733 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
734 	req->state_sz = SHA1_DIGEST_SIZE;
735 
736 	return 0;
737 }
738 
739 static int safexcel_sha1_digest(struct ahash_request *areq)
740 {
741 	int ret = safexcel_sha1_init(areq);
742 
743 	if (ret)
744 		return ret;
745 
746 	return safexcel_ahash_finup(areq);
747 }
748 
749 static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
750 {
751 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
752 	struct safexcel_crypto_priv *priv = ctx->priv;
753 	int ret;
754 
755 	/* context not allocated, skip invalidation */
756 	if (!ctx->base.ctxr)
757 		return;
758 
759 	if (priv->version == EIP197) {
760 		ret = safexcel_ahash_exit_inv(tfm);
761 		if (ret)
762 			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
763 	} else {
764 		dma_pool_free(priv->context_pool, ctx->base.ctxr,
765 			      ctx->base.ctxr_dma);
766 	}
767 }
768 
769 struct safexcel_alg_template safexcel_alg_sha1 = {
770 	.type = SAFEXCEL_ALG_TYPE_AHASH,
771 	.alg.ahash = {
772 		.init = safexcel_sha1_init,
773 		.update = safexcel_ahash_update,
774 		.final = safexcel_ahash_final,
775 		.finup = safexcel_ahash_finup,
776 		.digest = safexcel_sha1_digest,
777 		.export = safexcel_ahash_export,
778 		.import = safexcel_ahash_import,
779 		.halg = {
780 			.digestsize = SHA1_DIGEST_SIZE,
781 			.statesize = sizeof(struct safexcel_ahash_export_state),
782 			.base = {
783 				.cra_name = "sha1",
784 				.cra_driver_name = "safexcel-sha1",
785 				.cra_priority = 300,
786 				.cra_flags = CRYPTO_ALG_ASYNC |
787 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
788 				.cra_blocksize = SHA1_BLOCK_SIZE,
789 				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
790 				.cra_init = safexcel_ahash_cra_init,
791 				.cra_exit = safexcel_ahash_cra_exit,
792 				.cra_module = THIS_MODULE,
793 			},
794 		},
795 	},
796 };
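
/*
 * Usage sketch (added for illustration, kept under #if 0 so it is not
 * built): roughly how a kernel caller could compute a digest through the
 * "sha1" ahash registered above, using the same request/completion pattern
 * this driver uses in safexcel_hmac_setkey() below.  The names
 * example_digest_result, example_digest_complete() and example_sha1_digest()
 * are hypothetical and not part of this driver.
 */
#if 0
struct example_digest_result {
	struct completion completion;
	int error;
};

static void example_digest_complete(struct crypto_async_request *req, int error)
{
	struct example_digest_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int example_sha1_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct example_digest_result result;
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	/* "sha1" resolves to the highest-priority registered implementation,
	 * which may be the safexcel-sha1 ahash defined above.
	 */
	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_digest_complete, &result);

	/* 'data' must be DMA-able (e.g. kmalloc'ed, not on the stack) since a
	 * hardware implementation will map it.
	 */
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&result.completion);
		ret = result.error;
	}

	ahash_request_free(req);
free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}
#endif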
797 
798 static int safexcel_hmac_sha1_init(struct ahash_request *areq)
799 {
800 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
801 
802 	safexcel_sha1_init(areq);
803 	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
804 	return 0;
805 }
806 
807 static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
808 {
809 	int ret = safexcel_hmac_sha1_init(areq);
810 
811 	if (ret)
812 		return ret;
813 
814 	return safexcel_ahash_finup(areq);
815 }
816 
817 struct safexcel_ahash_result {
818 	struct completion completion;
819 	int error;
820 };
821 
822 static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
823 {
824 	struct safexcel_ahash_result *result = req->data;
825 
826 	if (error == -EINPROGRESS)
827 		return;
828 
829 	result->error = error;
830 	complete(&result->completion);
831 }
832 
833 static int safexcel_hmac_init_pad(struct ahash_request *areq,
834 				  unsigned int blocksize, const u8 *key,
835 				  unsigned int keylen, u8 *ipad, u8 *opad)
836 {
837 	struct safexcel_ahash_result result;
838 	struct scatterlist sg;
839 	int ret, i;
840 	u8 *keydup;
841 
842 	if (keylen <= blocksize) {
843 		memcpy(ipad, key, keylen);
844 	} else {
845 		keydup = kmemdup(key, keylen, GFP_KERNEL);
846 		if (!keydup)
847 			return -ENOMEM;
848 
849 		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
850 					   safexcel_ahash_complete, &result);
851 		sg_init_one(&sg, keydup, keylen);
852 		ahash_request_set_crypt(areq, &sg, ipad, keylen);
853 		init_completion(&result.completion);
854 
855 		ret = crypto_ahash_digest(areq);
856 		if (ret == -EINPROGRESS || ret == -EBUSY) {
857 			wait_for_completion_interruptible(&result.completion);
858 			ret = result.error;
859 		}
860 
861 		/* Avoid leaking the key material */
862 		memzero_explicit(keydup, keylen);
863 		kfree(keydup);
864 
865 		if (ret)
866 			return ret;
867 
868 		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
869 	}
870 
871 	memset(ipad + keylen, 0, blocksize - keylen);
872 	memcpy(opad, ipad, blocksize);
873 
874 	for (i = 0; i < blocksize; i++) {
875 		ipad[i] ^= HMAC_IPAD_VALUE;
876 		opad[i] ^= HMAC_OPAD_VALUE;
877 	}
878 
879 	return 0;
880 }
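
/*
 * Reference note (added): this is the standard HMAC key preparation from
 * RFC 2104.  A key longer than the block size is first replaced by its
 * digest; the (possibly shortened) key is then zero-padded to one block and
 * XORed with the 0x36 (HMAC_IPAD_VALUE) and 0x5c (HMAC_OPAD_VALUE) constants
 * to obtain the inner and outer pad blocks.
 */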
881 
882 static int safexcel_hmac_init_iv(struct ahash_request *areq,
883 				 unsigned int blocksize, u8 *pad, void *state)
884 {
885 	struct safexcel_ahash_result result;
886 	struct safexcel_ahash_req *req;
887 	struct scatterlist sg;
888 	int ret;
889 
890 	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
891 				   safexcel_ahash_complete, &result);
892 	sg_init_one(&sg, pad, blocksize);
893 	ahash_request_set_crypt(areq, &sg, pad, blocksize);
894 	init_completion(&result.completion);
895 
896 	ret = crypto_ahash_init(areq);
897 	if (ret)
898 		return ret;
899 
900 	req = ahash_request_ctx(areq);
901 	req->hmac = true;
902 	req->last_req = true;
903 
904 	ret = crypto_ahash_update(areq);
905 	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
906 		return ret;
907 
908 	wait_for_completion_interruptible(&result.completion);
909 	if (result.error)
910 		return result.error;
911 
912 	return crypto_ahash_export(areq, state);
913 }
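
/*
 * Added note: hashing exactly one pad block without finalizing and exporting
 * the result captures the intermediate hash state after absorbing
 * key ^ ipad (resp. key ^ opad).  These exported states are copied into
 * ctx->ipad/ctx->opad and from there into the engine's context record (see
 * safexcel_context_control()), so HMAC requests can resume from them instead
 * of re-hashing the key material every time.
 */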
914 
915 static int safexcel_hmac_setkey(const char *alg, const u8 *key,
916 				unsigned int keylen, void *istate, void *ostate)
917 {
918 	struct ahash_request *areq;
919 	struct crypto_ahash *tfm;
920 	unsigned int blocksize;
921 	u8 *ipad, *opad;
922 	int ret;
923 
924 	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
925 				 CRYPTO_ALG_TYPE_AHASH_MASK);
926 	if (IS_ERR(tfm))
927 		return PTR_ERR(tfm);
928 
929 	areq = ahash_request_alloc(tfm, GFP_KERNEL);
930 	if (!areq) {
931 		ret = -ENOMEM;
932 		goto free_ahash;
933 	}
934 
935 	crypto_ahash_clear_flags(tfm, ~0);
936 	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
937 
938 	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
939 	if (!ipad) {
940 		ret = -ENOMEM;
941 		goto free_request;
942 	}
943 
944 	opad = ipad + blocksize;
945 
946 	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
947 	if (ret)
948 		goto free_ipad;
949 
950 	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
951 	if (ret)
952 		goto free_ipad;
953 
954 	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);
955 
956 free_ipad:
957 	kfree(ipad);
958 free_request:
959 	ahash_request_free(areq);
960 free_ahash:
961 	crypto_free_ahash(tfm);
962 
963 	return ret;
964 }
965 
966 static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
967 				    unsigned int keylen, const char *alg,
968 				    unsigned int state_sz)
969 {
970 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
971 	struct safexcel_crypto_priv *priv = ctx->priv;
972 	struct safexcel_ahash_export_state istate, ostate;
973 	int ret, i;
974 
975 	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
976 	if (ret)
977 		return ret;
978 
979 	if (priv->version == EIP197 && ctx->base.ctxr) {
980 		for (i = 0; i < state_sz / sizeof(u32); i++) {
981 			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
982 			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
983 				ctx->base.needs_inv = true;
984 				break;
985 			}
986 		}
987 	}
988 
989 	memcpy(ctx->ipad, &istate.state, state_sz);
990 	memcpy(ctx->opad, &ostate.state, state_sz);
991 
992 	return 0;
993 }
994 
995 static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
996 				     unsigned int keylen)
997 {
998 	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
999 					SHA1_DIGEST_SIZE);
1000 }
1001 
1002 struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
1003 	.type = SAFEXCEL_ALG_TYPE_AHASH,
1004 	.alg.ahash = {
1005 		.init = safexcel_hmac_sha1_init,
1006 		.update = safexcel_ahash_update,
1007 		.final = safexcel_ahash_final,
1008 		.finup = safexcel_ahash_finup,
1009 		.digest = safexcel_hmac_sha1_digest,
1010 		.setkey = safexcel_hmac_sha1_setkey,
1011 		.export = safexcel_ahash_export,
1012 		.import = safexcel_ahash_import,
1013 		.halg = {
1014 			.digestsize = SHA1_DIGEST_SIZE,
1015 			.statesize = sizeof(struct safexcel_ahash_export_state),
1016 			.base = {
1017 				.cra_name = "hmac(sha1)",
1018 				.cra_driver_name = "safexcel-hmac-sha1",
1019 				.cra_priority = 300,
1020 				.cra_flags = CRYPTO_ALG_ASYNC |
1021 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1022 				.cra_blocksize = SHA1_BLOCK_SIZE,
1023 				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1024 				.cra_init = safexcel_ahash_cra_init,
1025 				.cra_exit = safexcel_ahash_cra_exit,
1026 				.cra_module = THIS_MODULE,
1027 			},
1028 		},
1029 	},
1030 };
1031 
1032 static int safexcel_sha256_init(struct ahash_request *areq)
1033 {
1034 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1035 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1036 
1037 	memset(req, 0, sizeof(*req));
1038 
1039 	req->state[0] = SHA256_H0;
1040 	req->state[1] = SHA256_H1;
1041 	req->state[2] = SHA256_H2;
1042 	req->state[3] = SHA256_H3;
1043 	req->state[4] = SHA256_H4;
1044 	req->state[5] = SHA256_H5;
1045 	req->state[6] = SHA256_H6;
1046 	req->state[7] = SHA256_H7;
1047 
1048 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
1049 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1050 	req->state_sz = SHA256_DIGEST_SIZE;
1051 
1052 	return 0;
1053 }
1054 
1055 static int safexcel_sha256_digest(struct ahash_request *areq)
1056 {
1057 	int ret = safexcel_sha256_init(areq);
1058 
1059 	if (ret)
1060 		return ret;
1061 
1062 	return safexcel_ahash_finup(areq);
1063 }
1064 
1065 struct safexcel_alg_template safexcel_alg_sha256 = {
1066 	.type = SAFEXCEL_ALG_TYPE_AHASH,
1067 	.alg.ahash = {
1068 		.init = safexcel_sha256_init,
1069 		.update = safexcel_ahash_update,
1070 		.final = safexcel_ahash_final,
1071 		.finup = safexcel_ahash_finup,
1072 		.digest = safexcel_sha256_digest,
1073 		.export = safexcel_ahash_export,
1074 		.import = safexcel_ahash_import,
1075 		.halg = {
1076 			.digestsize = SHA256_DIGEST_SIZE,
1077 			.statesize = sizeof(struct safexcel_ahash_export_state),
1078 			.base = {
1079 				.cra_name = "sha256",
1080 				.cra_driver_name = "safexcel-sha256",
1081 				.cra_priority = 300,
1082 				.cra_flags = CRYPTO_ALG_ASYNC |
1083 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1084 				.cra_blocksize = SHA256_BLOCK_SIZE,
1085 				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1086 				.cra_init = safexcel_ahash_cra_init,
1087 				.cra_exit = safexcel_ahash_cra_exit,
1088 				.cra_module = THIS_MODULE,
1089 			},
1090 		},
1091 	},
1092 };
1093 
1094 static int safexcel_sha224_init(struct ahash_request *areq)
1095 {
1096 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1097 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1098 
1099 	memset(req, 0, sizeof(*req));
1100 
1101 	req->state[0] = SHA224_H0;
1102 	req->state[1] = SHA224_H1;
1103 	req->state[2] = SHA224_H2;
1104 	req->state[3] = SHA224_H3;
1105 	req->state[4] = SHA224_H4;
1106 	req->state[5] = SHA224_H5;
1107 	req->state[6] = SHA224_H6;
1108 	req->state[7] = SHA224_H7;
1109 
1110 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
1111 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1112 	req->state_sz = SHA256_DIGEST_SIZE;
1113 
1114 	return 0;
1115 }
1116 
1117 static int safexcel_sha224_digest(struct ahash_request *areq)
1118 {
1119 	int ret = safexcel_sha224_init(areq);
1120 
1121 	if (ret)
1122 		return ret;
1123 
1124 	return safexcel_ahash_finup(areq);
1125 }
1126 
1127 struct safexcel_alg_template safexcel_alg_sha224 = {
1128 	.type = SAFEXCEL_ALG_TYPE_AHASH,
1129 	.alg.ahash = {
1130 		.init = safexcel_sha224_init,
1131 		.update = safexcel_ahash_update,
1132 		.final = safexcel_ahash_final,
1133 		.finup = safexcel_ahash_finup,
1134 		.digest = safexcel_sha224_digest,
1135 		.export = safexcel_ahash_export,
1136 		.import = safexcel_ahash_import,
1137 		.halg = {
1138 			.digestsize = SHA224_DIGEST_SIZE,
1139 			.statesize = sizeof(struct safexcel_ahash_export_state),
1140 			.base = {
1141 				.cra_name = "sha224",
1142 				.cra_driver_name = "safexcel-sha224",
1143 				.cra_priority = 300,
1144 				.cra_flags = CRYPTO_ALG_ASYNC |
1145 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1146 				.cra_blocksize = SHA224_BLOCK_SIZE,
1147 				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1148 				.cra_init = safexcel_ahash_cra_init,
1149 				.cra_exit = safexcel_ahash_cra_exit,
1150 				.cra_module = THIS_MODULE,
1151 			},
1152 		},
1153 	},
1154 };
1155 
1156 static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
1157 				       unsigned int keylen)
1158 {
1159 	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
1160 					SHA256_DIGEST_SIZE);
1161 }
1162 
1163 static int safexcel_hmac_sha224_init(struct ahash_request *areq)
1164 {
1165 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1166 
1167 	safexcel_sha224_init(areq);
1168 	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
1169 	return 0;
1170 }
1171 
1172 static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
1173 {
1174 	int ret = safexcel_hmac_sha224_init(areq);
1175 
1176 	if (ret)
1177 		return ret;
1178 
1179 	return safexcel_ahash_finup(areq);
1180 }
1181 
1182 struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
1183 	.type = SAFEXCEL_ALG_TYPE_AHASH,
1184 	.alg.ahash = {
1185 		.init = safexcel_hmac_sha224_init,
1186 		.update = safexcel_ahash_update,
1187 		.final = safexcel_ahash_final,
1188 		.finup = safexcel_ahash_finup,
1189 		.digest = safexcel_hmac_sha224_digest,
1190 		.setkey = safexcel_hmac_sha224_setkey,
1191 		.export = safexcel_ahash_export,
1192 		.import = safexcel_ahash_import,
1193 		.halg = {
1194 			.digestsize = SHA224_DIGEST_SIZE,
1195 			.statesize = sizeof(struct safexcel_ahash_export_state),
1196 			.base = {
1197 				.cra_name = "hmac(sha224)",
1198 				.cra_driver_name = "safexcel-hmac-sha224",
1199 				.cra_priority = 300,
1200 				.cra_flags = CRYPTO_ALG_ASYNC |
1201 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1202 				.cra_blocksize = SHA224_BLOCK_SIZE,
1203 				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1204 				.cra_init = safexcel_ahash_cra_init,
1205 				.cra_exit = safexcel_ahash_cra_exit,
1206 				.cra_module = THIS_MODULE,
1207 			},
1208 		},
1209 	},
1210 };
1211 
1212 static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
1213 				     unsigned int keylen)
1214 {
1215 	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
1216 					SHA256_DIGEST_SIZE);
1217 }
1218 
1219 static int safexcel_hmac_sha256_init(struct ahash_request *areq)
1220 {
1221 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1222 
1223 	safexcel_sha256_init(areq);
1224 	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
1225 	return 0;
1226 }
1227 
1228 static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
1229 {
1230 	int ret = safexcel_hmac_sha256_init(areq);
1231 
1232 	if (ret)
1233 		return ret;
1234 
1235 	return safexcel_ahash_finup(areq);
1236 }
1237 
1238 struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
1239 	.type = SAFEXCEL_ALG_TYPE_AHASH,
1240 	.alg.ahash = {
1241 		.init = safexcel_hmac_sha256_init,
1242 		.update = safexcel_ahash_update,
1243 		.final = safexcel_ahash_final,
1244 		.finup = safexcel_ahash_finup,
1245 		.digest = safexcel_hmac_sha256_digest,
1246 		.setkey = safexcel_hmac_sha256_setkey,
1247 		.export = safexcel_ahash_export,
1248 		.import = safexcel_ahash_import,
1249 		.halg = {
1250 			.digestsize = SHA256_DIGEST_SIZE,
1251 			.statesize = sizeof(struct safexcel_ahash_export_state),
1252 			.base = {
1253 				.cra_name = "hmac(sha256)",
1254 				.cra_driver_name = "safexcel-hmac-sha256",
1255 				.cra_priority = 300,
1256 				.cra_flags = CRYPTO_ALG_ASYNC |
1257 					     CRYPTO_ALG_KERN_DRIVER_ONLY,
1258 				.cra_blocksize = SHA256_BLOCK_SIZE,
1259 				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1260 				.cra_init = safexcel_ahash_cra_init,
1261 				.cra_exit = safexcel_ahash_cra_exit,
1262 				.cra_module = THIS_MODULE,
1263 			},
1264 		},
1265 	},
1266 };
1267