/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;
	u32 digest;

	u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;

	u8 state_sz;    /* expected state size, only set once */
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];

	u64 len;
	u64 processed;

	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct safexcel_ahash_export_state {
	u64 len;
	u64 processed;

	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
	u8 cache[SHA256_BLOCK_SIZE];
};

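/*
 * Build the two-instruction token for a hash operation: direct the input
 * data to the hash engine, then insert the resulting digest into the
 * output packet.
 */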
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}

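/*
 * Fill in the control words of the first command descriptor: operation
 * type, algorithm, digest type and context size. For precomputed digests
 * the intermediate state (and, when finishing, the processed block count)
 * is copied into the context record; for HMAC the ipad/opad digests are
 * used instead.
 */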
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize,
				     unsigned int blocksize)
{
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= ctx->digest;

	if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed) {
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->finish)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and set up the context
		 * fields. Do this now as we need it to set up the first
		 * command descriptor.
		 */
		if (req->processed) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			if (req->finish)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
		}
	} else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

		memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
		memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
		       ctx->opad, digestsize);
	}
}

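/*
 * Handle the result descriptor of a hash request: check for errors, copy
 * the (partial or final) digest back into the request state, unmap the
 * source scatterlist and move any leftover bytes into the cache for the
 * next call.
 */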
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	int cache_len, result_sz = sreq->state_sz;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (sreq->finish)
		result_sz = crypto_ahash_digestsize(ahash);
	memcpy(sreq->state, areq->result, result_sz);

	dma_unmap_sg(priv->dev, areq->src,
		     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);

	safexcel_free_context(priv, async, sreq->state_sz);

	cache_len = sreq->len - sreq->processed;
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}

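/*
 * Push a hash request to the engine: cache trailing bytes that do not
 * fill a full block, emit one command descriptor for the previously
 * cached data and one per source scatterlist entry, then add a single
 * result descriptor for the digest.
 */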
static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
			       struct safexcel_request *request, int *commands,
			       int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

	queued = len = req->len - req->processed;
	if (queued < crypto_ahash_blocksize(ahash))
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	/*
	 * If this is not the last request and the queued data does not fit
	 * into full blocks, cache it for the next send() call.
	 */
	extra = queued & (crypto_ahash_blocksize(ahash) - 1);
	if (!req->last_req && extra) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache_next, extra, areq->nbytes - extra);

		queued -= extra;
		len -= extra;
	}

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
		if (!ctx->base.cache) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(ctx->base.cache, req->cache, cache_len);
		ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
						     cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
			ret = -EINVAL;
			goto free_cache;
		}

		ctx->base.cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 ctx->base.cache_dma,
						 cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Now handle the current ahash request buffer(s) */
	nents = dma_map_sg(priv->dev, areq->src,
		       sg_nents_for_len(areq->src, areq->nbytes),
		       DMA_TO_DEVICE);
	if (!nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - sglen < 0)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Set up the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
				 crypto_ahash_blocksize(ahash));

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
					      req->state_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
		ret = -EINVAL;
		goto cdesc_rollback;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	req->processed += len;
	request->req = &areq->base;
	ctx->base.handle_result = safexcel_handle_result;

	*commands = n_cdesc;
	*results = 1;
	return 0;

cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (ctx->base.cache_dma) {
		dma_unmap_single(priv->dev, ctx->base.cache_dma,
				 ctx->base.cache_sz, DMA_TO_DEVICE);
		ctx->base.cache_sz = 0;
	}
free_cache:
	kfree(ctx->base.cache);
	ctx->base.cache = NULL;

unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}

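/*
 * Check whether the context record still matches the request state. If
 * another request ran on this context in the meantime, the context must
 * be invalidated before being reused.
 */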
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	int i;

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	if (ctx->base.ctxr->data[state_w_sz] !=
	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
		return true;

	return false;
}

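/*
 * Handle the result of a context invalidation: either free the context
 * record when the tfm is going away, or re-enqueue the original request
 * on a (possibly different) ring now that the context is clean.
 */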
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: invalidate: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;
	ctx->base.needs_inv = false;
	ctx->base.send = safexcel_ahash_send;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	*should_complete = false;

	return 1;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ctx->base.handle_result = safexcel_handle_inv_result;
	ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

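/*
 * Build a dummy request to invalidate the context record of a tfm being
 * destroyed, enqueue it and synchronously wait for its completion.
 */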
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct ahash_request req;
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(&req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req.base.tfm);
	ctx->base.exit_inv = true;
	ctx->base.send = safexcel_ahash_send_inv;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	wait_for_completion_interruptible(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}

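/*
 * Copy the request payload into the cache when it fits below one block.
 * Returns the number of cached bytes, or -E2BIG when the data does not
 * fit and must be sent to the engine instead.
 */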
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	int queued, cache_len;

	cache_len = req->len - areq->nbytes - req->processed;
	queued = req->len - req->processed;

	/*
	 * In case there aren't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}

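/*
 * Queue a request on its ring, allocating a context record on first use
 * and scheduling an invalidation first if the existing record is stale.
 */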
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	ctx->base.send = safexcel_ahash_send;

	if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
		ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

	if (ctx->base.ctxr) {
		if (ctx->base.needs_inv)
			ctx->base.send = safexcel_ahash_send_inv;
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	req->len += areq->nbytes;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an HMAC request.
	 * Everything will be handled by the final() call.
	 */
	if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		return 0;

	if (req->hmac)
		return safexcel_ahash_enqueue(areq);

	if (!req->last_req &&
	    req->len - req->processed > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);

	return 0;
}

static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;
	req->finish = true;

	/* If we have an overall 0 length request */
	if (!(req->len + areq->nbytes)) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);

		return 0;
	}

	return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->last_req = true;
	req->finish = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	memcpy(export->state, req->state, req->state_sz);
	memset(export->cache, 0, crypto_ahash_blocksize(ahash));
	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

	return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	ret = safexcel_ahash_exit_inv(tfm);
	if (ret)
		dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
}

struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	safexcel_sha1_init(areq);
	ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
	struct completion completion;
	int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

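/*
 * Compute the HMAC inner and outer pads from the key: hash the key first
 * if it is longer than a block, zero-pad it, then XOR with the standard
 * ipad/opad constants.
 */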
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

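/*
 * Run a single block (an ipad or opad) through the hash and export the
 * intermediate state, which the engine later uses as the HMAC precomputed
 * digest.
 */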
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}

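/*
 * Derive the HMAC inner/outer intermediate states (istate/ostate) for a
 * key by hashing the ipad and opad blocks with an ahash transform of the
 * given algorithm.
 */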
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
				unsigned int keylen, void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_ahash_export_state istate, ostate;
	int ret, i;

	ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
			ctx->base.needs_inv = true;
			break;
		}
	}

	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

	return 0;
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA256_H0;
	req->state[1] = SHA256_H1;
	req->state[2] = SHA256_H2;
	req->state[3] = SHA256_H3;
	req->state[4] = SHA256_H4;
	req->state[5] = SHA256_H5;
	req->state[6] = SHA256_H6;
	req->state[7] = SHA256_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA224_H0;
	req->state[1] = SHA224_H1;
	req->state[2] = SHA224_H2;
	req->state[3] = SHA224_H3;
	req->state[4] = SHA224_H4;
	req->state[5] = SHA224_H5;
	req->state[6] = SHA224_H6;
	req->state[7] = SHA224_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};