/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

enum safexcel_cipher_direction {
	SAFEXCEL_ENCRYPT,
	SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 mode;

	__le32 key[8];
	unsigned int key_len;
};

struct safexcel_cipher_req {
	enum safexcel_cipher_direction direction;
	bool needs_inv;
};

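/*
 * Program the token words of the first command descriptor: for CBC the IV
 * is copied into the first four token words, then a single "direction"
 * instruction covers the whole payload and flags it as the last hash and
 * packet segment.
 */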
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
				  struct crypto_async_request *async,
				  struct safexcel_command_desc *cdesc,
				  u32 length)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_token *token;
	unsigned int offset = 0;

	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
		offset = AES_BLOCK_SIZE / sizeof(u32);
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
	}

	token = (struct safexcel_token *)(cdesc->control_data.token + offset);

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
			EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_LAST |
				EIP197_TOKEN_INS_TYPE_CRYTO |
				EIP197_TOKEN_INS_TYPE_OUTPUT;
}

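/*
 * Expand and cache the AES key. On EIP197 parts, changing the key while a
 * context record is live on the engine marks the record for invalidation,
 * so the stale key cannot be served from the engine-side record cache.
 */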
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
			       unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = crypto_aes_expand_key(&aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	if (priv->version == EIP197 && ctx->base.ctxr_dma) {
		for (i = 0; i < len / sizeof(u32); i++) {
			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = len;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

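/*
 * Fill the per-command context control words: crypto direction, cipher
 * mode and the AES variant (which also fixes the context record size)
 * selected from the cached key length.
 */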
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
				    struct crypto_async_request *async,
				    struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int ctrl_size;

	if (sreq->direction == SAFEXCEL_ENCRYPT)
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
	else
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
	cdesc->control_data.control1 |= ctx->mode;

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
		ctrl_size = 4;
		break;
	case AES_KEYSIZE_192:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
		ctrl_size = 6;
		break;
	case AES_KEYSIZE_256:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
		ctrl_size = 8;
		break;
	default:
		dev_err(priv->dev, "aes keysize not supported: %u\n",
			ctx->key_len);
		return -EINVAL;
	}
	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

	return 0;
}

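/*
 * Reap the result descriptors of a completed cipher request, propagate
 * any engine error code and undo the DMA mapping of the source and
 * destination scatterlists.
 */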
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev,
				"cipher: result: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	*should_complete = true;

	return ndesc;
}

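/*
 * Map the scatterlists and emit one command descriptor per source segment
 * and one result descriptor per destination segment. The payload is
 * capped at req->cryptlen: e.g. for cryptlen == 48 over two 32-byte
 * source segments, the second command descriptor is trimmed to 16 bytes
 * and marked as the last one.
 */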
static int safexcel_aes_send(struct crypto_async_request *async,
			     int ring, struct safexcel_request *request,
			     int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
	int i, ret = 0;

	if (req->src == req->dst) {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, req->dst,
				    sg_nents_for_len(req->dst, req->cryptlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, req->src,
				     sg_nents_for_len(req->src, req->cryptlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(req->src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, req->cryptlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			safexcel_context_control(ctx, async, cdesc);
			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}

	/* result descriptors */
	for_each_sg(req->dst, sg, nr_dst, i) {
		bool first = !i, last = (i == nr_dst - 1);
		u32 len = sg_dma_len(sg);

		rdesc = safexcel_add_rdesc(priv, ring, first, last,
					   sg_dma_address(sg), len);
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		n_rdesc++;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	request->req = &req->base;

	*commands = n_cdesc;
	*results = n_rdesc;
	return 0;

rdesc_rollback:
	for (i = 0; i < n_rdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	return ret;
}

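/*
 * Handle the completion of a context invalidation request: either free
 * the context record (tfm teardown) or re-queue the data request that
 * triggered the invalidation on a freshly selected ring.
 */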
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0, enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: invalidate: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;

		return ndesc;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return ndesc;
}

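/* Dispatch a completion to the invalidation or the regular result path. */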
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int err;

	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}

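/*
 * Emit a cache invalidation command for this context; it consumes exactly
 * one command and one result descriptor.
 */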
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
				    int ring, struct safexcel_request *request,
				    int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	ret = safexcel_invalidate_cache(async, priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

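/*
 * Send either an invalidation or a cipher request. EIP97 parts have no
 * context record cache to invalidate, hence the BUG_ON() below.
 */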
static int safexcel_send(struct crypto_async_request *async,
			 int ring, struct safexcel_request *request,
			 int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	BUG_ON(priv->version == EIP97 && sreq->needs_inv);

	if (sreq->needs_inv)
		ret = safexcel_cipher_send_inv(async, ring, request,
					       commands, results);
	else
		ret = safexcel_aes_send(async, ring, request,
					commands, results);
	return ret;
}

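/*
 * Synchronously invalidate the engine-side context record: queue a dummy
 * on-stack request flagged with needs_inv/exit_inv and block until its
 * completion callback fires.
 */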
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct skcipher_request));

	/* create invalidation request */
	init_completion(&result.completion);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);

	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	sreq->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev,
			 "cipher: sync: invalidate: completion error %d\n",
			 result.error);
		return result.error;
	}

	return 0;
}

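/*
 * Common enqueue path for all AES requests: allocate the DMA context
 * record on first use, flag a pending invalidation if the key changed,
 * then queue the request and kick the ring worker.
 */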
static int safexcel_aes(struct skcipher_request *req,
			enum safexcel_cipher_direction dir, u32 mode)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	sreq->needs_inv = false;
	sreq->direction = dir;
	ctx->mode = mode;

	if (ctx->base.ctxr) {
		if (priv->version == EIP197 && ctx->base.needs_inv) {
			sreq->needs_inv = true;
			ctx->base.needs_inv = false;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(req->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

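/*
 * Per-tfm setup: bind the tfm to its driver instance, hook up the send
 * and handle_result backends and reserve room for a struct
 * safexcel_cipher_req in every skcipher request.
 */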
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.skcipher.base);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct safexcel_cipher_req));

	return 0;
}

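/*
 * Per-tfm teardown: scrub the cached key, then invalidate the context
 * record on EIP197 or free it directly on EIP97 (which has no context
 * cache).
 */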
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	memzero_explicit(ctx->key, 8 * sizeof(u32));

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

	if (priv->version == EIP197) {
		ret = safexcel_cipher_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

struct safexcel_alg_template safexcel_alg_ecb_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_ecb_aes_encrypt,
		.decrypt = safexcel_ecb_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "safexcel-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

struct safexcel_alg_template safexcel_alg_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_cbc_aes_encrypt,
		.decrypt = safexcel_cbc_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "safexcel-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
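
/*
 * Usage sketch: a kernel client drives these algorithms through the
 * generic skcipher API; the crypto core picks this driver when its
 * priority wins. Everything below is illustrative and not part of this
 * driver: example_cbc_aes_encrypt(), the all-zero key/IV and the
 * one-block buffer are assumptions, and a real caller must use DMA-able
 * (e.g. kmalloc'd) buffers. Assumes <crypto/aes.h>, <crypto/skcipher.h>
 * and <linux/scatterlist.h>.
 *
 *	static int example_cbc_aes_encrypt(void)
 *	{
 *		struct crypto_skcipher *tfm;
 *		struct skcipher_request *req = NULL;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		struct scatterlist sg;
 *		u8 key[AES_KEYSIZE_128] = { 0 };
 *		u8 iv[AES_BLOCK_SIZE] = { 0 };
 *		u8 *buf;
 *		int ret;
 *
 *		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
 *		req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *		if (!buf || !req) {
 *			ret = -ENOMEM;
 *			goto out;
 *		}
 *
 *		ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
 *		if (ret)
 *			goto out;
 *
 *		sg_init_one(&sg, buf, AES_BLOCK_SIZE);
 *		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					      crypto_req_done, &wait);
 *		skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
 *
 *		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	out:
 *		skcipher_request_free(req);
 *		kfree(buf);
 *		crypto_free_skcipher(tfm);
 *		return ret;
 *	}
 */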