1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel Keem Bay OCS AES Crypto Driver.
4  *
5  * Copyright (C) 2018-2020 Intel Corporation
6  */
7 
8 #include <crypto/aes.h>
9 #include <crypto/engine.h>
10 #include <crypto/gcm.h>
11 #include <crypto/internal/aead.h>
12 #include <crypto/internal/skcipher.h>
13 #include <crypto/scatterwalk.h>
14 #include <linux/clk.h>
15 #include <linux/completion.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/err.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/platform_device.h>
24 #include <linux/string.h>
25 
26 #include "ocs-aes.h"
27 
28 #define KMB_OCS_PRIORITY	350
29 #define DRV_NAME		"keembay-ocs-aes"
30 
31 #define OCS_AES_MIN_KEY_SIZE	16
32 #define OCS_AES_MAX_KEY_SIZE	32
33 #define OCS_AES_KEYSIZE_128	16
34 #define OCS_AES_KEYSIZE_192	24
35 #define OCS_AES_KEYSIZE_256	32
36 #define OCS_SM4_KEY_SIZE	16
37 
38 /**
39  * struct ocs_aes_tctx - OCS AES Transform context
40  * @aes_dev:		The OCS AES device.
41  * @key:		AES/SM4 key.
42  * @key_len:		The length (in bytes) of @key.
43  * @cipher:		OCS cipher to use (either AES or SM4).
44  * @sw_cipher:		The cipher to use as fallback.
45  * @use_fallback:	Whether or not fallback cipher should be used.
46  */
47 struct ocs_aes_tctx {
48 	struct ocs_aes_dev *aes_dev;
49 	u8 key[OCS_AES_KEYSIZE_256];
50 	unsigned int key_len;
51 	enum ocs_cipher cipher;
52 	union {
53 		struct crypto_sync_skcipher *sk;
54 		struct crypto_aead *aead;
55 	} sw_cipher;
56 	bool use_fallback;
57 };
58 
59 /**
60  * struct ocs_aes_rctx - OCS AES Request context.
61  * @instruction:	Instruction to be executed (encrypt / decrypt).
62  * @mode:		Mode to use (ECB, CBC, CTR, CCM, GCM, CTS).
63  * @src_nents:		Number of source SG entries.
64  * @dst_nents:		Number of destination SG entries.
65  * @src_dma_count:	The number of DMA-mapped entries of the source SG.
66  * @dst_dma_count:	The number of DMA-mapped entries of the destination SG.
67  * @in_place:		Whether or not this is an in place request, i.e.,
68  *			src_sg == dst_sg.
69  * @src_dll:		OCS DMA linked list for input data.
70  * @dst_dll:		OCS DMA linked list for output data.
71  * @last_ct_blk:	Buffer to hold last cipher text block (only used in CBC
72  *			mode).
73  * @cts_swap:		Whether or not CTS swap must be performed.
74  * @aad_src_dll:	OCS DMA linked list for input AAD data.
75  * @aad_dst_dll:	OCS DMA linked list for output AAD data.
76  * @in_tag:		Buffer to hold input encrypted tag (only used for
77  *			CCM/GCM decrypt).
78  * @out_tag:		Buffer to hold output encrypted / decrypted tag (only
79  *			used for GCM encrypt / decrypt).
80  */
81 struct ocs_aes_rctx {
82 	/* Fields common across all modes. */
83 	enum ocs_instruction	instruction;
84 	enum ocs_mode		mode;
85 	int			src_nents;
86 	int			dst_nents;
87 	int			src_dma_count;
88 	int			dst_dma_count;
89 	bool			in_place;
90 	struct ocs_dll_desc	src_dll;
91 	struct ocs_dll_desc	dst_dll;
92 
93 	/* CBC specific */
94 	u8			last_ct_blk[AES_BLOCK_SIZE];
95 
96 	/* CTS specific */
97 	int			cts_swap;
98 
99 	/* CCM/GCM specific */
100 	struct ocs_dll_desc	aad_src_dll;
101 	struct ocs_dll_desc	aad_dst_dll;
102 	u8			in_tag[AES_BLOCK_SIZE];
103 
104 	/* GCM specific */
105 	u8			out_tag[AES_BLOCK_SIZE];
106 };
107 
108 /* Driver data. */
109 struct ocs_aes_drv {
110 	struct list_head dev_list;
111 	spinlock_t lock;	/* Protects dev_list. */
112 };
113 
114 static struct ocs_aes_drv ocs_aes = {
115 	.dev_list = LIST_HEAD_INIT(ocs_aes.dev_list),
116 	.lock = __SPIN_LOCK_UNLOCKED(ocs_aes.lock),
117 };
118 
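/*
 * Return the OCS AES device to use for this transform, caching it in the
 * transform context on first use. Only a single OCS device is expected, so
 * the first entry of the device list is used.
 */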
119 static struct ocs_aes_dev *kmb_ocs_aes_find_dev(struct ocs_aes_tctx *tctx)
120 {
121 	struct ocs_aes_dev *aes_dev;
122 
123 	spin_lock(&ocs_aes.lock);
124 
125 	if (tctx->aes_dev) {
126 		aes_dev = tctx->aes_dev;
127 		goto exit;
128 	}
129 
130 	/* Only a single OCS device available */
131 	aes_dev = list_first_entry(&ocs_aes.dev_list, struct ocs_aes_dev, list);
132 	tctx->aes_dev = aes_dev;
133 
134 exit:
135 	spin_unlock(&ocs_aes.lock);
136 
137 	return aes_dev;
138 }
139 
140 /*
141  * Ensure key is 128-bit or 256-bit for AES or 128-bit for SM4 and an actual
142  * key is being passed in.
143  *
144  * Return: 0 if key is valid, -EINVAL otherwise.
145  */
146 static int check_key(const u8 *in_key, size_t key_len, enum ocs_cipher cipher)
147 {
148 	if (!in_key)
149 		return -EINVAL;
150 
151 	/* For AES, only 128-bit or 256-bit keys are supported. */
152 	if (cipher == OCS_AES && (key_len == OCS_AES_KEYSIZE_128 ||
153 				  key_len == OCS_AES_KEYSIZE_256))
154 		return 0;
155 
156 	/* For SM4, only 128-bit keys are supported. */
157 	if (cipher == OCS_SM4 && key_len == OCS_AES_KEYSIZE_128)
158 		return 0;
159 
160 	/* Everything else is unsupported. */
161 	return -EINVAL;
162 }
163 
164 /* Save key into transformation context. */
165 static int save_key(struct ocs_aes_tctx *tctx, const u8 *in_key, size_t key_len,
166 		    enum ocs_cipher cipher)
167 {
168 	int ret;
169 
170 	ret = check_key(in_key, key_len, cipher);
171 	if (ret)
172 		return ret;
173 
174 	memcpy(tctx->key, in_key, key_len);
175 	tctx->key_len = key_len;
176 	tctx->cipher = cipher;
177 
178 	return 0;
179 }
180 
181 /* Set key for symmetric cipher. */
182 static int kmb_ocs_sk_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
183 			      size_t key_len, enum ocs_cipher cipher)
184 {
185 	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
186 
187 	/* Fallback is used for AES with 192-bit key. */
188 	tctx->use_fallback = (cipher == OCS_AES &&
189 			      key_len == OCS_AES_KEYSIZE_192);
190 
191 	if (!tctx->use_fallback)
192 		return save_key(tctx, in_key, key_len, cipher);
193 
194 	crypto_sync_skcipher_clear_flags(tctx->sw_cipher.sk,
195 					 CRYPTO_TFM_REQ_MASK);
196 	crypto_sync_skcipher_set_flags(tctx->sw_cipher.sk,
197 				       tfm->base.crt_flags &
198 				       CRYPTO_TFM_REQ_MASK);
199 
200 	return crypto_sync_skcipher_setkey(tctx->sw_cipher.sk, in_key, key_len);
201 }
202 
203 /* Set key for AEAD cipher. */
204 static int kmb_ocs_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
205 				size_t key_len, enum ocs_cipher cipher)
206 {
207 	struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
208 
209 	/* Fallback is used for AES with 192-bit key. */
210 	tctx->use_fallback = (cipher == OCS_AES &&
211 			      key_len == OCS_AES_KEYSIZE_192);
212 
213 	if (!tctx->use_fallback)
214 		return save_key(tctx, in_key, key_len, cipher);
215 
216 	crypto_aead_clear_flags(tctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
217 	crypto_aead_set_flags(tctx->sw_cipher.aead,
218 			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
219 
220 	return crypto_aead_setkey(tctx->sw_cipher.aead, in_key, key_len);
221 }
222 
223 /* Swap two AES blocks in SG lists. */
224 static void sg_swap_blocks(struct scatterlist *sgl, unsigned int nents,
225 			   off_t blk1_offset, off_t blk2_offset)
226 {
227 	u8 tmp_buf1[AES_BLOCK_SIZE], tmp_buf2[AES_BLOCK_SIZE];
228 
229 	/*
230 	 * No easy way to copy within sg list, so copy both blocks to temporary
231 	 * buffers first.
232 	 */
233 	sg_pcopy_to_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk1_offset);
234 	sg_pcopy_to_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk2_offset);
235 	sg_pcopy_from_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk2_offset);
236 	sg_pcopy_from_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk1_offset);
237 }
238 
239 /* Initialize request context to default values. */
240 static void ocs_aes_init_rctx(struct ocs_aes_rctx *rctx)
241 {
242 	/* Zero everything. */
243 	memset(rctx, 0, sizeof(*rctx));
244 
245 	/* Set initial value for DMA addresses. */
246 	rctx->src_dll.dma_addr = DMA_MAPPING_ERROR;
247 	rctx->dst_dll.dma_addr = DMA_MAPPING_ERROR;
248 	rctx->aad_src_dll.dma_addr = DMA_MAPPING_ERROR;
249 	rctx->aad_dst_dll.dma_addr = DMA_MAPPING_ERROR;
250 }
251 
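/*
 * Validate cryptlen and IV for the selected mode: ECB and CBC require a
 * block-aligned input length, CBC/CTR/CTS require a block-sized IV, and CTS
 * additionally requires at least one full block of input.
 */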
252 static int kmb_ocs_sk_validate_input(struct skcipher_request *req,
253 				     enum ocs_mode mode)
254 {
255 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
256 	int iv_size = crypto_skcipher_ivsize(tfm);
257 
258 	switch (mode) {
259 	case OCS_MODE_ECB:
260 		/* Ensure input length is multiple of block size */
261 		if (req->cryptlen % AES_BLOCK_SIZE != 0)
262 			return -EINVAL;
263 
264 		return 0;
265 
266 	case OCS_MODE_CBC:
267 		/* Ensure input length is multiple of block size */
268 		if (req->cryptlen % AES_BLOCK_SIZE != 0)
269 			return -EINVAL;
270 
271 		/* Ensure IV is present and block size in length */
272 		if (!req->iv || iv_size != AES_BLOCK_SIZE)
273 			return -EINVAL;
274 		/*
275 		 * NOTE: Since req->cryptlen == 0 case was already handled in
276 		 * kmb_ocs_sk_common(), the above two conditions also guarantee
277 		 * that: cryptlen >= iv_size
278 		 */
279 		return 0;
280 
281 	case OCS_MODE_CTR:
282 		/* Ensure IV is present and block size in length */
283 		if (!req->iv || iv_size != AES_BLOCK_SIZE)
284 			return -EINVAL;
285 		return 0;
286 
287 	case OCS_MODE_CTS:
288 		/* Ensure input length >= block size */
289 		if (req->cryptlen < AES_BLOCK_SIZE)
290 			return -EINVAL;
291 
292 		/* Ensure IV is present and block size in length */
293 		if (!req->iv || iv_size != AES_BLOCK_SIZE)
294 			return -EINVAL;
295 
296 		return 0;
297 	default:
298 		return -EINVAL;
299 	}
300 }
301 
302 /*
303  * Called by encrypt() / decrypt() skcipher functions.
304  *
305  * Use fallback if needed, otherwise initialize context and enqueue request
306  * into engine.
307  */
308 static int kmb_ocs_sk_common(struct skcipher_request *req,
309 			     enum ocs_cipher cipher,
310 			     enum ocs_instruction instruction,
311 			     enum ocs_mode mode)
312 {
313 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
314 	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
315 	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
316 	struct ocs_aes_dev *aes_dev;
317 	int rc;
318 
319 	if (tctx->use_fallback) {
320 		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tctx->sw_cipher.sk);
321 
322 		skcipher_request_set_sync_tfm(subreq, tctx->sw_cipher.sk);
323 		skcipher_request_set_callback(subreq, req->base.flags, NULL,
324 					      NULL);
325 		skcipher_request_set_crypt(subreq, req->src, req->dst,
326 					   req->cryptlen, req->iv);
327 
328 		if (instruction == OCS_ENCRYPT)
329 			rc = crypto_skcipher_encrypt(subreq);
330 		else
331 			rc = crypto_skcipher_decrypt(subreq);
332 
333 		skcipher_request_zero(subreq);
334 
335 		return rc;
336 	}
337 
338 	/*
339 	 * If cryptlen == 0, no processing needed for ECB, CBC and CTR.
340 	 *
341 	 * For CTS continue: kmb_ocs_sk_validate_input() will return -EINVAL.
342 	 */
343 	if (!req->cryptlen && mode != OCS_MODE_CTS)
344 		return 0;
345 
346 	rc = kmb_ocs_sk_validate_input(req, mode);
347 	if (rc)
348 		return rc;
349 
350 	aes_dev = kmb_ocs_aes_find_dev(tctx);
351 	if (!aes_dev)
352 		return -ENODEV;
353 
354 	if (cipher != tctx->cipher)
355 		return -EINVAL;
356 
357 	ocs_aes_init_rctx(rctx);
358 	rctx->instruction = instruction;
359 	rctx->mode = mode;
360 
361 	return crypto_transfer_skcipher_request_to_engine(aes_dev->engine, req);
362 }
363 
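/* Free an OCS DMA linked list and reset its descriptor to a safe state. */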
364 static void cleanup_ocs_dma_linked_list(struct device *dev,
365 					struct ocs_dll_desc *dll)
366 {
367 	if (dll->vaddr)
368 		dma_free_coherent(dev, dll->size, dll->vaddr, dll->dma_addr);
369 	dll->vaddr = NULL;
370 	dll->size = 0;
371 	dll->dma_addr = DMA_MAPPING_ERROR;
372 }
373 
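/* Unmap the skcipher source/destination SGs and free the OCS DMA linked lists. */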
374 static void kmb_ocs_sk_dma_cleanup(struct skcipher_request *req)
375 {
376 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
377 	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
378 	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
379 	struct device *dev = tctx->aes_dev->dev;
380 
381 	if (rctx->src_dma_count) {
382 		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
383 		rctx->src_dma_count = 0;
384 	}
385 
386 	if (rctx->dst_dma_count) {
387 		dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
388 							     DMA_BIDIRECTIONAL :
389 							     DMA_FROM_DEVICE);
390 		rctx->dst_dma_count = 0;
391 	}
392 
393 	/* Clean up OCS DMA linked lists */
394 	cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
395 	cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
396 }
397 
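/*
 * Prepare DMA for an in-place (src == dst) skcipher request: the destination
 * SG is mapped bidirectionally and a single OCS linked list is shared between
 * input and output.
 */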
398 static int kmb_ocs_sk_prepare_inplace(struct skcipher_request *req)
399 {
400 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
401 	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
402 	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
403 	int iv_size = crypto_skcipher_ivsize(tfm);
404 	int rc;
405 
406 	/*
407 	 * For CBC decrypt, save last block (iv) to last_ct_blk buffer.
408 	 *
409 	 * Note: if we are here, we already checked that cryptlen >= iv_size
410 	 * and iv_size == AES_BLOCK_SIZE (i.e., the size of last_ct_blk); see
411 	 * kmb_ocs_sk_validate_input().
412 	 */
413 	if (rctx->mode == OCS_MODE_CBC && rctx->instruction == OCS_DECRYPT)
414 		scatterwalk_map_and_copy(rctx->last_ct_blk, req->src,
415 					 req->cryptlen - iv_size, iv_size, 0);
416 
417 	/* For CTS decrypt, swap last two blocks, if needed. */
418 	if (rctx->cts_swap && rctx->instruction == OCS_DECRYPT)
419 		sg_swap_blocks(req->dst, rctx->dst_nents,
420 			       req->cryptlen - AES_BLOCK_SIZE,
421 			       req->cryptlen - (2 * AES_BLOCK_SIZE));
422 
423 	/* src and dst buffers are the same, use bidirectional DMA mapping. */
424 	rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
425 					 rctx->dst_nents, DMA_BIDIRECTIONAL);
426 	if (rctx->dst_dma_count == 0) {
427 		dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
428 		return -ENOMEM;
429 	}
430 
431 	/* Create DST linked list */
432 	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
433 					    rctx->dst_dma_count, &rctx->dst_dll,
434 					    req->cryptlen, 0);
435 	if (rc)
436 		return rc;
437 	/*
438 	 * If descriptor creation was successful, set the src_dll.dma_addr to
439 	 * the value of dst_dll.dma_addr, as we do in-place AES operation on
440 	 * the src.
441 	 */
442 	rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
443 
444 	return 0;
445 }
446 
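/*
 * Prepare DMA for a not-in-place skcipher request: map src and dst separately
 * and build an OCS linked list for each. A CTS decrypt that needs block
 * swapping is converted into an in-place operation by first copying src to
 * dst.
 */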
447 static int kmb_ocs_sk_prepare_notinplace(struct skcipher_request *req)
448 {
449 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
450 	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
451 	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
452 	int rc;
453 
454 	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
455 	if (rctx->src_nents < 0)
456 		return -EBADMSG;
457 
458 	/* Map SRC SG. */
459 	rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
460 					 rctx->src_nents, DMA_TO_DEVICE);
461 	if (rctx->src_dma_count == 0) {
462 		dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
463 		return -ENOMEM;
464 	}
465 
466 	/* Create SRC linked list */
467 	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
468 					    rctx->src_dma_count, &rctx->src_dll,
469 					    req->cryptlen, 0);
470 	if (rc)
471 		return rc;
472 
473 	/* Map DST SG. */
474 	rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
475 					 rctx->dst_nents, DMA_FROM_DEVICE);
476 	if (rctx->dst_dma_count == 0) {
477 		dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
478 		return -ENOMEM;
479 	}
480 
481 	/* Create DST linked list */
482 	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
483 					    rctx->dst_dma_count, &rctx->dst_dll,
484 					    req->cryptlen, 0);
485 	if (rc)
486 		return rc;
487 
488 	/* If this is not a CTS decrypt operation with swapping, we are done. */
489 	if (!(rctx->cts_swap && rctx->instruction == OCS_DECRYPT))
490 		return 0;
491 
492 	/*
493 	 * Otherwise, we have to copy src to dst (as we cannot modify src).
494 	 * Use OCS AES bypass mode to copy src to dst via DMA.
495 	 *
496 	 * NOTE: for anything other than small data sizes this is rather
497 	 * inefficient.
498 	 */
499 	rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->dst_dll.dma_addr,
500 			       rctx->src_dll.dma_addr, req->cryptlen);
501 	if (rc)
502 		return rc;
503 
504 	/*
505 	 * Now dst == src, so clean up what we did so far and use in_place
506 	 * logic.
507 	 */
508 	kmb_ocs_sk_dma_cleanup(req);
509 	rctx->in_place = true;
510 
511 	return kmb_ocs_sk_prepare_inplace(req);
512 }
513 
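/*
 * Execute an skcipher request: set up DMA, run the OCS engine and perform the
 * mode-specific post-processing (CTS block swap, CBC IV update).
 */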
514 static int kmb_ocs_sk_run(struct skcipher_request *req)
515 {
516 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
517 	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
518 	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
519 	struct ocs_aes_dev *aes_dev = tctx->aes_dev;
520 	int iv_size = crypto_skcipher_ivsize(tfm);
521 	int rc;
522 
523 	rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
524 	if (rctx->dst_nents < 0)
525 		return -EBADMSG;
526 
527 	/*
528 	 * If 2 blocks or greater and a multiple of the block size, swap last two
529 	 * blocks to be compatible with other crypto API CTS implementations:
530 	 * OCS mode uses CBC-CS2, whereas other crypto API implementations use
531 	 * CBC-CS3.
532 	 * CBC-CS2 and CBC-CS3 defined by:
533 	 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a-add.pdf
534 	 */
535 	rctx->cts_swap = (rctx->mode == OCS_MODE_CTS &&
536 			  req->cryptlen > AES_BLOCK_SIZE &&
537 			  req->cryptlen % AES_BLOCK_SIZE == 0);
538 
539 	rctx->in_place = (req->src == req->dst);
540 
541 	if (rctx->in_place)
542 		rc = kmb_ocs_sk_prepare_inplace(req);
543 	else
544 		rc = kmb_ocs_sk_prepare_notinplace(req);
545 
546 	if (rc)
547 		goto error;
548 
549 	rc = ocs_aes_op(aes_dev, rctx->mode, tctx->cipher, rctx->instruction,
550 			rctx->dst_dll.dma_addr, rctx->src_dll.dma_addr,
551 			req->cryptlen, req->iv, iv_size);
552 	if (rc)
553 		goto error;
554 
555 	/* Clean-up DMA before further processing output. */
556 	kmb_ocs_sk_dma_cleanup(req);
557 
558 	/* For CTS Encrypt, swap last 2 blocks, if needed. */
559 	if (rctx->cts_swap && rctx->instruction == OCS_ENCRYPT) {
560 		sg_swap_blocks(req->dst, rctx->dst_nents,
561 			       req->cryptlen - AES_BLOCK_SIZE,
562 			       req->cryptlen - (2 * AES_BLOCK_SIZE));
563 		return 0;
564 	}
565 
566 	/* For CBC, copy the IV to req->iv. */
567 	if (rctx->mode == OCS_MODE_CBC) {
568 		/* CBC encrypt case. */
569 		if (rctx->instruction == OCS_ENCRYPT) {
570 			scatterwalk_map_and_copy(req->iv, req->dst,
571 						 req->cryptlen - iv_size,
572 						 iv_size, 0);
573 			return 0;
574 		}
575 		/* CBC decrypt case. */
576 		if (rctx->in_place)
577 			memcpy(req->iv, rctx->last_ct_blk, iv_size);
578 		else
579 			scatterwalk_map_and_copy(req->iv, req->src,
580 						 req->cryptlen - iv_size,
581 						 iv_size, 0);
582 		return 0;
583 	}
584 	/* For all other modes there's nothing to do. */
585 
586 	return 0;
587 
588 error:
589 	kmb_ocs_sk_dma_cleanup(req);
590 
591 	return rc;
592 }
593 
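/*
 * Validate an AEAD request: the IV must be present and of the size expected
 * by the mode (12 bytes for GCM, one block for CCM), and a decrypt request
 * must be at least as long as the authentication tag.
 */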
594 static int kmb_ocs_aead_validate_input(struct aead_request *req,
595 				       enum ocs_instruction instruction,
596 				       enum ocs_mode mode)
597 {
598 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
599 	int tag_size = crypto_aead_authsize(tfm);
600 	int iv_size = crypto_aead_ivsize(tfm);
601 
602 	/* For decrypt, cryptlen == len(PT) + len(tag). */
603 	if (instruction == OCS_DECRYPT && req->cryptlen < tag_size)
604 		return -EINVAL;
605 
606 	/* IV is mandatory. */
607 	if (!req->iv)
608 		return -EINVAL;
609 
610 	switch (mode) {
611 	case OCS_MODE_GCM:
612 		if (iv_size != GCM_AES_IV_SIZE)
613 			return -EINVAL;
614 
615 		return 0;
616 
617 	case OCS_MODE_CCM:
618 		/* Ensure IV is present and block size in length */
619 		if (iv_size != AES_BLOCK_SIZE)
620 			return -EINVAL;
621 
622 		return 0;
623 
624 	default:
625 		return -EINVAL;
626 	}
627 }
628 
629 /*
630  * Called by encrypt() / decrypt() aead functions.
631  *
632  * Use fallback if needed, otherwise initialize context and enqueue request
633  * into engine.
634  */
635 static int kmb_ocs_aead_common(struct aead_request *req,
636 			       enum ocs_cipher cipher,
637 			       enum ocs_instruction instruction,
638 			       enum ocs_mode mode)
639 {
640 	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
641 	struct ocs_aes_rctx *rctx = aead_request_ctx(req);
642 	struct ocs_aes_dev *dd;
643 	int rc;
644 
645 	if (tctx->use_fallback) {
646 		struct aead_request *subreq = aead_request_ctx(req);
647 
648 		aead_request_set_tfm(subreq, tctx->sw_cipher.aead);
649 		aead_request_set_callback(subreq, req->base.flags,
650 					  req->base.complete, req->base.data);
651 		aead_request_set_crypt(subreq, req->src, req->dst,
652 				       req->cryptlen, req->iv);
653 		aead_request_set_ad(subreq, req->assoclen);
654 		rc = crypto_aead_setauthsize(tctx->sw_cipher.aead,
655 					     crypto_aead_authsize(crypto_aead_reqtfm(req)));
656 		if (rc)
657 			return rc;
658 
659 		return (instruction == OCS_ENCRYPT) ?
660 		       crypto_aead_encrypt(subreq) :
661 		       crypto_aead_decrypt(subreq);
662 	}
663 
664 	rc = kmb_ocs_aead_validate_input(req, instruction, mode);
665 	if (rc)
666 		return rc;
667 
668 	dd = kmb_ocs_aes_find_dev(tctx);
669 	if (!dd)
670 		return -ENODEV;
671 
672 	if (cipher != tctx->cipher)
673 		return -EINVAL;
674 
675 	ocs_aes_init_rctx(rctx);
676 	rctx->instruction = instruction;
677 	rctx->mode = mode;
678 
679 	return crypto_transfer_aead_request_to_engine(dd->engine, req);
680 }
681 
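/* Unmap the AEAD source/destination SGs and free all OCS DMA linked lists. */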
682 static void kmb_ocs_aead_dma_cleanup(struct aead_request *req)
683 {
684 	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
685 	struct ocs_aes_rctx *rctx = aead_request_ctx(req);
686 	struct device *dev = tctx->aes_dev->dev;
687 
688 	if (rctx->src_dma_count) {
689 		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
690 		rctx->src_dma_count = 0;
691 	}
692 
693 	if (rctx->dst_dma_count) {
694 		dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
695 							     DMA_BIDIRECTIONAL :
696 							     DMA_FROM_DEVICE);
697 		rctx->dst_dma_count = 0;
698 	}
699 	/* Clean up OCS DMA linked lists */
700 	cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
701 	cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
702 	cleanup_ocs_dma_linked_list(dev, &rctx->aad_src_dll);
703 	cleanup_ocs_dma_linked_list(dev, &rctx->aad_dst_dll);
704 }
705 
706 /**
707  * kmb_ocs_aead_dma_prepare() - Do DMA mapping for AEAD processing.
708  * @req:		The AEAD request being processed.
709  * @src_dll_size:	Where to store the length of the data mapped into the
710  *			src_dll OCS DMA list.
711  *
712  * Do the following:
713  * - DMA map req->src and req->dst
714  * - Initialize the following OCS DMA linked lists: rctx->src_dll,
715  *   rctx->dst_dll, rctx->aad_src_dll and rctx->aad_dst_dll.
716  *
717  * Return: 0 on success, negative error code otherwise.
718  */
719 static int kmb_ocs_aead_dma_prepare(struct aead_request *req, u32 *src_dll_size)
720 {
721 	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
722 	const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
723 	struct ocs_aes_rctx *rctx = aead_request_ctx(req);
724 	u32 in_size;	/* The length of the data to be mapped by src_dll. */
725 	u32 out_size;	/* The length of the data to be mapped by dst_dll. */
726 	u32 dst_size;	/* The length of the data in dst_sg. */
727 	int rc;
728 
729 	/* Get number of entries in input data SG list. */
730 	rctx->src_nents = sg_nents_for_len(req->src,
731 					   req->assoclen + req->cryptlen);
732 	if (rctx->src_nents < 0)
733 		return -EBADMSG;
734 
735 	if (rctx->instruction == OCS_DECRYPT) {
736 		/*
737 		 * For decrypt:
738 		 * - src sg list is:		AAD|CT|tag
739 		 * - dst sg list expects:	AAD|PT
740 		 *
741 		 * in_size == len(CT); out_size == len(PT)
742 		 */
743 
744 		/* req->cryptlen includes both CT and tag. */
745 		in_size = req->cryptlen - tag_size;
746 
747 		/* out_size = PT size == CT size */
748 		out_size = in_size;
749 
750 		/* len(dst_sg) == len(AAD) + len(PT) */
751 		dst_size = req->assoclen + out_size;
752 
753 		/*
754 		 * Copy tag from source SG list to 'in_tag' buffer.
755 		 *
756 		 * Note: this needs to be done here, before DMA mapping src_sg.
757 		 */
758 		sg_pcopy_to_buffer(req->src, rctx->src_nents, rctx->in_tag,
759 				   tag_size, req->assoclen + in_size);
760 
761 	} else { /* OCS_ENCRYPT */
762 		/*
763 		 * For encrypt:
764 		 *	src sg list is:		AAD|PT
765 		 *	dst sg list expects:	AAD|CT|tag
766 		 */
767 		/* in_size == len(PT) */
768 		in_size = req->cryptlen;
769 
770 		/*
771 		 * In CCM mode the OCS engine appends the tag to the ciphertext,
772 		 * but in GCM mode the tag must be read from the tag registers
773 		 * and appended manually below
774 		 */
775 		out_size = (rctx->mode == OCS_MODE_CCM) ? in_size + tag_size :
776 							  in_size;
777 		/* len(dst_sg) == len(AAD) + len(CT) + len(tag) */
778 		dst_size = req->assoclen + in_size + tag_size;
779 	}
780 	*src_dll_size = in_size;
781 
782 	/* Get number of entries in output data SG list. */
783 	rctx->dst_nents = sg_nents_for_len(req->dst, dst_size);
784 	if (rctx->dst_nents < 0)
785 		return -EBADMSG;
786 
787 	rctx->in_place = (req->src == req->dst) ? 1 : 0;
788 
789 	/* Map destination; use bidirectional mapping for in-place case. */
790 	rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
791 					 rctx->dst_nents,
792 					 rctx->in_place ? DMA_BIDIRECTIONAL :
793 							  DMA_FROM_DEVICE);
794 	if (rctx->dst_dma_count == 0 && rctx->dst_nents != 0) {
795 		dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
796 		return -ENOMEM;
797 	}
798 
799 	/* Create AAD DST list: maps dst[0:AAD_SIZE-1]. */
800 	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
801 					    rctx->dst_dma_count,
802 					    &rctx->aad_dst_dll, req->assoclen,
803 					    0);
804 	if (rc)
805 		return rc;
806 
807 	/* Create DST list: maps dst[AAD_SIZE:AAD_SIZE+out_size]. */
808 	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
809 					    rctx->dst_dma_count, &rctx->dst_dll,
810 					    out_size, req->assoclen);
811 	if (rc)
812 		return rc;
813 
814 	if (rctx->in_place) {
815 		/* If this is not CCM encrypt, we are done. */
816 		if (!(rctx->mode == OCS_MODE_CCM &&
817 		      rctx->instruction == OCS_ENCRYPT)) {
818 			/*
819 			 * SRC and DST are the same, so re-use the same DMA
820 			 * addresses (to avoid allocating new DMA lists
821 			 * identical to the dst ones).
822 			 */
823 			rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
824 			rctx->aad_src_dll.dma_addr = rctx->aad_dst_dll.dma_addr;
825 
826 			return 0;
827 		}
828 		/*
829 		 * For CCM encrypt the input and output linked lists contain
830 		 * different amounts of data, so, we need to create different
831 		 * SRC and AAD SRC lists, even for the in-place case.
832 		 */
833 		rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
834 						    rctx->dst_dma_count,
835 						    &rctx->aad_src_dll,
836 						    req->assoclen, 0);
837 		if (rc)
838 			return rc;
839 		rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
840 						    rctx->dst_dma_count,
841 						    &rctx->src_dll, in_size,
842 						    req->assoclen);
843 		if (rc)
844 			return rc;
845 
846 		return 0;
847 	}
848 	/* Not in-place case. */
849 
850 	/* Map source SG. */
851 	rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
852 					 rctx->src_nents, DMA_TO_DEVICE);
853 	if (rctx->src_dma_count == 0 && rctx->src_nents != 0) {
854 		dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
855 		return -ENOMEM;
856 	}
857 
858 	/* Create AAD SRC list. */
859 	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
860 					    rctx->src_dma_count,
861 					    &rctx->aad_src_dll,
862 					    req->assoclen, 0);
863 	if (rc)
864 		return rc;
865 
866 	/* Create SRC list. */
867 	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
868 					    rctx->src_dma_count,
869 					    &rctx->src_dll, in_size,
870 					    req->assoclen);
871 	if (rc)
872 		return rc;
873 
874 	if (req->assoclen == 0)
875 		return 0;
876 
877 	/* Copy AAD from src sg to dst sg using OCS DMA. */
878 	rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->aad_dst_dll.dma_addr,
879 			       rctx->aad_src_dll.dma_addr, req->cryptlen);
880 	if (rc)
881 		dev_err(tctx->aes_dev->dev,
882 			"Failed to copy source AAD to destination AAD\n");
883 
884 	return rc;
885 }
886 
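/*
 * Execute an AEAD request: prepare DMA, run the OCS CCM/GCM processing and
 * handle the tag (compared against the input tag on GCM decrypt, appended to
 * the destination SG on GCM encrypt).
 */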
887 static int kmb_ocs_aead_run(struct aead_request *req)
888 {
889 	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
890 	const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
891 	struct ocs_aes_rctx *rctx = aead_request_ctx(req);
892 	u32 in_size;	/* The length of the data mapped by src_dll. */
893 	int rc;
894 
895 	rc = kmb_ocs_aead_dma_prepare(req, &in_size);
896 	if (rc)
897 		goto exit;
898 
899 	/* For CCM, we just call the OCS processing and we are done. */
900 	if (rctx->mode == OCS_MODE_CCM) {
901 		rc = ocs_aes_ccm_op(tctx->aes_dev, tctx->cipher,
902 				    rctx->instruction, rctx->dst_dll.dma_addr,
903 				    rctx->src_dll.dma_addr, in_size,
904 				    req->iv,
905 				    rctx->aad_src_dll.dma_addr, req->assoclen,
906 				    rctx->in_tag, tag_size);
907 		goto exit;
908 	}
909 	/* GCM case; invoke OCS processing. */
910 	rc = ocs_aes_gcm_op(tctx->aes_dev, tctx->cipher,
911 			    rctx->instruction,
912 			    rctx->dst_dll.dma_addr,
913 			    rctx->src_dll.dma_addr, in_size,
914 			    req->iv,
915 			    rctx->aad_src_dll.dma_addr, req->assoclen,
916 			    rctx->out_tag, tag_size);
917 	if (rc)
918 		goto exit;
919 
920 	/* For GCM decrypt, we have to compare in_tag with out_tag. */
921 	if (rctx->instruction == OCS_DECRYPT) {
922 		rc = memcmp(rctx->in_tag, rctx->out_tag, tag_size) ?
923 		     -EBADMSG : 0;
924 		goto exit;
925 	}
926 
927 	/* For GCM encrypt, we must manually copy out_tag to DST sg. */
928 
929 	/* Clean-up must be called before the sg_pcopy_from_buffer() below. */
930 	kmb_ocs_aead_dma_cleanup(req);
931 
932 	/* Copy tag to destination sg after AAD and CT. */
933 	sg_pcopy_from_buffer(req->dst, rctx->dst_nents, rctx->out_tag,
934 			     tag_size, req->assoclen + req->cryptlen);
935 
936 	/* Return directly as DMA cleanup already done. */
937 	return 0;
938 
939 exit:
940 	kmb_ocs_aead_dma_cleanup(req);
941 
942 	return rc;
943 }
944 
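/* Crypto engine callback: program the key and process one skcipher request. */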
945 static int kmb_ocs_aes_sk_do_one_request(struct crypto_engine *engine,
946 					 void *areq)
947 {
948 	struct skcipher_request *req =
949 			container_of(areq, struct skcipher_request, base);
950 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
951 	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
952 	int err;
953 
954 	if (!tctx->aes_dev) {
955 		err = -ENODEV;
956 		goto exit;
957 	}
958 
959 	err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
960 			      tctx->cipher);
961 	if (err)
962 		goto exit;
963 
964 	err = kmb_ocs_sk_run(req);
965 
966 exit:
967 	crypto_finalize_skcipher_request(engine, req, err);
968 
969 	return 0;
970 }
971 
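/* Crypto engine callback: program the key and process one AEAD request. */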
972 static int kmb_ocs_aes_aead_do_one_request(struct crypto_engine *engine,
973 					   void *areq)
974 {
975 	struct aead_request *req = container_of(areq,
976 						struct aead_request, base);
977 	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
978 	int err;
979 
980 	if (!tctx->aes_dev)
981 		return -ENODEV;
982 
983 	err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
984 			      tctx->cipher);
985 	if (err)
986 		goto exit;
987 
988 	err = kmb_ocs_aead_run(req);
989 
990 exit:
991 	crypto_finalize_aead_request(tctx->aes_dev->engine, req, err);
992 
993 	return 0;
994 }
995 
996 static int kmb_ocs_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
997 			       unsigned int key_len)
998 {
999 	return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_AES);
1000 }
1001 
1002 static int kmb_ocs_aes_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
1003 				    unsigned int key_len)
1004 {
1005 	return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_AES);
1006 }
1007 
1008 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
1009 static int kmb_ocs_aes_ecb_encrypt(struct skcipher_request *req)
1010 {
1011 	return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_ECB);
1012 }
1013 
1014 static int kmb_ocs_aes_ecb_decrypt(struct skcipher_request *req)
1015 {
1016 	return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_ECB);
1017 }
1018 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
1019 
1020 static int kmb_ocs_aes_cbc_encrypt(struct skcipher_request *req)
1021 {
1022 	return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CBC);
1023 }
1024 
1025 static int kmb_ocs_aes_cbc_decrypt(struct skcipher_request *req)
1026 {
1027 	return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CBC);
1028 }
1029 
1030 static int kmb_ocs_aes_ctr_encrypt(struct skcipher_request *req)
1031 {
1032 	return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTR);
1033 }
1034 
1035 static int kmb_ocs_aes_ctr_decrypt(struct skcipher_request *req)
1036 {
1037 	return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTR);
1038 }
1039 
1040 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
1041 static int kmb_ocs_aes_cts_encrypt(struct skcipher_request *req)
1042 {
1043 	return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTS);
1044 }
1045 
1046 static int kmb_ocs_aes_cts_decrypt(struct skcipher_request *req)
1047 {
1048 	return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTS);
1049 }
1050 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
1051 
1052 static int kmb_ocs_aes_gcm_encrypt(struct aead_request *req)
1053 {
1054 	return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_GCM);
1055 }
1056 
1057 static int kmb_ocs_aes_gcm_decrypt(struct aead_request *req)
1058 {
1059 	return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_GCM);
1060 }
1061 
1062 static int kmb_ocs_aes_ccm_encrypt(struct aead_request *req)
1063 {
1064 	return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CCM);
1065 }
1066 
1067 static int kmb_ocs_aes_ccm_decrypt(struct aead_request *req)
1068 {
1069 	return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CCM);
1070 }
1071 
1072 static int kmb_ocs_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
1073 			       unsigned int key_len)
1074 {
1075 	return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_SM4);
1076 }
1077 
1078 static int kmb_ocs_sm4_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
1079 				    unsigned int key_len)
1080 {
1081 	return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_SM4);
1082 }
1083 
1084 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
1085 static int kmb_ocs_sm4_ecb_encrypt(struct skcipher_request *req)
1086 {
1087 	return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_ECB);
1088 }
1089 
1090 static int kmb_ocs_sm4_ecb_decrypt(struct skcipher_request *req)
1091 {
1092 	return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_ECB);
1093 }
1094 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
1095 
1096 static int kmb_ocs_sm4_cbc_encrypt(struct skcipher_request *req)
1097 {
1098 	return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CBC);
1099 }
1100 
1101 static int kmb_ocs_sm4_cbc_decrypt(struct skcipher_request *req)
1102 {
1103 	return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CBC);
1104 }
1105 
1106 static int kmb_ocs_sm4_ctr_encrypt(struct skcipher_request *req)
1107 {
1108 	return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTR);
1109 }
1110 
1111 static int kmb_ocs_sm4_ctr_decrypt(struct skcipher_request *req)
1112 {
1113 	return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTR);
1114 }
1115 
1116 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
1117 static int kmb_ocs_sm4_cts_encrypt(struct skcipher_request *req)
1118 {
1119 	return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTS);
1120 }
1121 
1122 static int kmb_ocs_sm4_cts_decrypt(struct skcipher_request *req)
1123 {
1124 	return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTS);
1125 }
1126 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
1127 
1128 static int kmb_ocs_sm4_gcm_encrypt(struct aead_request *req)
1129 {
1130 	return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_GCM);
1131 }
1132 
1133 static int kmb_ocs_sm4_gcm_decrypt(struct aead_request *req)
1134 {
1135 	return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_GCM);
1136 }
1137 
1138 static int kmb_ocs_sm4_ccm_encrypt(struct aead_request *req)
1139 {
1140 	return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CCM);
1141 }
1142 
1143 static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
1144 {
1145 	return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
1146 }
1147 
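/* Initialize an AES skcipher transform and pre-allocate its software fallback. */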
1148 static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
1149 {
1150 	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
1151 	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
1152 	struct crypto_sync_skcipher *blk;
1153 
1154 	/* set fallback cipher in case it will be needed */
1155 	blk = crypto_alloc_sync_skcipher(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
1156 	if (IS_ERR(blk))
1157 		return PTR_ERR(blk);
1158 
1159 	tctx->sw_cipher.sk = blk;
1160 
1161 	crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
1162 
1163 	return 0;
1164 }
1165 
1166 static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
1167 {
1168 	crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
1169 
1170 	return 0;
1171 }
1172 
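/*
 * Wipe the key from the transform context and, if a device was assigned,
 * from the OCS key registers.
 */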
1173 static inline void clear_key(struct ocs_aes_tctx *tctx)
1174 {
1175 	memzero_explicit(tctx->key, OCS_AES_KEYSIZE_256);
1176 
1177 	/* Zero key registers if set */
1178 	if (tctx->aes_dev)
1179 		ocs_aes_set_key(tctx->aes_dev, OCS_AES_KEYSIZE_256,
1180 				tctx->key, OCS_AES);
1181 }
1182 
1183 static void ocs_exit_tfm(struct crypto_skcipher *tfm)
1184 {
1185 	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
1186 
1187 	clear_key(tctx);
1188 
1189 	if (tctx->sw_cipher.sk) {
1190 		crypto_free_sync_skcipher(tctx->sw_cipher.sk);
1191 		tctx->sw_cipher.sk = NULL;
1192 	}
1193 }
1194 
1195 static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
1196 {
1197 	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
1198 	struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
1199 	struct crypto_aead *blk;
1200 
1201 	/* Set fallback cipher in case it will be needed */
1202 	blk = crypto_alloc_aead(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
1203 	if (IS_ERR(blk))
1204 		return PTR_ERR(blk);
1205 
1206 	tctx->sw_cipher.aead = blk;
1207 
1208 	crypto_aead_set_reqsize(tfm,
1209 				max(sizeof(struct ocs_aes_rctx),
1210 				    (sizeof(struct aead_request) +
1211 				     crypto_aead_reqsize(tctx->sw_cipher.aead))));
1212 
1213 	return 0;
1214 }
1215 
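/* CCM supports even tag lengths between 4 and 16 bytes. */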
1216 static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
1217 					unsigned int authsize)
1218 {
1219 	switch (authsize) {
1220 	case 4:
1221 	case 6:
1222 	case 8:
1223 	case 10:
1224 	case 12:
1225 	case 14:
1226 	case 16:
1227 		return 0;
1228 	default:
1229 		return -EINVAL;
1230 	}
1231 }
1232 
1233 static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
1234 					unsigned int authsize)
1235 {
1236 	return crypto_gcm_check_authsize(authsize);
1237 }
1238 
1239 static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
1240 {
1241 	crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
1242 
1243 	return 0;
1244 }
1245 
1246 static void ocs_aead_cra_exit(struct crypto_aead *tfm)
1247 {
1248 	struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
1249 
1250 	clear_key(tctx);
1251 
1252 	if (tctx->sw_cipher.aead) {
1253 		crypto_free_aead(tctx->sw_cipher.aead);
1254 		tctx->sw_cipher.aead = NULL;
1255 	}
1256 }
1257 
1258 static struct skcipher_engine_alg algs[] = {
1259 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
1260 	{
1261 		.base.base.cra_name = "ecb(aes)",
1262 		.base.base.cra_driver_name = "ecb-aes-keembay-ocs",
1263 		.base.base.cra_priority = KMB_OCS_PRIORITY,
1264 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
1265 				       CRYPTO_ALG_KERN_DRIVER_ONLY |
1266 				       CRYPTO_ALG_NEED_FALLBACK,
1267 		.base.base.cra_blocksize = AES_BLOCK_SIZE,
1268 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1269 		.base.base.cra_module = THIS_MODULE,
1270 		.base.base.cra_alignmask = 0,
1271 
1272 		.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
1273 		.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
1274 		.base.setkey = kmb_ocs_aes_set_key,
1275 		.base.encrypt = kmb_ocs_aes_ecb_encrypt,
1276 		.base.decrypt = kmb_ocs_aes_ecb_decrypt,
1277 		.base.init = ocs_aes_init_tfm,
1278 		.base.exit = ocs_exit_tfm,
1279 		.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
1280 	},
1281 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
1282 	{
1283 		.base.base.cra_name = "cbc(aes)",
1284 		.base.base.cra_driver_name = "cbc-aes-keembay-ocs",
1285 		.base.base.cra_priority = KMB_OCS_PRIORITY,
1286 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
1287 				       CRYPTO_ALG_KERN_DRIVER_ONLY |
1288 				       CRYPTO_ALG_NEED_FALLBACK,
1289 		.base.base.cra_blocksize = AES_BLOCK_SIZE,
1290 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1291 		.base.base.cra_module = THIS_MODULE,
1292 		.base.base.cra_alignmask = 0,
1293 
1294 		.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
1295 		.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
1296 		.base.ivsize = AES_BLOCK_SIZE,
1297 		.base.setkey = kmb_ocs_aes_set_key,
1298 		.base.encrypt = kmb_ocs_aes_cbc_encrypt,
1299 		.base.decrypt = kmb_ocs_aes_cbc_decrypt,
1300 		.base.init = ocs_aes_init_tfm,
1301 		.base.exit = ocs_exit_tfm,
1302 		.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
1303 	},
1304 	{
1305 		.base.base.cra_name = "ctr(aes)",
1306 		.base.base.cra_driver_name = "ctr-aes-keembay-ocs",
1307 		.base.base.cra_priority = KMB_OCS_PRIORITY,
1308 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
1309 				       CRYPTO_ALG_KERN_DRIVER_ONLY |
1310 				       CRYPTO_ALG_NEED_FALLBACK,
1311 		.base.base.cra_blocksize = 1,
1312 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1313 		.base.base.cra_module = THIS_MODULE,
1314 		.base.base.cra_alignmask = 0,
1315 
1316 		.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
1317 		.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
1318 		.base.ivsize = AES_BLOCK_SIZE,
1319 		.base.setkey = kmb_ocs_aes_set_key,
1320 		.base.encrypt = kmb_ocs_aes_ctr_encrypt,
1321 		.base.decrypt = kmb_ocs_aes_ctr_decrypt,
1322 		.base.init = ocs_aes_init_tfm,
1323 		.base.exit = ocs_exit_tfm,
1324 		.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
1325 	},
1326 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
1327 	{
1328 		.base.base.cra_name = "cts(cbc(aes))",
1329 		.base.base.cra_driver_name = "cts-aes-keembay-ocs",
1330 		.base.base.cra_priority = KMB_OCS_PRIORITY,
1331 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
1332 				       CRYPTO_ALG_KERN_DRIVER_ONLY |
1333 				       CRYPTO_ALG_NEED_FALLBACK,
1334 		.base.base.cra_blocksize = AES_BLOCK_SIZE,
1335 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1336 		.base.base.cra_module = THIS_MODULE,
1337 		.base.base.cra_alignmask = 0,
1338 
1339 		.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
1340 		.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
1341 		.base.ivsize = AES_BLOCK_SIZE,
1342 		.base.setkey = kmb_ocs_aes_set_key,
1343 		.base.encrypt = kmb_ocs_aes_cts_encrypt,
1344 		.base.decrypt = kmb_ocs_aes_cts_decrypt,
1345 		.base.init = ocs_aes_init_tfm,
1346 		.base.exit = ocs_exit_tfm,
1347 		.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
1348 	},
1349 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
1350 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
1351 	{
1352 		.base.base.cra_name = "ecb(sm4)",
1353 		.base.base.cra_driver_name = "ecb-sm4-keembay-ocs",
1354 		.base.base.cra_priority = KMB_OCS_PRIORITY,
1355 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
1356 				       CRYPTO_ALG_KERN_DRIVER_ONLY,
1357 		.base.base.cra_blocksize = AES_BLOCK_SIZE,
1358 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1359 		.base.base.cra_module = THIS_MODULE,
1360 		.base.base.cra_alignmask = 0,
1361 
1362 		.base.min_keysize = OCS_SM4_KEY_SIZE,
1363 		.base.max_keysize = OCS_SM4_KEY_SIZE,
1364 		.base.setkey = kmb_ocs_sm4_set_key,
1365 		.base.encrypt = kmb_ocs_sm4_ecb_encrypt,
1366 		.base.decrypt = kmb_ocs_sm4_ecb_decrypt,
1367 		.base.init = ocs_sm4_init_tfm,
1368 		.base.exit = ocs_exit_tfm,
1369 		.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
1370 	},
1371 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
1372 	{
1373 		.base.base.cra_name = "cbc(sm4)",
1374 		.base.base.cra_driver_name = "cbc-sm4-keembay-ocs",
1375 		.base.base.cra_priority = KMB_OCS_PRIORITY,
1376 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
1377 				       CRYPTO_ALG_KERN_DRIVER_ONLY,
1378 		.base.base.cra_blocksize = AES_BLOCK_SIZE,
1379 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1380 		.base.base.cra_module = THIS_MODULE,
1381 		.base.base.cra_alignmask = 0,
1382 
1383 		.base.min_keysize = OCS_SM4_KEY_SIZE,
1384 		.base.max_keysize = OCS_SM4_KEY_SIZE,
1385 		.base.ivsize = AES_BLOCK_SIZE,
1386 		.base.setkey = kmb_ocs_sm4_set_key,
1387 		.base.encrypt = kmb_ocs_sm4_cbc_encrypt,
1388 		.base.decrypt = kmb_ocs_sm4_cbc_decrypt,
1389 		.base.init = ocs_sm4_init_tfm,
1390 		.base.exit = ocs_exit_tfm,
1391 		.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
1392 	},
1393 	{
1394 		.base.base.cra_name = "ctr(sm4)",
1395 		.base.base.cra_driver_name = "ctr-sm4-keembay-ocs",
1396 		.base.base.cra_priority = KMB_OCS_PRIORITY,
1397 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
1398 				       CRYPTO_ALG_KERN_DRIVER_ONLY,
1399 		.base.base.cra_blocksize = 1,
1400 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1401 		.base.base.cra_module = THIS_MODULE,
1402 		.base.base.cra_alignmask = 0,
1403 
1404 		.base.min_keysize = OCS_SM4_KEY_SIZE,
1405 		.base.max_keysize = OCS_SM4_KEY_SIZE,
1406 		.base.ivsize = AES_BLOCK_SIZE,
1407 		.base.setkey = kmb_ocs_sm4_set_key,
1408 		.base.encrypt = kmb_ocs_sm4_ctr_encrypt,
1409 		.base.decrypt = kmb_ocs_sm4_ctr_decrypt,
1410 		.base.init = ocs_sm4_init_tfm,
1411 		.base.exit = ocs_exit_tfm,
1412 		.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
1413 	},
1414 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
1415 	{
1416 		.base.base.cra_name = "cts(cbc(sm4))",
1417 		.base.base.cra_driver_name = "cts-sm4-keembay-ocs",
1418 		.base.base.cra_priority = KMB_OCS_PRIORITY,
1419 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
1420 				       CRYPTO_ALG_KERN_DRIVER_ONLY,
1421 		.base.base.cra_blocksize = AES_BLOCK_SIZE,
1422 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1423 		.base.base.cra_module = THIS_MODULE,
1424 		.base.base.cra_alignmask = 0,
1425 
1426 		.base.min_keysize = OCS_SM4_KEY_SIZE,
1427 		.base.max_keysize = OCS_SM4_KEY_SIZE,
1428 		.base.ivsize = AES_BLOCK_SIZE,
1429 		.base.setkey = kmb_ocs_sm4_set_key,
1430 		.base.encrypt = kmb_ocs_sm4_cts_encrypt,
1431 		.base.decrypt = kmb_ocs_sm4_cts_decrypt,
1432 		.base.init = ocs_sm4_init_tfm,
1433 		.base.exit = ocs_exit_tfm,
1434 		.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
1435 	}
1436 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
1437 };
1438 
1439 static struct aead_engine_alg algs_aead[] = {
1440 	{
1441 		.base.base = {
1442 			.cra_name = "gcm(aes)",
1443 			.cra_driver_name = "gcm-aes-keembay-ocs",
1444 			.cra_priority = KMB_OCS_PRIORITY,
1445 			.cra_flags = CRYPTO_ALG_ASYNC |
1446 				     CRYPTO_ALG_KERN_DRIVER_ONLY |
1447 				     CRYPTO_ALG_NEED_FALLBACK,
1448 			.cra_blocksize = 1,
1449 			.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1450 			.cra_alignmask = 0,
1451 			.cra_module = THIS_MODULE,
1452 		},
1453 		.base.init = ocs_aes_aead_cra_init,
1454 		.base.exit = ocs_aead_cra_exit,
1455 		.base.ivsize = GCM_AES_IV_SIZE,
1456 		.base.maxauthsize = AES_BLOCK_SIZE,
1457 		.base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
1458 		.base.setkey = kmb_ocs_aes_aead_set_key,
1459 		.base.encrypt = kmb_ocs_aes_gcm_encrypt,
1460 		.base.decrypt = kmb_ocs_aes_gcm_decrypt,
1461 		.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
1462 	},
1463 	{
1464 		.base.base = {
1465 			.cra_name = "ccm(aes)",
1466 			.cra_driver_name = "ccm-aes-keembay-ocs",
1467 			.cra_priority = KMB_OCS_PRIORITY,
1468 			.cra_flags = CRYPTO_ALG_ASYNC |
1469 				     CRYPTO_ALG_KERN_DRIVER_ONLY |
1470 				     CRYPTO_ALG_NEED_FALLBACK,
1471 			.cra_blocksize = 1,
1472 			.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1473 			.cra_alignmask = 0,
1474 			.cra_module = THIS_MODULE,
1475 		},
1476 		.base.init = ocs_aes_aead_cra_init,
1477 		.base.exit = ocs_aead_cra_exit,
1478 		.base.ivsize = AES_BLOCK_SIZE,
1479 		.base.maxauthsize = AES_BLOCK_SIZE,
1480 		.base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
1481 		.base.setkey = kmb_ocs_aes_aead_set_key,
1482 		.base.encrypt = kmb_ocs_aes_ccm_encrypt,
1483 		.base.decrypt = kmb_ocs_aes_ccm_decrypt,
1484 		.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
1485 	},
1486 	{
1487 		.base.base = {
1488 			.cra_name = "gcm(sm4)",
1489 			.cra_driver_name = "gcm-sm4-keembay-ocs",
1490 			.cra_priority = KMB_OCS_PRIORITY,
1491 			.cra_flags = CRYPTO_ALG_ASYNC |
1492 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
1493 			.cra_blocksize = 1,
1494 			.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1495 			.cra_alignmask = 0,
1496 			.cra_module = THIS_MODULE,
1497 		},
1498 		.base.init = ocs_sm4_aead_cra_init,
1499 		.base.exit = ocs_aead_cra_exit,
1500 		.base.ivsize = GCM_AES_IV_SIZE,
1501 		.base.maxauthsize = AES_BLOCK_SIZE,
1502 		.base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
1503 		.base.setkey = kmb_ocs_sm4_aead_set_key,
1504 		.base.encrypt = kmb_ocs_sm4_gcm_encrypt,
1505 		.base.decrypt = kmb_ocs_sm4_gcm_decrypt,
1506 		.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
1507 	},
1508 	{
1509 		.base.base = {
1510 			.cra_name = "ccm(sm4)",
1511 			.cra_driver_name = "ccm-sm4-keembay-ocs",
1512 			.cra_priority = KMB_OCS_PRIORITY,
1513 			.cra_flags = CRYPTO_ALG_ASYNC |
1514 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
1515 			.cra_blocksize = 1,
1516 			.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1517 			.cra_alignmask = 0,
1518 			.cra_module = THIS_MODULE,
1519 		},
1520 		.base.init = ocs_sm4_aead_cra_init,
1521 		.base.exit = ocs_aead_cra_exit,
1522 		.base.ivsize = AES_BLOCK_SIZE,
1523 		.base.maxauthsize = AES_BLOCK_SIZE,
1524 		.base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
1525 		.base.setkey = kmb_ocs_sm4_aead_set_key,
1526 		.base.encrypt = kmb_ocs_sm4_ccm_encrypt,
1527 		.base.decrypt = kmb_ocs_sm4_ccm_decrypt,
1528 		.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
1529 	}
1530 };
1531 
1532 static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
1533 {
1534 	crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
1535 	crypto_engine_unregister_skciphers(algs, ARRAY_SIZE(algs));
1536 }
1537 
1538 static int register_aes_algs(struct ocs_aes_dev *aes_dev)
1539 {
1540 	int ret;
1541 
1542 	/*
1543 	 * If any algorithm fails to register, all preceding algorithms that
1544 	 * were successfully registered will be automatically unregistered.
1545 	 */
1546 	ret = crypto_engine_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
1547 	if (ret)
1548 		return ret;
1549 
1550 	ret = crypto_engine_register_skciphers(algs, ARRAY_SIZE(algs));
1551 	if (ret)
1552 		crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
1553 
1554 	return ret;
1555 }
1556 
1557 /* Device tree driver match. */
1558 static const struct of_device_id kmb_ocs_aes_of_match[] = {
1559 	{
1560 		.compatible = "intel,keembay-ocs-aes",
1561 	},
1562 	{}
1563 };
1564 
1565 static int kmb_ocs_aes_remove(struct platform_device *pdev)
1566 {
1567 	struct ocs_aes_dev *aes_dev;
1568 
1569 	aes_dev = platform_get_drvdata(pdev);
1570 
1571 	unregister_aes_algs(aes_dev);
1572 
1573 	spin_lock(&ocs_aes.lock);
1574 	list_del(&aes_dev->list);
1575 	spin_unlock(&ocs_aes.lock);
1576 
1577 	crypto_engine_exit(aes_dev->engine);
1578 
1579 	return 0;
1580 }
1581 
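/*
 * Probe: set up DMA, map the OCS registers, request the IRQ, start the
 * crypto engine and register the OCS algorithms.
 */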
1582 static int kmb_ocs_aes_probe(struct platform_device *pdev)
1583 {
1584 	struct device *dev = &pdev->dev;
1585 	struct ocs_aes_dev *aes_dev;
1586 	int rc;
1587 
1588 	aes_dev = devm_kzalloc(dev, sizeof(*aes_dev), GFP_KERNEL);
1589 	if (!aes_dev)
1590 		return -ENOMEM;
1591 
1592 	aes_dev->dev = dev;
1593 
1594 	platform_set_drvdata(pdev, aes_dev);
1595 
1596 	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1597 	if (rc) {
1598 		dev_err(dev, "Failed to set 32 bit dma mask %d\n", rc);
1599 		return rc;
1600 	}
1601 
1602 	/* Get base register address. */
1603 	aes_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
1604 	if (IS_ERR(aes_dev->base_reg))
1605 		return PTR_ERR(aes_dev->base_reg);
1606 
1607 	/* Get and request IRQ */
1608 	aes_dev->irq = platform_get_irq(pdev, 0);
1609 	if (aes_dev->irq < 0)
1610 		return aes_dev->irq;
1611 
1612 	rc = devm_request_threaded_irq(dev, aes_dev->irq, ocs_aes_irq_handler,
1613 				       NULL, 0, "keembay-ocs-aes", aes_dev);
1614 	if (rc < 0) {
1615 		dev_err(dev, "Could not request IRQ\n");
1616 		return rc;
1617 	}
1618 
1619 	INIT_LIST_HEAD(&aes_dev->list);
1620 	spin_lock(&ocs_aes.lock);
1621 	list_add_tail(&aes_dev->list, &ocs_aes.dev_list);
1622 	spin_unlock(&ocs_aes.lock);
1623 
1624 	init_completion(&aes_dev->irq_completion);
1625 
1626 	/* Initialize crypto engine */
1627 	aes_dev->engine = crypto_engine_alloc_init(dev, true);
1628 	if (!aes_dev->engine) {
1629 		rc = -ENOMEM;
1630 		goto list_del;
1631 	}
1632 
1633 	rc = crypto_engine_start(aes_dev->engine);
1634 	if (rc) {
1635 		dev_err(dev, "Could not start crypto engine\n");
1636 		goto cleanup;
1637 	}
1638 
1639 	rc = register_aes_algs(aes_dev);
1640 	if (rc) {
1641 		dev_err(dev,
1642 			"Could not register OCS algorithms with Crypto API\n");
1643 		goto cleanup;
1644 	}
1645 
1646 	return 0;
1647 
1648 cleanup:
1649 	crypto_engine_exit(aes_dev->engine);
1650 list_del:
1651 	spin_lock(&ocs_aes.lock);
1652 	list_del(&aes_dev->list);
1653 	spin_unlock(&ocs_aes.lock);
1654 
1655 	return rc;
1656 }
1657 
1658 /* The OCS driver is a platform device. */
1659 static struct platform_driver kmb_ocs_aes_driver = {
1660 	.probe = kmb_ocs_aes_probe,
1661 	.remove = kmb_ocs_aes_remove,
1662 	.driver = {
1663 			.name = DRV_NAME,
1664 			.of_match_table = kmb_ocs_aes_of_match,
1665 		},
1666 };
1667 
1668 module_platform_driver(kmb_ocs_aes_driver);
1669 
1670 MODULE_DESCRIPTION("Intel Keem Bay Offload and Crypto Subsystem (OCS) AES/SM4 Driver");
1671 MODULE_LICENSE("GPL");
1672 
1673 MODULE_ALIAS_CRYPTO("cbc-aes-keembay-ocs");
1674 MODULE_ALIAS_CRYPTO("ctr-aes-keembay-ocs");
1675 MODULE_ALIAS_CRYPTO("gcm-aes-keembay-ocs");
1676 MODULE_ALIAS_CRYPTO("ccm-aes-keembay-ocs");
1677 
1678 MODULE_ALIAS_CRYPTO("cbc-sm4-keembay-ocs");
1679 MODULE_ALIAS_CRYPTO("ctr-sm4-keembay-ocs");
1680 MODULE_ALIAS_CRYPTO("gcm-sm4-keembay-ocs");
1681 MODULE_ALIAS_CRYPTO("ccm-sm4-keembay-ocs");
1682 
1683 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
1684 MODULE_ALIAS_CRYPTO("ecb-aes-keembay-ocs");
1685 MODULE_ALIAS_CRYPTO("ecb-sm4-keembay-ocs");
1686 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
1687 
1688 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
1689 MODULE_ALIAS_CRYPTO("cts-aes-keembay-ocs");
1690 MODULE_ALIAS_CRYPTO("cts-sm4-keembay-ocs");
1691 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
1692