1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
3 
4 #include <crypto/aes.h>
5 #include <crypto/authenc.h>
6 #include <crypto/cryptd.h>
7 #include <crypto/des.h>
8 #include <crypto/internal/aead.h>
9 #include <crypto/sha1.h>
10 #include <crypto/sha2.h>
11 #include <crypto/xts.h>
12 #include <crypto/gcm.h>
13 #include <crypto/scatterwalk.h>
14 #include <linux/sort.h>
15 #include <linux/module.h>
16 #include "otx2_cptvf.h"
17 #include "otx2_cptvf_algs.h"
18 #include "otx2_cpt_reqmgr.h"
19 
20 /* Size of salt in AES GCM mode */
21 #define AES_GCM_SALT_SIZE 4
22 /* Size of IV in AES GCM mode */
23 #define AES_GCM_IV_SIZE 8
24 /* Size of ICV (Integrity Check Value) in AES GCM mode */
25 #define AES_GCM_ICV_SIZE 16
26 /* Offset of IV in AES GCM mode */
27 #define AES_GCM_IV_OFFSET 8
28 #define CONTROL_WORD_LEN 8
29 #define KEY2_OFFSET 48
30 #define DMA_MODE_FLAG(dma_mode) \
31 	(((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)
32 
33 /* Truncated SHA digest size */
34 #define SHA1_TRUNC_DIGEST_SIZE 12
35 #define SHA256_TRUNC_DIGEST_SIZE 16
36 #define SHA384_TRUNC_DIGEST_SIZE 24
37 #define SHA512_TRUNC_DIGEST_SIZE 32
38 
39 static DEFINE_MUTEX(mutex);
40 static int is_crypto_registered;
41 
42 struct cpt_device_desc {
43 	struct pci_dev *dev;
44 	int num_queues;
45 };
46 
47 struct cpt_device_table {
48 	atomic_t count;
49 	struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
50 };
51 
52 static struct cpt_device_table se_devices = {
53 	.count = ATOMIC_INIT(0)
54 };
55 
56 static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
57 
58 static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
59 {
60 	int count;
61 
62 	count = atomic_read(&se_devices.count);
63 	if (count < 1)
64 		return -ENODEV;
65 
66 	*cpu_num = get_cpu();
67 	/*
68 	 * On the OcteonTX2 platform a CPT instruction queue is bound to
69 	 * each local function (LF), and LFs can be attached to either a
70 	 * PF or a VF, therefore we always use the first device. We get
71 	 * maximum performance if one CPT queue is available per CPU,
72 	 * otherwise CPT queues have to be shared between CPUs.
73 	 */
74 	if (*cpu_num >= se_devices.desc[0].num_queues)
75 		*cpu_num %= se_devices.desc[0].num_queues;
76 	*pdev = se_devices.desc[0].dev;
77 
78 	put_cpu();
79 
80 	return 0;
81 }
82 
83 static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
84 {
85 	struct otx2_cpt_req_ctx *rctx;
86 	struct aead_request *req;
87 	struct crypto_aead *tfm;
88 
89 	req = container_of(cpt_req->areq, struct aead_request, base);
90 	tfm = crypto_aead_reqtfm(req);
91 	rctx = aead_request_ctx_dma(req);
92 	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
93 		   rctx->fctx.hmac.s.hmac_recv,
94 		   crypto_aead_authsize(tfm)) != 0)
95 		return -EBADMSG;
96 
97 	return 0;
98 }
99 
100 static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
101 {
102 	struct otx2_cpt_inst_info *inst_info = arg2;
103 	struct crypto_async_request *areq = arg1;
104 	struct otx2_cpt_req_info *cpt_req;
105 	struct pci_dev *pdev;
106 
107 	if (inst_info) {
108 		cpt_req = inst_info->req;
109 		if (!status) {
110 			/*
111 			 * When the selected cipher is NULL we need to manually
112 			 * verify whether the calculated hmac value matches
113 			 * the received hmac value.
114 			 */
115 			if (cpt_req->req_type ==
116 			    OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
117 			    !cpt_req->is_enc)
118 				status = validate_hmac_cipher_null(cpt_req);
119 		}
120 		pdev = inst_info->pdev;
121 		otx2_cpt_info_destroy(pdev, inst_info);
122 	}
123 	if (areq)
124 		crypto_request_complete(areq, status);
125 }
126 
127 static void output_iv_copyback(struct crypto_async_request *areq)
128 {
129 	struct otx2_cpt_req_info *req_info;
130 	struct otx2_cpt_req_ctx *rctx;
131 	struct skcipher_request *sreq;
132 	struct crypto_skcipher *stfm;
133 	struct otx2_cpt_enc_ctx *ctx;
134 	u32 start, ivsize;
135 
136 	sreq = container_of(areq, struct skcipher_request, base);
137 	stfm = crypto_skcipher_reqtfm(sreq);
138 	ctx = crypto_skcipher_ctx(stfm);
139 	if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
140 	    ctx->cipher_type == OTX2_CPT_DES3_CBC) {
141 		rctx = skcipher_request_ctx_dma(sreq);
142 		req_info = &rctx->cpt_req;
143 		ivsize = crypto_skcipher_ivsize(stfm);
144 		start = sreq->cryptlen - ivsize;
145 
146 		if (req_info->is_enc) {
147 			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
148 						 ivsize, 0);
149 		} else {
150 			if (sreq->src != sreq->dst) {
151 				scatterwalk_map_and_copy(sreq->iv, sreq->src,
152 							 start, ivsize, 0);
153 			} else {
154 				memcpy(sreq->iv, req_info->iv_out, ivsize);
155 				kfree(req_info->iv_out);
156 			}
157 		}
158 	}
159 }
160 
161 static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
162 {
163 	struct otx2_cpt_inst_info *inst_info = arg2;
164 	struct crypto_async_request *areq = arg1;
165 	struct pci_dev *pdev;
166 
167 	if (areq) {
168 		if (!status)
169 			output_iv_copyback(areq);
170 		if (inst_info) {
171 			pdev = inst_info->pdev;
172 			otx2_cpt_info_destroy(pdev, inst_info);
173 		}
174 		crypto_request_complete(areq, status);
175 	}
176 }
177 
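/*
 * Add the input scatterlist segments to the CPT request's input pointer
 * list and account for them in the DMA input length (dlen).
 */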
178 static inline void update_input_data(struct otx2_cpt_req_info *req_info,
179 				     struct scatterlist *inp_sg,
180 				     u32 nbytes, u32 *argcnt)
181 {
182 	req_info->req.dlen += nbytes;
183 
184 	while (nbytes) {
185 		u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
186 		u8 *ptr = sg_virt(inp_sg);
187 
188 		req_info->in[*argcnt].vptr = (void *)ptr;
189 		req_info->in[*argcnt].size = len;
190 		nbytes -= len;
191 		++(*argcnt);
192 		inp_sg = sg_next(inp_sg);
193 	}
194 }
195 
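/*
 * Add the output scatterlist segments, starting at @offset within the first
 * segment, to the CPT request's output pointer list and grow the expected
 * result length (rlen).
 */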
196 static inline void update_output_data(struct otx2_cpt_req_info *req_info,
197 				      struct scatterlist *outp_sg,
198 				      u32 offset, u32 nbytes, u32 *argcnt)
199 {
200 	u32 len, sg_len;
201 	u8 *ptr;
202 
203 	req_info->rlen += nbytes;
204 
205 	while (nbytes) {
206 		sg_len = outp_sg->length - offset;
207 		len = (nbytes < sg_len) ? nbytes : sg_len;
208 		ptr = sg_virt(outp_sg);
209 
210 		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
211 		req_info->out[*argcnt].size = len;
212 		nbytes -= len;
213 		++(*argcnt);
214 		offset = 0;
215 		outp_sg = sg_next(outp_sg);
216 	}
217 }
218 
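/*
 * Build the control word and flexi-crypto context for an skcipher request.
 * For in-place CBC decryption the last ciphertext block is saved to iv_out
 * so it can be copied back as the output IV when the request completes.
 */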
219 static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
220 				 u32 *argcnt)
221 {
222 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
223 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
224 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
225 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
226 	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
227 	int ivsize = crypto_skcipher_ivsize(stfm);
228 	u32 start = req->cryptlen - ivsize;
229 	gfp_t flags;
230 
231 	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
232 			GFP_KERNEL : GFP_ATOMIC;
233 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
234 	req_info->ctrl.s.se_req = 1;
235 
236 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
237 				DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
238 	if (enc) {
239 		req_info->req.opcode.s.minor = 2;
240 	} else {
241 		req_info->req.opcode.s.minor = 3;
242 		if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
243 		    ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
244 		    req->src == req->dst) {
245 			req_info->iv_out = kmalloc(ivsize, flags);
246 			if (!req_info->iv_out)
247 				return -ENOMEM;
248 
249 			scatterwalk_map_and_copy(req_info->iv_out, req->src,
250 						 start, ivsize, 0);
251 		}
252 	}
253 	/* Encryption data length */
254 	req_info->req.param1 = req->cryptlen;
255 	/* Authentication data length */
256 	req_info->req.param2 = 0;
257 
258 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
259 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
260 	fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
261 
262 	if (ctx->cipher_type == OTX2_CPT_AES_XTS)
263 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
264 	else
265 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
266 
267 	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
268 
269 	cpu_to_be64s(&fctx->enc.enc_ctrl.u);
270 
271 	/*
272 	 * Store the packet data information in the offset
273 	 * control word (first 8 bytes).
274 	 */
275 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
276 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
277 	req_info->req.dlen += CONTROL_WORD_LEN;
278 	++(*argcnt);
279 
280 	req_info->in[*argcnt].vptr = (u8 *)fctx;
281 	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
282 	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
283 
284 	++(*argcnt);
285 
286 	return 0;
287 }
288 
289 static inline int create_input_list(struct skcipher_request *req, u32 enc,
290 				    u32 enc_iv_len)
291 {
292 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
293 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
294 	u32 argcnt =  0;
295 	int ret;
296 
297 	ret = create_ctx_hdr(req, enc, &argcnt);
298 	if (ret)
299 		return ret;
300 
301 	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
302 	req_info->in_cnt = argcnt;
303 
304 	return 0;
305 }
306 
307 static inline void create_output_list(struct skcipher_request *req,
308 				      u32 enc_iv_len)
309 {
310 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
311 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
312 	u32 argcnt = 0;
313 
314 	/*
315 	 * OUTPUT Buffer Processing
316 	 * AES encryption/decryption output would be
317 	 * received in the following format
318 	 *
319 	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
320 	 * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
321 	 */
322 	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
323 	req_info->out_cnt = argcnt;
324 }
325 
326 static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
327 {
328 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
329 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
330 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
331 	int ret;
332 
333 	if (ctx->fbk_cipher) {
334 		skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
335 		skcipher_request_set_callback(&rctx->sk_fbk_req,
336 					      req->base.flags,
337 					      req->base.complete,
338 					      req->base.data);
339 		skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
340 					   req->dst, req->cryptlen, req->iv);
341 		ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
342 			       crypto_skcipher_decrypt(&rctx->sk_fbk_req);
343 	} else {
344 		ret = -EINVAL;
345 	}
346 	return ret;
347 }
348 
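/*
 * Common encrypt/decrypt path for all skcipher algorithms: check length
 * alignment, fall back to the software implementation for requests larger
 * than OTX2_CPT_MAX_REQ_SIZE, build the input/output lists and submit the
 * request asynchronously.
 */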
349 static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
350 {
351 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
352 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
353 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
354 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
355 	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
356 	struct pci_dev *pdev;
357 	int status, cpu_num;
358 
359 	if (req->cryptlen == 0)
360 		return 0;
361 
362 	if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
363 		return -EINVAL;
364 
365 	if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
366 		return skcipher_do_fallback(req, enc);
367 
368 	/* Clear control words */
369 	rctx->ctrl_word.flags = 0;
370 	rctx->fctx.enc.enc_ctrl.u = 0;
371 
372 	status = create_input_list(req, enc, enc_iv_len);
373 	if (status)
374 		return status;
375 	create_output_list(req, enc_iv_len);
376 
377 	status = get_se_device(&pdev, &cpu_num);
378 	if (status)
379 		return status;
380 
381 	req_info->callback = otx2_cpt_skcipher_callback;
382 	req_info->areq = &req->base;
383 	req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
384 	req_info->is_enc = enc;
385 	req_info->is_trunc_hmac = false;
386 	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
387 
388 	/*
389 	 * We perform an asynchronous send and once
390 	 * the request is completed the driver notifies us
391 	 * through the registered callback functions.
392 	 */
393 	status = otx2_cpt_do_request(pdev, req_info, cpu_num);
394 
395 	return status;
396 }
397 
398 static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
399 {
400 	return cpt_enc_dec(req, true);
401 }
402 
403 static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
404 {
405 	return cpt_enc_dec(req, false);
406 }
407 
408 static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
409 				       const u8 *key, u32 keylen)
410 {
411 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
412 	const u8 *key2 = key + (keylen / 2);
413 	const u8 *key1 = key;
414 	int ret;
415 
416 	ret = xts_verify_key(tfm, key, keylen);
417 	if (ret)
418 		return ret;
419 	ctx->key_len = keylen;
420 	ctx->enc_align_len = 1;
421 	memcpy(ctx->enc_key, key1, keylen / 2);
422 	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
423 	ctx->cipher_type = OTX2_CPT_AES_XTS;
424 	switch (ctx->key_len) {
425 	case 2 * AES_KEYSIZE_128:
426 		ctx->key_type = OTX2_CPT_AES_128_BIT;
427 		break;
428 	case 2 * AES_KEYSIZE_192:
429 		ctx->key_type = OTX2_CPT_AES_192_BIT;
430 		break;
431 	case 2 * AES_KEYSIZE_256:
432 		ctx->key_type = OTX2_CPT_AES_256_BIT;
433 		break;
434 	default:
435 		return -EINVAL;
436 	}
437 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
438 }
439 
440 static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
441 			  u32 keylen, u8 cipher_type)
442 {
443 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
444 
445 	if (keylen != DES3_EDE_KEY_SIZE)
446 		return -EINVAL;
447 
448 	ctx->key_len = keylen;
449 	ctx->cipher_type = cipher_type;
450 	ctx->enc_align_len = 8;
451 
452 	memcpy(ctx->enc_key, key, keylen);
453 
454 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
455 }
456 
457 static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
458 			  u32 keylen, u8 cipher_type)
459 {
460 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
461 
462 	switch (keylen) {
463 	case AES_KEYSIZE_128:
464 		ctx->key_type = OTX2_CPT_AES_128_BIT;
465 		break;
466 	case AES_KEYSIZE_192:
467 		ctx->key_type = OTX2_CPT_AES_192_BIT;
468 		break;
469 	case AES_KEYSIZE_256:
470 		ctx->key_type = OTX2_CPT_AES_256_BIT;
471 		break;
472 	default:
473 		return -EINVAL;
474 	}
475 	if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
476 		ctx->enc_align_len = 16;
477 	else
478 		ctx->enc_align_len = 1;
479 
480 	ctx->key_len = keylen;
481 	ctx->cipher_type = cipher_type;
482 
483 	memcpy(ctx->enc_key, key, keylen);
484 
485 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
486 }
487 
488 static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
489 					    const u8 *key, u32 keylen)
490 {
491 	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
492 }
493 
494 static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
495 					    const u8 *key, u32 keylen)
496 {
497 	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
498 }
499 
500 static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
501 					     const u8 *key, u32 keylen)
502 {
503 	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
504 }
505 
506 static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
507 					     const u8 *key, u32 keylen)
508 {
509 	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
510 }
511 
512 static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
513 				      struct crypto_alg *alg)
514 {
515 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
516 		ctx->fbk_cipher =
517 				crypto_alloc_skcipher(alg->cra_name, 0,
518 						      CRYPTO_ALG_ASYNC |
519 						      CRYPTO_ALG_NEED_FALLBACK);
520 		if (IS_ERR(ctx->fbk_cipher)) {
521 			pr_err("%s() failed to allocate fallback for %s\n",
522 				__func__, alg->cra_name);
523 			return PTR_ERR(ctx->fbk_cipher);
524 		}
525 	}
526 	return 0;
527 }
528 
529 static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
530 {
531 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
532 	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
533 	struct crypto_alg *alg = tfm->__crt_alg;
534 
535 	memset(ctx, 0, sizeof(*ctx));
536 	/*
537 	 * Additional memory for skcipher_request is
538 	 * allocated since the cryptd daemon uses
539 	 * this memory for request_ctx information
540 	 */
541 	crypto_skcipher_set_reqsize_dma(
542 		stfm, sizeof(struct otx2_cpt_req_ctx) +
543 		      sizeof(struct skcipher_request));
544 
545 	return cpt_skcipher_fallback_init(ctx, alg);
546 }
547 
548 static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
549 {
550 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
551 
552 	if (ctx->fbk_cipher) {
553 		crypto_free_skcipher(ctx->fbk_cipher);
554 		ctx->fbk_cipher = NULL;
555 	}
556 }
557 
558 static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
559 				  struct crypto_alg *alg)
560 {
561 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
562 		ctx->fbk_cipher =
563 			    crypto_alloc_aead(alg->cra_name, 0,
564 					      CRYPTO_ALG_ASYNC |
565 					      CRYPTO_ALG_NEED_FALLBACK);
566 		if (IS_ERR(ctx->fbk_cipher)) {
567 			pr_err("%s() failed to allocate fallback for %s\n",
568 				__func__, alg->cra_name);
569 			return PTR_ERR(ctx->fbk_cipher);
570 		}
571 	}
572 	return 0;
573 }
574 
575 static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
576 {
577 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
578 	struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
579 	struct crypto_alg *alg = tfm->__crt_alg;
580 
581 	ctx->cipher_type = cipher_type;
582 	ctx->mac_type = mac_type;
583 
584 	switch (ctx->mac_type) {
585 	case OTX2_CPT_SHA1:
586 		ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
587 		break;
588 
589 	case OTX2_CPT_SHA256:
590 		ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
591 		break;
592 
593 	case OTX2_CPT_SHA384:
594 		ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
595 		break;
596 
597 	case OTX2_CPT_SHA512:
598 		ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
599 		break;
600 	}
601 
602 	if (IS_ERR(ctx->hashalg))
603 		return PTR_ERR(ctx->hashalg);
604 
605 	if (ctx->hashalg) {
606 		ctx->sdesc = alloc_sdesc(ctx->hashalg);
607 		if (!ctx->sdesc) {
608 			crypto_free_shash(ctx->hashalg);
609 			return -ENOMEM;
610 		}
611 	}
612 
613 	/*
614 	 * When the selected cipher is NULL we use the HMAC opcode instead of
615 	 * the FLEXICRYPTO opcode, therefore we don't need the HASH algorithms
616 	 * for calculating ipad and opad.
617 	 */
618 	if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) {
619 		int ss = crypto_shash_statesize(ctx->hashalg);
620 
621 		ctx->ipad = kzalloc(ss, GFP_KERNEL);
622 		if (!ctx->ipad) {
623 			kfree(ctx->sdesc);
624 			crypto_free_shash(ctx->hashalg);
625 			return -ENOMEM;
626 		}
627 
628 		ctx->opad = kzalloc(ss, GFP_KERNEL);
629 		if (!ctx->opad) {
630 			kfree(ctx->ipad);
631 			kfree(ctx->sdesc);
632 			crypto_free_shash(ctx->hashalg);
633 			return -ENOMEM;
634 		}
635 	}
636 	switch (ctx->cipher_type) {
637 	case OTX2_CPT_AES_CBC:
638 	case OTX2_CPT_AES_ECB:
639 		ctx->enc_align_len = 16;
640 		break;
641 	case OTX2_CPT_DES3_CBC:
642 	case OTX2_CPT_DES3_ECB:
643 		ctx->enc_align_len = 8;
644 		break;
645 	case OTX2_CPT_AES_GCM:
646 	case OTX2_CPT_CIPHER_NULL:
647 		ctx->enc_align_len = 1;
648 		break;
649 	}
650 	crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));
651 
652 	return cpt_aead_fallback_init(ctx, alg);
653 }
654 
655 static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
656 {
657 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
658 }
659 
660 static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
661 {
662 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
663 }
664 
665 static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
666 {
667 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
668 }
669 
670 static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
671 {
672 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
673 }
674 
675 static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
676 {
677 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
678 }
679 
680 static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
681 {
682 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
683 }
684 
685 static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
686 {
687 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
688 }
689 
690 static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
691 {
692 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
693 }
694 
695 static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
696 {
697 	return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
698 }
699 
700 static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
701 {
702 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
703 
704 	kfree(ctx->ipad);
705 	kfree(ctx->opad);
706 	crypto_free_shash(ctx->hashalg);
707 	kfree(ctx->sdesc);
708 
709 	if (ctx->fbk_cipher) {
710 		crypto_free_aead(ctx->fbk_cipher);
711 		ctx->fbk_cipher = NULL;
712 	}
713 }
714 
715 static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
716 					  unsigned int authsize)
717 {
718 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
719 
720 	if (crypto_rfc4106_check_authsize(authsize))
721 		return -EINVAL;
722 
723 	tfm->authsize = authsize;
724 	/* Set authsize for fallback case */
725 	if (ctx->fbk_cipher)
726 		ctx->fbk_cipher->authsize = authsize;
727 
728 	return 0;
729 }
730 
731 static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
732 				      unsigned int authsize)
733 {
734 	tfm->authsize = authsize;
735 
736 	return 0;
737 }
738 
739 static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
740 					   unsigned int authsize)
741 {
742 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
743 
744 	ctx->is_trunc_hmac = true;
745 	tfm->authsize = authsize;
746 
747 	return 0;
748 }
749 
750 static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
751 {
752 	struct otx2_cpt_sdesc *sdesc;
753 	int size;
754 
755 	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
756 	sdesc = kmalloc(size, GFP_KERNEL);
757 	if (!sdesc)
758 		return NULL;
759 
760 	sdesc->shash.tfm = alg;
761 
762 	return sdesc;
763 }
764 
765 static inline void swap_data32(void *buf, u32 len)
766 {
767 	cpu_to_be32_array(buf, buf, len / 4);
768 }
769 
770 static inline void swap_data64(void *buf, u32 len)
771 {
772 	u64 *src = buf;
773 	int i = 0;
774 
775 	for (i = 0 ; i < len / 8; i++, src++)
776 		cpu_to_be64s(src);
777 }
778 
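/* Byte-swap the exported partial hash state used for ipad/opad to big-endian. */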
779 static int swap_pad(u8 mac_type, u8 *pad)
780 {
781 	struct sha512_state *sha512;
782 	struct sha256_state *sha256;
783 	struct sha1_state *sha1;
784 
785 	switch (mac_type) {
786 	case OTX2_CPT_SHA1:
787 		sha1 = (struct sha1_state *)pad;
788 		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
789 		break;
790 
791 	case OTX2_CPT_SHA256:
792 		sha256 = (struct sha256_state *)pad;
793 		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
794 		break;
795 
796 	case OTX2_CPT_SHA384:
797 	case OTX2_CPT_SHA512:
798 		sha512 = (struct sha512_state *)pad;
799 		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
800 		break;
801 
802 	default:
803 		return -EINVAL;
804 	}
805 
806 	return 0;
807 }
808 
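/*
 * Process authenc() key material: hash down an over-long authentication key,
 * store the cipher key and precompute the ipad/opad partial hashes used by
 * the HMAC part of the flexi-crypto context. For the NULL cipher only the
 * authentication key is kept.
 */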
809 static int aead_hmac_init(struct crypto_aead *cipher,
810 			  struct crypto_authenc_keys *keys)
811 {
812 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
813 	int ds = crypto_shash_digestsize(ctx->hashalg);
814 	int bs = crypto_shash_blocksize(ctx->hashalg);
815 	int authkeylen = keys->authkeylen;
816 	u8 *ipad = NULL, *opad = NULL;
817 	int icount = 0;
818 	int ret;
819 
820 	if (authkeylen > bs) {
821 		ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
822 					  authkeylen, ctx->key);
823 		if (ret)
824 			goto calc_fail;
825 
826 		authkeylen = ds;
827 	} else
828 		memcpy(ctx->key, keys->authkey, authkeylen);
829 
830 	ctx->enc_key_len = keys->enckeylen;
831 	ctx->auth_key_len = authkeylen;
832 
833 	if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL)
834 		return keys->enckeylen ? -EINVAL : 0;
835 
836 	switch (keys->enckeylen) {
837 	case AES_KEYSIZE_128:
838 		ctx->key_type = OTX2_CPT_AES_128_BIT;
839 		break;
840 	case AES_KEYSIZE_192:
841 		ctx->key_type = OTX2_CPT_AES_192_BIT;
842 		break;
843 	case AES_KEYSIZE_256:
844 		ctx->key_type = OTX2_CPT_AES_256_BIT;
845 		break;
846 	default:
847 		/* Invalid key length */
848 		return -EINVAL;
849 	}
850 
851 	memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
852 
853 	ipad = ctx->ipad;
854 	opad = ctx->opad;
855 
856 	memcpy(ipad, ctx->key, authkeylen);
857 	memset(ipad + authkeylen, 0, bs - authkeylen);
858 	memcpy(opad, ipad, bs);
859 
860 	for (icount = 0; icount < bs; icount++) {
861 		ipad[icount] ^= 0x36;
862 		opad[icount] ^= 0x5c;
863 	}
864 
865 	/*
866 	 * The partial hash calculated by the software
867 	 * algorithm is retrieved for IPAD and OPAD.
868 	 */
869 
870 	/* IPAD Calculation */
871 	crypto_shash_init(&ctx->sdesc->shash);
872 	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
873 	crypto_shash_export(&ctx->sdesc->shash, ipad);
874 	ret = swap_pad(ctx->mac_type, ipad);
875 	if (ret)
876 		goto calc_fail;
877 
878 	/* OPAD Calculation */
879 	crypto_shash_init(&ctx->sdesc->shash);
880 	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
881 	crypto_shash_export(&ctx->sdesc->shash, opad);
882 	ret = swap_pad(ctx->mac_type, opad);
883 
884 calc_fail:
885 	return ret;
886 }
887 
888 static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
889 					    const unsigned char *key,
890 					    unsigned int keylen)
891 {
892 	struct crypto_authenc_keys authenc_keys;
893 
894 	return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?:
895 	       aead_hmac_init(cipher, &authenc_keys);
896 }
897 
898 static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
899 					     const unsigned char *key,
900 					     unsigned int keylen)
901 {
902 	return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
903 }
904 
905 static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
906 					const unsigned char *key,
907 					unsigned int keylen)
908 {
909 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
910 
911 	/*
912 	 * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
913 	 * and salt (4 bytes)
914 	 */
915 	switch (keylen) {
916 	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
917 		ctx->key_type = OTX2_CPT_AES_128_BIT;
918 		ctx->enc_key_len = AES_KEYSIZE_128;
919 		break;
920 	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
921 		ctx->key_type = OTX2_CPT_AES_192_BIT;
922 		ctx->enc_key_len = AES_KEYSIZE_192;
923 		break;
924 	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
925 		ctx->key_type = OTX2_CPT_AES_256_BIT;
926 		ctx->enc_key_len = AES_KEYSIZE_256;
927 		break;
928 	default:
929 		/* Invalid key and salt length */
930 		return -EINVAL;
931 	}
932 
933 	/* Store encryption key and salt */
934 	memcpy(ctx->key, key, keylen);
935 
936 	return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
937 }
938 
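/*
 * Build the control word and flexi-crypto context for an AEAD request
 * (CBC+HMAC or GCM): select the IV source, copy keys, IV/salt and the
 * precomputed ipad/opad, and set the encryption/authentication lengths.
 */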
939 static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
940 				      u32 *argcnt)
941 {
942 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
943 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
944 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
945 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
946 	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
947 	int mac_len = crypto_aead_authsize(tfm);
948 	int ds;
949 
950 	rctx->ctrl_word.e.enc_data_offset = req->assoclen;
951 
952 	switch (ctx->cipher_type) {
953 	case OTX2_CPT_AES_CBC:
954 		if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
955 			return -EINVAL;
956 
957 		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
958 		/* Copy encryption key to context */
959 		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
960 		       ctx->enc_key_len);
961 		/* Copy IV to context */
962 		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
963 
964 		ds = crypto_shash_digestsize(ctx->hashalg);
965 		if (ctx->mac_type == OTX2_CPT_SHA384)
966 			ds = SHA512_DIGEST_SIZE;
967 		if (ctx->ipad)
968 			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
969 		if (ctx->opad)
970 			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
971 		break;
972 
973 	case OTX2_CPT_AES_GCM:
974 		if (crypto_ipsec_check_assoclen(req->assoclen))
975 			return -EINVAL;
976 
977 		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
978 		/* Copy encryption key to context */
979 		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
980 		/* Copy salt to context */
981 		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
982 		       AES_GCM_SALT_SIZE);
983 
984 		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
985 		break;
986 
987 	default:
988 		/* Unknown cipher type */
989 		return -EINVAL;
990 	}
991 	cpu_to_be64s(&rctx->ctrl_word.flags);
992 
993 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
994 	req_info->ctrl.s.se_req = 1;
995 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
996 				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
997 	if (enc) {
998 		req_info->req.opcode.s.minor = 2;
999 		req_info->req.param1 = req->cryptlen;
1000 		req_info->req.param2 = req->cryptlen + req->assoclen;
1001 	} else {
1002 		req_info->req.opcode.s.minor = 3;
1003 		req_info->req.param1 = req->cryptlen - mac_len;
1004 		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
1005 	}
1006 
1007 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
1008 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
1009 	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
1010 	fctx->enc.enc_ctrl.e.mac_len = mac_len;
1011 	cpu_to_be64s(&fctx->enc.enc_ctrl.u);
1012 
1013 	/*
1014 	 * Store the packet data information in the offset
1015 	 * control word (first 8 bytes).
1016 	 */
1017 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
1018 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
1019 	req_info->req.dlen += CONTROL_WORD_LEN;
1020 	++(*argcnt);
1021 
1022 	req_info->in[*argcnt].vptr = (u8 *)fctx;
1023 	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
1024 	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
1025 	++(*argcnt);
1026 
1027 	return 0;
1028 }
1029 
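/*
 * Build the request header for the NULL-cipher (HMAC-only) case: use the
 * HMAC major opcode and pass the authentication key as the first input
 * segment.
 */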
1030 static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
1031 				      u32 enc)
1032 {
1033 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1034 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1035 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
1036 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1037 
1038 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
1039 	req_info->ctrl.s.se_req = 1;
1040 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
1041 				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
1042 	req_info->is_trunc_hmac = ctx->is_trunc_hmac;
1043 
1044 	req_info->req.opcode.s.minor = 0;
1045 	req_info->req.param1 = ctx->auth_key_len;
1046 	req_info->req.param2 = ctx->mac_type << 8;
1047 
1048 	/* Add authentication key */
1049 	req_info->in[*argcnt].vptr = ctx->key;
1050 	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
1051 	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
1052 	++(*argcnt);
1053 }
1054 
1055 static inline int create_aead_input_list(struct aead_request *req, u32 enc)
1056 {
1057 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1058 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1059 	u32 inputlen =  req->cryptlen + req->assoclen;
1060 	u32 status, argcnt = 0;
1061 
1062 	status = create_aead_ctx_hdr(req, enc, &argcnt);
1063 	if (status)
1064 		return status;
1065 	update_input_data(req_info, req->src, inputlen, &argcnt);
1066 	req_info->in_cnt = argcnt;
1067 
1068 	return 0;
1069 }
1070 
1071 static inline void create_aead_output_list(struct aead_request *req, u32 enc,
1072 					   u32 mac_len)
1073 {
1074 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1075 	struct otx2_cpt_req_info *req_info =  &rctx->cpt_req;
1076 	u32 argcnt = 0, outputlen = 0;
1077 
1078 	if (enc)
1079 		outputlen = req->cryptlen +  req->assoclen + mac_len;
1080 	else
1081 		outputlen = req->cryptlen + req->assoclen - mac_len;
1082 
1083 	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
1084 	req_info->out_cnt = argcnt;
1085 }
1086 
1087 static inline void create_aead_null_input_list(struct aead_request *req,
1088 					       u32 enc, u32 mac_len)
1089 {
1090 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1091 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1092 	u32 inputlen, argcnt = 0;
1093 
1094 	if (enc)
1095 		inputlen =  req->cryptlen + req->assoclen;
1096 	else
1097 		inputlen =  req->cryptlen + req->assoclen - mac_len;
1098 
1099 	create_hmac_ctx_hdr(req, &argcnt, enc);
1100 	update_input_data(req_info, req->src, inputlen, &argcnt);
1101 	req_info->in_cnt = argcnt;
1102 }
1103 
1104 static inline int create_aead_null_output_list(struct aead_request *req,
1105 					       u32 enc, u32 mac_len)
1106 {
1107 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1108 	struct otx2_cpt_req_info *req_info =  &rctx->cpt_req;
1109 	struct scatterlist *dst;
1110 	u8 *ptr = NULL;
1111 	int argcnt = 0, status, offset;
1112 	u32 inputlen;
1113 
1114 	if (enc)
1115 		inputlen =  req->cryptlen + req->assoclen;
1116 	else
1117 		inputlen =  req->cryptlen + req->assoclen - mac_len;
1118 
1119 	/*
1120 	 * If source and destination are different
1121 	 * then copy payload to destination
1122 	 */
1123 	if (req->src != req->dst) {
1124 
1125 		ptr = kmalloc(inputlen, (req_info->areq->flags &
1126 					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
1127 					 GFP_KERNEL : GFP_ATOMIC);
1128 		if (!ptr)
1129 			return -ENOMEM;
1130 
1131 		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
1132 					   inputlen);
1133 		if (status != inputlen) {
1134 			status = -EINVAL;
1135 			goto error_free;
1136 		}
1137 		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
1138 					     inputlen);
1139 		if (status != inputlen) {
1140 			status = -EINVAL;
1141 			goto error_free;
1142 		}
1143 		kfree(ptr);
1144 	}
1145 
1146 	if (enc) {
1147 		/*
1148 		 * In an encryption scenario the hmac needs
1149 		 * to be appended after the payload.
1150 		 */
1151 		dst = req->dst;
1152 		offset = inputlen;
1153 		while (offset >= dst->length) {
1154 			offset -= dst->length;
1155 			dst = sg_next(dst);
1156 			if (!dst)
1157 				return -ENOENT;
1158 		}
1159 
1160 		update_output_data(req_info, dst, offset, mac_len, &argcnt);
1161 	} else {
1162 		/*
1163 		 * In a decryption scenario the hmac calculated for the received
1164 		 * payload needs to be compared with the received hmac.
1165 		 */
1166 		status = sg_copy_buffer(req->src, sg_nents(req->src),
1167 					rctx->fctx.hmac.s.hmac_recv, mac_len,
1168 					inputlen, true);
1169 		if (status != mac_len)
1170 			return -EINVAL;
1171 
1172 		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
1173 		req_info->out[argcnt].size = mac_len;
1174 		argcnt++;
1175 	}
1176 
1177 	req_info->out_cnt = argcnt;
1178 	return 0;
1179 
1180 error_free:
1181 	kfree(ptr);
1182 	return status;
1183 }
1184 
1185 static int aead_do_fallback(struct aead_request *req, bool is_enc)
1186 {
1187 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1188 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1189 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(aead);
1190 	int ret;
1191 
1192 	if (ctx->fbk_cipher) {
1193 		/* Store the cipher tfm and then use the fallback tfm */
1194 		aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
1195 		aead_request_set_callback(&rctx->fbk_req, req->base.flags,
1196 					  req->base.complete, req->base.data);
1197 		aead_request_set_crypt(&rctx->fbk_req, req->src,
1198 				       req->dst, req->cryptlen, req->iv);
1199 		aead_request_set_ad(&rctx->fbk_req, req->assoclen);
1200 		ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
1201 			       crypto_aead_decrypt(&rctx->fbk_req);
1202 	} else {
1203 		ret = -EINVAL;
1204 	}
1205 
1206 	return ret;
1207 }
1208 
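/*
 * Common AEAD encrypt/decrypt path: build the input/output lists for the
 * normal or NULL-cipher case, validate alignment and size limits (falling
 * back to the software implementation when they are exceeded) and submit
 * the request asynchronously.
 */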
1209 static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
1210 {
1211 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1212 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1213 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1214 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
1215 	struct pci_dev *pdev;
1216 	int status, cpu_num;
1217 
1218 	/* Clear control words */
1219 	rctx->ctrl_word.flags = 0;
1220 	rctx->fctx.enc.enc_ctrl.u = 0;
1221 
1222 	req_info->callback = otx2_cpt_aead_callback;
1223 	req_info->areq = &req->base;
1224 	req_info->req_type = reg_type;
1225 	req_info->is_enc = enc;
1226 	req_info->is_trunc_hmac = false;
1227 
1228 	switch (reg_type) {
1229 	case OTX2_CPT_AEAD_ENC_DEC_REQ:
1230 		status = create_aead_input_list(req, enc);
1231 		if (status)
1232 			return status;
1233 		create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
1234 		break;
1235 
1236 	case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
1237 		create_aead_null_input_list(req, enc,
1238 					    crypto_aead_authsize(tfm));
1239 		status = create_aead_null_output_list(req, enc,
1240 						crypto_aead_authsize(tfm));
1241 		if (status)
1242 			return status;
1243 		break;
1244 
1245 	default:
1246 		return -EINVAL;
1247 	}
1248 	if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
1249 		return -EINVAL;
1250 
1251 	if (!req_info->req.param2 ||
1252 	    (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
1253 	    (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
1254 		return aead_do_fallback(req, enc);
1255 
1256 	status = get_se_device(&pdev, &cpu_num);
1257 	if (status)
1258 		return status;
1259 
1260 	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
1261 
1262 	/*
1263 	 * We perform an asynchronous send and once
1264 	 * the request is completed the driver notifies us
1265 	 * through the registered callback functions.
1266 	 */
1267 	return otx2_cpt_do_request(pdev, req_info, cpu_num);
1268 }
1269 
1270 static int otx2_cpt_aead_encrypt(struct aead_request *req)
1271 {
1272 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
1273 }
1274 
1275 static int otx2_cpt_aead_decrypt(struct aead_request *req)
1276 {
1277 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
1278 }
1279 
1280 static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
1281 {
1282 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
1283 }
1284 
1285 static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
1286 {
1287 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
1288 }
1289 
1290 static struct skcipher_alg otx2_cpt_skciphers[] = { {
1291 	.base.cra_name = "xts(aes)",
1292 	.base.cra_driver_name = "cpt_xts_aes",
1293 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1294 	.base.cra_blocksize = AES_BLOCK_SIZE,
1295 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1296 	.base.cra_alignmask = 7,
1297 	.base.cra_priority = 4001,
1298 	.base.cra_module = THIS_MODULE,
1299 
1300 	.init = otx2_cpt_enc_dec_init,
1301 	.exit = otx2_cpt_skcipher_exit,
1302 	.ivsize = AES_BLOCK_SIZE,
1303 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
1304 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
1305 	.setkey = otx2_cpt_skcipher_xts_setkey,
1306 	.encrypt = otx2_cpt_skcipher_encrypt,
1307 	.decrypt = otx2_cpt_skcipher_decrypt,
1308 }, {
1309 	.base.cra_name = "cbc(aes)",
1310 	.base.cra_driver_name = "cpt_cbc_aes",
1311 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1312 	.base.cra_blocksize = AES_BLOCK_SIZE,
1313 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1314 	.base.cra_alignmask = 7,
1315 	.base.cra_priority = 4001,
1316 	.base.cra_module = THIS_MODULE,
1317 
1318 	.init = otx2_cpt_enc_dec_init,
1319 	.exit = otx2_cpt_skcipher_exit,
1320 	.ivsize = AES_BLOCK_SIZE,
1321 	.min_keysize = AES_MIN_KEY_SIZE,
1322 	.max_keysize = AES_MAX_KEY_SIZE,
1323 	.setkey = otx2_cpt_skcipher_cbc_aes_setkey,
1324 	.encrypt = otx2_cpt_skcipher_encrypt,
1325 	.decrypt = otx2_cpt_skcipher_decrypt,
1326 }, {
1327 	.base.cra_name = "ecb(aes)",
1328 	.base.cra_driver_name = "cpt_ecb_aes",
1329 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1330 	.base.cra_blocksize = AES_BLOCK_SIZE,
1331 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1332 	.base.cra_alignmask = 7,
1333 	.base.cra_priority = 4001,
1334 	.base.cra_module = THIS_MODULE,
1335 
1336 	.init = otx2_cpt_enc_dec_init,
1337 	.exit = otx2_cpt_skcipher_exit,
1338 	.ivsize = 0,
1339 	.min_keysize = AES_MIN_KEY_SIZE,
1340 	.max_keysize = AES_MAX_KEY_SIZE,
1341 	.setkey = otx2_cpt_skcipher_ecb_aes_setkey,
1342 	.encrypt = otx2_cpt_skcipher_encrypt,
1343 	.decrypt = otx2_cpt_skcipher_decrypt,
1344 }, {
1345 	.base.cra_name = "cbc(des3_ede)",
1346 	.base.cra_driver_name = "cpt_cbc_des3_ede",
1347 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1348 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1349 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1350 	.base.cra_alignmask = 7,
1351 	.base.cra_priority = 4001,
1352 	.base.cra_module = THIS_MODULE,
1353 
1354 	.init = otx2_cpt_enc_dec_init,
1355 	.exit = otx2_cpt_skcipher_exit,
1356 	.min_keysize = DES3_EDE_KEY_SIZE,
1357 	.max_keysize = DES3_EDE_KEY_SIZE,
1358 	.ivsize = DES_BLOCK_SIZE,
1359 	.setkey = otx2_cpt_skcipher_cbc_des3_setkey,
1360 	.encrypt = otx2_cpt_skcipher_encrypt,
1361 	.decrypt = otx2_cpt_skcipher_decrypt,
1362 }, {
1363 	.base.cra_name = "ecb(des3_ede)",
1364 	.base.cra_driver_name = "cpt_ecb_des3_ede",
1365 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1366 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1367 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1368 	.base.cra_alignmask = 7,
1369 	.base.cra_priority = 4001,
1370 	.base.cra_module = THIS_MODULE,
1371 
1372 	.init = otx2_cpt_enc_dec_init,
1373 	.exit = otx2_cpt_skcipher_exit,
1374 	.min_keysize = DES3_EDE_KEY_SIZE,
1375 	.max_keysize = DES3_EDE_KEY_SIZE,
1376 	.ivsize = 0,
1377 	.setkey = otx2_cpt_skcipher_ecb_des3_setkey,
1378 	.encrypt = otx2_cpt_skcipher_encrypt,
1379 	.decrypt = otx2_cpt_skcipher_decrypt,
1380 } };
1381 
1382 static struct aead_alg otx2_cpt_aeads[] = { {
1383 	.base = {
1384 		.cra_name = "authenc(hmac(sha1),cbc(aes))",
1385 		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
1386 		.cra_blocksize = AES_BLOCK_SIZE,
1387 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1388 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1389 		.cra_priority = 4001,
1390 		.cra_alignmask = 0,
1391 		.cra_module = THIS_MODULE,
1392 	},
1393 	.init = otx2_cpt_aead_cbc_aes_sha1_init,
1394 	.exit = otx2_cpt_aead_exit,
1395 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1396 	.setauthsize = otx2_cpt_aead_set_authsize,
1397 	.encrypt = otx2_cpt_aead_encrypt,
1398 	.decrypt = otx2_cpt_aead_decrypt,
1399 	.ivsize = AES_BLOCK_SIZE,
1400 	.maxauthsize = SHA1_DIGEST_SIZE,
1401 }, {
1402 	.base = {
1403 		.cra_name = "authenc(hmac(sha256),cbc(aes))",
1404 		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
1405 		.cra_blocksize = AES_BLOCK_SIZE,
1406 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1407 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1408 		.cra_priority = 4001,
1409 		.cra_alignmask = 0,
1410 		.cra_module = THIS_MODULE,
1411 	},
1412 	.init = otx2_cpt_aead_cbc_aes_sha256_init,
1413 	.exit = otx2_cpt_aead_exit,
1414 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1415 	.setauthsize = otx2_cpt_aead_set_authsize,
1416 	.encrypt = otx2_cpt_aead_encrypt,
1417 	.decrypt = otx2_cpt_aead_decrypt,
1418 	.ivsize = AES_BLOCK_SIZE,
1419 	.maxauthsize = SHA256_DIGEST_SIZE,
1420 }, {
1421 	.base = {
1422 		.cra_name = "authenc(hmac(sha384),cbc(aes))",
1423 		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
1424 		.cra_blocksize = AES_BLOCK_SIZE,
1425 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1426 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1427 		.cra_priority = 4001,
1428 		.cra_alignmask = 0,
1429 		.cra_module = THIS_MODULE,
1430 	},
1431 	.init = otx2_cpt_aead_cbc_aes_sha384_init,
1432 	.exit = otx2_cpt_aead_exit,
1433 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1434 	.setauthsize = otx2_cpt_aead_set_authsize,
1435 	.encrypt = otx2_cpt_aead_encrypt,
1436 	.decrypt = otx2_cpt_aead_decrypt,
1437 	.ivsize = AES_BLOCK_SIZE,
1438 	.maxauthsize = SHA384_DIGEST_SIZE,
1439 }, {
1440 	.base = {
1441 		.cra_name = "authenc(hmac(sha512),cbc(aes))",
1442 		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
1443 		.cra_blocksize = AES_BLOCK_SIZE,
1444 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1445 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1446 		.cra_priority = 4001,
1447 		.cra_alignmask = 0,
1448 		.cra_module = THIS_MODULE,
1449 	},
1450 	.init = otx2_cpt_aead_cbc_aes_sha512_init,
1451 	.exit = otx2_cpt_aead_exit,
1452 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1453 	.setauthsize = otx2_cpt_aead_set_authsize,
1454 	.encrypt = otx2_cpt_aead_encrypt,
1455 	.decrypt = otx2_cpt_aead_decrypt,
1456 	.ivsize = AES_BLOCK_SIZE,
1457 	.maxauthsize = SHA512_DIGEST_SIZE,
1458 }, {
1459 	.base = {
1460 		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
1461 		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
1462 		.cra_blocksize = 1,
1463 		.cra_flags = CRYPTO_ALG_ASYNC,
1464 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1465 		.cra_priority = 4001,
1466 		.cra_alignmask = 0,
1467 		.cra_module = THIS_MODULE,
1468 	},
1469 	.init = otx2_cpt_aead_ecb_null_sha1_init,
1470 	.exit = otx2_cpt_aead_exit,
1471 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1472 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1473 	.encrypt = otx2_cpt_aead_null_encrypt,
1474 	.decrypt = otx2_cpt_aead_null_decrypt,
1475 	.ivsize = 0,
1476 	.maxauthsize = SHA1_DIGEST_SIZE,
1477 }, {
1478 	.base = {
1479 		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
1480 		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
1481 		.cra_blocksize = 1,
1482 		.cra_flags = CRYPTO_ALG_ASYNC,
1483 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1484 		.cra_priority = 4001,
1485 		.cra_alignmask = 0,
1486 		.cra_module = THIS_MODULE,
1487 	},
1488 	.init = otx2_cpt_aead_ecb_null_sha256_init,
1489 	.exit = otx2_cpt_aead_exit,
1490 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1491 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1492 	.encrypt = otx2_cpt_aead_null_encrypt,
1493 	.decrypt = otx2_cpt_aead_null_decrypt,
1494 	.ivsize = 0,
1495 	.maxauthsize = SHA256_DIGEST_SIZE,
1496 }, {
1497 	.base = {
1498 		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
1499 		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
1500 		.cra_blocksize = 1,
1501 		.cra_flags = CRYPTO_ALG_ASYNC,
1502 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1503 		.cra_priority = 4001,
1504 		.cra_alignmask = 0,
1505 		.cra_module = THIS_MODULE,
1506 	},
1507 	.init = otx2_cpt_aead_ecb_null_sha384_init,
1508 	.exit = otx2_cpt_aead_exit,
1509 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1510 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1511 	.encrypt = otx2_cpt_aead_null_encrypt,
1512 	.decrypt = otx2_cpt_aead_null_decrypt,
1513 	.ivsize = 0,
1514 	.maxauthsize = SHA384_DIGEST_SIZE,
1515 }, {
1516 	.base = {
1517 		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
1518 		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
1519 		.cra_blocksize = 1,
1520 		.cra_flags = CRYPTO_ALG_ASYNC,
1521 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1522 		.cra_priority = 4001,
1523 		.cra_alignmask = 0,
1524 		.cra_module = THIS_MODULE,
1525 	},
1526 	.init = otx2_cpt_aead_ecb_null_sha512_init,
1527 	.exit = otx2_cpt_aead_exit,
1528 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1529 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1530 	.encrypt = otx2_cpt_aead_null_encrypt,
1531 	.decrypt = otx2_cpt_aead_null_decrypt,
1532 	.ivsize = 0,
1533 	.maxauthsize = SHA512_DIGEST_SIZE,
1534 }, {
1535 	.base = {
1536 		.cra_name = "rfc4106(gcm(aes))",
1537 		.cra_driver_name = "cpt_rfc4106_gcm_aes",
1538 		.cra_blocksize = 1,
1539 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1540 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1541 		.cra_priority = 4001,
1542 		.cra_alignmask = 0,
1543 		.cra_module = THIS_MODULE,
1544 	},
1545 	.init = otx2_cpt_aead_gcm_aes_init,
1546 	.exit = otx2_cpt_aead_exit,
1547 	.setkey = otx2_cpt_aead_gcm_aes_setkey,
1548 	.setauthsize = otx2_cpt_aead_gcm_set_authsize,
1549 	.encrypt = otx2_cpt_aead_encrypt,
1550 	.decrypt = otx2_cpt_aead_decrypt,
1551 	.ivsize = AES_GCM_IV_SIZE,
1552 	.maxauthsize = AES_GCM_ICV_SIZE,
1553 } };
1554 
1555 static inline int cpt_register_algs(void)
1556 {
1557 	int i, err = 0;
1558 
1559 	for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
1560 		otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1561 
1562 	err = crypto_register_skciphers(otx2_cpt_skciphers,
1563 					ARRAY_SIZE(otx2_cpt_skciphers));
1564 	if (err)
1565 		return err;
1566 
1567 	for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
1568 		otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1569 
1570 	err = crypto_register_aeads(otx2_cpt_aeads,
1571 				    ARRAY_SIZE(otx2_cpt_aeads));
1572 	if (err) {
1573 		crypto_unregister_skciphers(otx2_cpt_skciphers,
1574 					    ARRAY_SIZE(otx2_cpt_skciphers));
1575 		return err;
1576 	}
1577 
1578 	return 0;
1579 }
1580 
1581 static inline void cpt_unregister_algs(void)
1582 {
1583 	crypto_unregister_skciphers(otx2_cpt_skciphers,
1584 				    ARRAY_SIZE(otx2_cpt_skciphers));
1585 	crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
1586 }
1587 
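/* Sort helpers used to keep the SE device table ordered by PCI devfn. */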
1588 static int compare_func(const void *lptr, const void *rptr)
1589 {
1590 	const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
1591 	const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
1592 
1593 	if (ldesc->dev->devfn < rdesc->dev->devfn)
1594 		return -1;
1595 	if (ldesc->dev->devfn > rdesc->dev->devfn)
1596 		return 1;
1597 	return 0;
1598 }
1599 
1600 static void swap_func(void *lptr, void *rptr, int size)
1601 {
1602 	struct cpt_device_desc *ldesc = lptr;
1603 	struct cpt_device_desc *rdesc = rptr;
1604 
1605 	swap(*ldesc, *rdesc);
1606 }
1607 
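/*
 * Add a CPT device to the SE device table; once the expected number of
 * devices has probed, register the skcipher and AEAD algorithms with the
 * crypto API.
 */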
1608 int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
1609 			 int num_queues, int num_devices)
1610 {
1611 	int ret = 0;
1612 	int count;
1613 
1614 	mutex_lock(&mutex);
1615 	count = atomic_read(&se_devices.count);
1616 	if (count >= OTX2_CPT_MAX_LFS_NUM) {
1617 		dev_err(&pdev->dev, "No space to add a new device\n");
1618 		ret = -ENOSPC;
1619 		goto unlock;
1620 	}
1621 	se_devices.desc[count].num_queues = num_queues;
1622 	se_devices.desc[count++].dev = pdev;
1623 	atomic_inc(&se_devices.count);
1624 
1625 	if (atomic_read(&se_devices.count) == num_devices &&
1626 	    is_crypto_registered == false) {
1627 		if (cpt_register_algs()) {
1628 			dev_err(&pdev->dev,
1629 				"Error in registering crypto algorithms\n");
1630 			ret =  -EINVAL;
1631 			goto unlock;
1632 		}
1633 		try_module_get(mod);
1634 		is_crypto_registered = true;
1635 	}
1636 	sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
1637 	     compare_func, swap_func);
1638 
1639 unlock:
1640 	mutex_unlock(&mutex);
1641 	return ret;
1642 }
1643 
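/*
 * Remove a CPT device from the SE device table and unregister the
 * algorithms when the last device goes away.
 */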
1644 void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
1645 {
1646 	struct cpt_device_table *dev_tbl;
1647 	bool dev_found = false;
1648 	int i, j, count;
1649 
1650 	mutex_lock(&mutex);
1651 
1652 	dev_tbl = &se_devices;
1653 	count = atomic_read(&dev_tbl->count);
1654 	for (i = 0; i < count; i++) {
1655 		if (pdev == dev_tbl->desc[i].dev) {
1656 			for (j = i; j < count-1; j++)
1657 				dev_tbl->desc[j] = dev_tbl->desc[j+1];
1658 			dev_found = true;
1659 			break;
1660 		}
1661 	}
1662 
1663 	if (!dev_found) {
1664 		dev_err(&pdev->dev, "%s device not found\n", __func__);
1665 		goto unlock;
1666 	}
1667 	if (atomic_dec_and_test(&se_devices.count)) {
1668 		cpt_unregister_algs();
1669 		module_put(mod);
1670 		is_crypto_registered = false;
1671 	}
1672 
1673 unlock:
1674 	mutex_unlock(&mutex);
1675 }
1676