1 
2 /*
3  * Copyright (C) 2016 Cavium, Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License
7  * as published by the Free Software Foundation.
8  */
9 
10 #include <crypto/aes.h>
11 #include <crypto/algapi.h>
12 #include <crypto/authenc.h>
13 #include <crypto/crypto_wq.h>
14 #include <crypto/des.h>
15 #include <crypto/xts.h>
16 #include <linux/crypto.h>
17 #include <linux/err.h>
18 #include <linux/list.h>
19 #include <linux/scatterlist.h>
20 
21 #include "cptvf.h"
22 #include "cptvf_algs.h"
23 
/* Table of all probed CPT VF devices, filled by cvm_crypto_init(). */
struct cpt_device_handle {
	void *cdev[MAX_DEVICES];	/* per-VF driver state (struct cpt_vf *) */
	u32 dev_count;			/* number of VFs registered so far */
};

static struct cpt_device_handle dev_handle;
30 
/*
 * Completion callback invoked by the CPT VF layer when a queued request
 * finishes.  @arg is the crypto_async_request stashed by store_cb_info().
 *
 * NOTE(review): the second argument of ->complete() is an error code, so
 * "!status" maps a non-zero device status to 0 (success) and zero to 1 —
 * presumably the hardware reports non-zero on success; confirm against
 * cptvf_do_request()'s status convention.
 */
static void cvm_callback(u32 status, void *arg)
{
	struct crypto_async_request *req = (struct crypto_async_request *)arg;

	req->complete(req, !status);
}
37 
38 static inline void update_input_iv(struct cpt_request_info *req_info,
39 				   u8 *iv, u32 enc_iv_len,
40 				   u32 *argcnt)
41 {
42 	/* Setting the iv information */
43 	req_info->in[*argcnt].vptr = (void *)iv;
44 	req_info->in[*argcnt].size = enc_iv_len;
45 	req_info->req.dlen += enc_iv_len;
46 
47 	++(*argcnt);
48 }
49 
50 static inline void update_output_iv(struct cpt_request_info *req_info,
51 				    u8 *iv, u32 enc_iv_len,
52 				    u32 *argcnt)
53 {
54 	/* Setting the iv information */
55 	req_info->out[*argcnt].vptr = (void *)iv;
56 	req_info->out[*argcnt].size = enc_iv_len;
57 	req_info->rlen += enc_iv_len;
58 
59 	++(*argcnt);
60 }
61 
62 static inline void update_input_data(struct cpt_request_info *req_info,
63 				     struct scatterlist *inp_sg,
64 				     u32 nbytes, u32 *argcnt)
65 {
66 	req_info->req.dlen += nbytes;
67 
68 	while (nbytes) {
69 		u32 len = min(nbytes, inp_sg->length);
70 		u8 *ptr = sg_virt(inp_sg);
71 
72 		req_info->in[*argcnt].vptr = (void *)ptr;
73 		req_info->in[*argcnt].size = len;
74 		nbytes -= len;
75 
76 		++(*argcnt);
77 		++inp_sg;
78 	}
79 }
80 
81 static inline void update_output_data(struct cpt_request_info *req_info,
82 				      struct scatterlist *outp_sg,
83 				      u32 nbytes, u32 *argcnt)
84 {
85 	req_info->rlen += nbytes;
86 
87 	while (nbytes) {
88 		u32 len = min(nbytes, outp_sg->length);
89 		u8 *ptr = sg_virt(outp_sg);
90 
91 		req_info->out[*argcnt].vptr = (void *)ptr;
92 		req_info->out[*argcnt].size = len;
93 		nbytes -= len;
94 		++(*argcnt);
95 		++outp_sg;
96 	}
97 }
98 
/*
 * Build the CPT control header and flexible-crypto (FC) context for a
 * cipher request, and place them as the first two gather-list entries.
 *
 * @req:    the ablkcipher request being prepared
 * @enc:    non-zero selects the encrypt minor opcode (2), zero selects
 *          decrypt (3)
 * @argcnt: in/out index of the next free in[] slot
 *
 * Always returns 0.
 */
static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct fc_context *fctx = &rctx->fctx;
	u64 *offset_control = &rctx->control_word;
	u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
	struct cpt_request_info *req_info = &rctx->cpt_req;
	u64 *ctrl_flags = NULL;

	req_info->ctrl.s.grp = 0;
	req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = SE_CORE_REQ;

	req_info->req.opcode.s.major = MAJOR_OP_FC |
					DMA_MODE_FLAG(DMA_GATHER_SCATTER);
	if (enc)
		req_info->req.opcode.s.minor = 2;
	else
		req_info->req.opcode.s.minor = 3;

	req_info->req.param1 = req->nbytes; /* Encryption Data length */
	req_info->req.param2 = 0; /*Auth data length */

	/* IV is supplied inline via the data pointer (see update_input_iv) */
	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;

	/* XTS carries two keys back to back, hence twice key_len */
	if (ctx->cipher_type == AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
	/* Swap the control flags to big endian in place; the hardware
	 * consumes this buffer directly (NOTE(review): storing a __be64
	 * back into a u64 is intentional here — confirm with sparse).
	 */
	ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = cpu_to_be64(*ctrl_flags);

	*offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
	/* Storing  Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)offset_control;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	/* Second gather entry: the FC context itself */
	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct fc_context);
	req_info->req.dlen += sizeof(struct fc_context);

	++(*argcnt);

	return 0;
}
153 
154 static inline u32 create_input_list(struct ablkcipher_request  *req, u32 enc,
155 				    u32 enc_iv_len)
156 {
157 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
158 	struct cpt_request_info *req_info = &rctx->cpt_req;
159 	u32 argcnt =  0;
160 
161 	create_ctx_hdr(req, enc, &argcnt);
162 	update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
163 	update_input_data(req_info, req->src, req->nbytes, &argcnt);
164 	req_info->incnt = argcnt;
165 
166 	return 0;
167 }
168 
/* Record the async completion callback and the request it belongs to. */
static inline void store_cb_info(struct ablkcipher_request *req,
				 struct cpt_request_info *req_info)
{
	req_info->callback = (void *)cvm_callback;
	req_info->callback_arg = (void *)&req->base;
}
175 
176 static inline void create_output_list(struct ablkcipher_request *req,
177 				      u32 enc_iv_len)
178 {
179 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
180 	struct cpt_request_info *req_info = &rctx->cpt_req;
181 	u32 argcnt = 0;
182 
183 	/* OUTPUT Buffer Processing
184 	 * AES encryption/decryption output would be
185 	 * received in the following format
186 	 *
187 	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
188 	 * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
189 	 */
190 	/* Reading IV information */
191 	update_output_iv(req_info, req->info, enc_iv_len, &argcnt);
192 	update_output_data(req_info, req->dst, req->nbytes, &argcnt);
193 	req_info->outcnt = argcnt;
194 }
195 
196 static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
197 {
198 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
199 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
200 	u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
201 	struct fc_context *fctx = &rctx->fctx;
202 	struct cpt_request_info *req_info = &rctx->cpt_req;
203 	void *cdev = NULL;
204 	int status;
205 
206 	memset(req_info, 0, sizeof(struct cpt_request_info));
207 	memset(fctx, 0, sizeof(struct fc_context));
208 	create_input_list(req, enc, enc_iv_len);
209 	create_output_list(req, enc_iv_len);
210 	store_cb_info(req, req_info);
211 	cdev = dev_handle.cdev[smp_processor_id()];
212 	status = cptvf_do_request(cdev, req_info);
213 	/* We perform an asynchronous send and once
214 	 * the request is completed the driver would
215 	 * intimate through  registered call back functions
216 	 */
217 
218 	if (status)
219 		return status;
220 	else
221 		return -EINPROGRESS;
222 }
223 
/* ablkcipher .encrypt entry point: submit @req with enc = true. */
static int cvm_encrypt(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, true);
}
228 
/* ablkcipher .decrypt entry point: submit @req with enc = false. */
static int cvm_decrypt(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, false);
}
233 
234 static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
235 		   u32 keylen)
236 {
237 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
238 	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
239 	int err;
240 	const u8 *key1 = key;
241 	const u8 *key2 = key + (keylen / 2);
242 
243 	err = xts_check_key(tfm, key, keylen);
244 	if (err)
245 		return err;
246 	ctx->key_len = keylen;
247 	memcpy(ctx->enc_key, key1, keylen / 2);
248 	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
249 	ctx->cipher_type = AES_XTS;
250 	switch (ctx->key_len) {
251 	case 32:
252 		ctx->key_type = AES_128_BIT;
253 		break;
254 	case 64:
255 		ctx->key_type = AES_256_BIT;
256 		break;
257 	default:
258 		return -EINVAL;
259 	}
260 
261 	return 0;
262 }
263 
264 static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
265 {
266 	if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
267 		ctx->key_len = keylen;
268 		switch (ctx->key_len) {
269 		case 16:
270 			ctx->key_type = AES_128_BIT;
271 			break;
272 		case 24:
273 			ctx->key_type = AES_192_BIT;
274 			break;
275 		case 32:
276 			ctx->key_type = AES_256_BIT;
277 			break;
278 		default:
279 			return -EINVAL;
280 		}
281 
282 		if (ctx->cipher_type == DES3_CBC)
283 			ctx->key_type = 0;
284 
285 		return 0;
286 	}
287 
288 	return -EINVAL;
289 }
290 
291 static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
292 		      u32 keylen, u8 cipher_type)
293 {
294 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
295 	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
296 
297 	ctx->cipher_type = cipher_type;
298 	if (!cvm_validate_keylen(ctx, keylen)) {
299 		memcpy(ctx->enc_key, key, keylen);
300 		return 0;
301 	} else {
302 		crypto_ablkcipher_set_flags(cipher,
303 					    CRYPTO_TFM_RES_BAD_KEY_LEN);
304 		return -EINVAL;
305 	}
306 }
307 
/* .setkey for cbc(aes). */
static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, AES_CBC);
}
313 
/* .setkey for ecb(aes). */
static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, AES_ECB);
}
319 
/* .setkey for cfb(aes). */
static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, AES_CFB);
}
325 
326 static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
327 			       u32 keylen)
328 {
329 	u32 flags = crypto_ablkcipher_get_flags(cipher);
330 	int err;
331 
332 	err = __des3_verify_key(&flags, key);
333 	if (unlikely(err)) {
334 		crypto_ablkcipher_set_flags(cipher, flags);
335 		return err;
336 	}
337 
338 	return cvm_setkey(cipher, key, keylen, DES3_CBC);
339 }
340 
341 static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
342 			       u32 keylen)
343 {
344 	u32 flags = crypto_ablkcipher_get_flags(cipher);
345 	int err;
346 
347 	err = __des3_verify_key(&flags, key);
348 	if (unlikely(err)) {
349 		crypto_ablkcipher_set_flags(cipher, flags);
350 		return err;
351 	}
352 
353 	return cvm_setkey(cipher, key, keylen, DES3_ECB);
354 }
355 
/* .cra_init: reserve per-request context space for cvm_req_ctx. */
static int cvm_enc_dec_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
	return 0;
}
361 
/*
 * Cipher algorithms exposed by this driver.  All entries are async
 * ablkciphers backed by the CPT engine; priority 4001 is set so these
 * implementations win over generic software ones.
 */
static struct crypto_alg algs[] = { {
	/* xts(aes): key blob holds two AES keys, hence 2x min/max size */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "xts(aes)",
	.cra_driver_name = "cavium-xts-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.setkey = cvm_xts_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* cbc(aes) */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cavium-cbc-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = cvm_cbc_aes_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* ecb(aes)
	 * NOTE(review): ECB takes no IV, yet ivsize is non-zero —
	 * presumably required by the FC context layout; confirm.
	 */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "ecb(aes)",
	.cra_driver_name = "cavium-ecb-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = cvm_ecb_aes_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* cfb(aes) */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "cfb(aes)",
	.cra_driver_name = "cavium-cfb-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = cvm_cfb_aes_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* cbc(des3_ede) */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_des3_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "cavium-cbc-des3_ede",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = cvm_cbc_des3_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* ecb(des3_ede) */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_des3_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "cavium-ecb-des3_ede",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = cvm_ecb_des3_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
} };
489 
490 static inline int cav_register_algs(void)
491 {
492 	int err = 0;
493 
494 	err = crypto_register_algs(algs, ARRAY_SIZE(algs));
495 	if (err)
496 		return err;
497 
498 	return 0;
499 }
500 
/* Unregister every algorithm previously registered from algs[]. */
static inline void cav_unregister_algs(void)
{
	crypto_unregister_algs(algs, ARRAY_SIZE(algs));
}
505 
/*
 * Record a newly probed CPT VF in the device table; once the fourth VF
 * appears (dev_count == 3 before the increment), register the crypto
 * algorithms.
 *
 * NOTE(review): dev_handle is updated without locking — presumably VF
 * probe is serialized; confirm.  If fewer than four VFs ever probe,
 * the algorithms are never registered, and on registration failure the
 * counter is not rolled back — verify both are intended.
 */
int cvm_crypto_init(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 dev_count;

	dev_count = dev_handle.dev_count;
	dev_handle.cdev[dev_count] = cptvf;
	dev_handle.dev_count++;

	if (dev_count == 3) {
		if (cav_register_algs()) {
			dev_err(&pdev->dev, "Error in registering crypto algorithms\n");
			return -EINVAL;
		}
	}

	return 0;
}
524 
/*
 * Drop one VF from the device count; when the last one goes away,
 * unregister the algorithms.
 *
 * NOTE(review): no locking, and no guard against underflow if called
 * more times than cvm_crypto_init() — presumed balanced by the caller.
 */
void cvm_crypto_exit(void)
{
	u32 dev_count;

	dev_count = --dev_handle.dev_count;
	if (!dev_count)
		cav_unregister_algs();
}
533