// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/internal/des.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include "cptvf.h"
#include "cptvf_algs.h"

struct cpt_device_handle {
        void *cdev[MAX_DEVICES];
        u32 dev_count;
};

static struct cpt_device_handle dev_handle;

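/*
 * Completion callback registered with the CPT request layer; it hands
 * the final hardware status back to the crypto API request.
 */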
static void cvm_callback(u32 status, void *arg)
{
        struct crypto_async_request *req = (struct crypto_async_request *)arg;

        crypto_request_complete(req, !status);
}

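/*
 * Add the IV as the next entry of the input gather list and account for
 * it in the input data length.
 */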
static inline void update_input_iv(struct cpt_request_info *req_info,
                                   u8 *iv, u32 enc_iv_len,
                                   u32 *argcnt)
{
        /* Set the IV information */
        req_info->in[*argcnt].vptr = (void *)iv;
        req_info->in[*argcnt].size = enc_iv_len;
        req_info->req.dlen += enc_iv_len;

        ++(*argcnt);
}

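/*
 * Add the IV as the next entry of the output scatter list and account
 * for it in the expected result length.
 */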
static inline void update_output_iv(struct cpt_request_info *req_info,
                                    u8 *iv, u32 enc_iv_len,
                                    u32 *argcnt)
{
        /* Set the IV information */
        req_info->out[*argcnt].vptr = (void *)iv;
        req_info->out[*argcnt].size = enc_iv_len;
        req_info->rlen += enc_iv_len;

        ++(*argcnt);
}

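/*
 * Walk the source scatterlist, adding one input gather entry per
 * segment until nbytes of data are covered.
 */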
static inline void update_input_data(struct cpt_request_info *req_info,
                                     struct scatterlist *inp_sg,
                                     u32 nbytes, u32 *argcnt)
{
        req_info->req.dlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, inp_sg->length);
                u8 *ptr = sg_virt(inp_sg);

                req_info->in[*argcnt].vptr = (void *)ptr;
                req_info->in[*argcnt].size = len;
                nbytes -= len;

                ++(*argcnt);
                ++inp_sg;
        }
}

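/*
 * Walk the destination scatterlist, adding one output scatter entry per
 * segment until nbytes of data are covered.
 */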
static inline void update_output_data(struct cpt_request_info *req_info,
                                      struct scatterlist *outp_sg,
                                      u32 nbytes, u32 *argcnt)
{
        req_info->rlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, outp_sg->length);
                u8 *ptr = sg_virt(outp_sg);

                req_info->out[*argcnt].vptr = (void *)ptr;
                req_info->out[*argcnt].size = len;
                nbytes -= len;
                ++(*argcnt);
                ++outp_sg;
        }
}

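/*
 * Build the request header: group/DMA mode, the FC major opcode with an
 * encrypt or decrypt minor opcode, the data lengths and the crypto
 * context (cipher type, key and IV source). The offset control word and
 * the context become the first two entries of the input gather list.
 */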
static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
                                 u32 *argcnt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct fc_context *fctx = &rctx->fctx;
        u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
        struct cpt_request_info *req_info = &rctx->cpt_req;
        __be64 *ctrl_flags = NULL;
        __be64 *offset_control;

        req_info->ctrl.s.grp = 0;
        req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = SE_CORE_REQ;

        req_info->req.opcode.s.major = MAJOR_OP_FC |
                                       DMA_MODE_FLAG(DMA_GATHER_SCATTER);
        if (enc)
                req_info->req.opcode.s.minor = 2;
        else
                req_info->req.opcode.s.minor = 3;

        req_info->req.param1 = req->cryptlen; /* Encryption data length */
        req_info->req.param2 = 0; /* Auth data length */

        fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
        fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
        fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;

        if (ctx->cipher_type == AES_XTS)
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
        else
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
        ctrl_flags = (__be64 *)&fctx->enc.enc_ctrl.flags;
        *ctrl_flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);

        offset_control = (__be64 *)&rctx->control_word;
        *offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
        /*
         * Store the packet data information in the first 8 bytes of the
         * offset control word.
         */
        req_info->in[*argcnt].vptr = (u8 *)offset_control;
        req_info->in[*argcnt].size = CONTROL_WORD_LEN;
        req_info->req.dlen += CONTROL_WORD_LEN;
        ++(*argcnt);

        req_info->in[*argcnt].vptr = (u8 *)fctx;
        req_info->in[*argcnt].size = sizeof(struct fc_context);
        req_info->req.dlen += sizeof(struct fc_context);

        ++(*argcnt);

        return 0;
}

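/*
 * Assemble the full input gather list for a request: context header,
 * IV, then the source data.
 */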
static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
                                    u32 enc_iv_len)
{
        struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct cpt_request_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;

        create_ctx_hdr(req, enc, &argcnt);
        update_input_iv(req_info, req->iv, enc_iv_len, &argcnt);
        update_input_data(req_info, req->src, req->cryptlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

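/* Record the completion callback and its argument for this request. */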
static inline void store_cb_info(struct skcipher_request *req,
                                 struct cpt_request_info *req_info)
{
        req_info->callback = (void *)cvm_callback;
        req_info->callback_arg = (void *)&req->base;
}

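/*
 * Assemble the output scatter list for a request: the updated IV
 * followed by the destination data.
 */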
static inline void create_output_list(struct skcipher_request *req,
                                      u32 enc_iv_len)
{
        struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct cpt_request_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;

        /*
         * Output buffer processing: the AES encryption/decryption
         * output is received in the following format:
         *
         * |--------IV--------|----ENCRYPTED/DECRYPTED DATA----|
         * [ enc_iv_len bytes ][       req->cryptlen bytes     ]
         */
        /* Read the IV information */
        update_output_iv(req_info, req->iv, enc_iv_len, &argcnt);
        update_output_data(req_info, req->dst, req->cryptlen, &argcnt);
        req_info->outcnt = argcnt;
}

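/*
 * Common encrypt/decrypt path: build the input and output lists, pick
 * the device handle indexed by the current CPU and submit the request
 * to the CPT virtual function.
 */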
static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req);
        u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
        struct fc_context *fctx = &rctx->fctx;
        struct cpt_request_info *req_info = &rctx->cpt_req;
        void *cdev = NULL;
        int status;

        memset(req_info, 0, sizeof(struct cpt_request_info));
        req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
        memset(fctx, 0, sizeof(struct fc_context));
        create_input_list(req, enc, enc_iv_len);
        create_output_list(req, enc_iv_len);
        store_cb_info(req, req_info);
        cdev = dev_handle.cdev[smp_processor_id()];
        status = cptvf_do_request(cdev, req_info);
        /*
         * The send is asynchronous; once the request completes, the
         * driver notifies the caller through the registered callback.
         */

        if (status)
                return status;
        else
                return -EINPROGRESS;
}

static int cvm_encrypt(struct skcipher_request *req)
{
        return cvm_enc_dec(req, true);
}

static int cvm_decrypt(struct skcipher_request *req)
{
        return cvm_enc_dec(req, false);
}

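/*
 * An XTS key carries two AES keys of equal size; after xts_verify_key()
 * validates the combined length, store the second half at KEY2_OFFSET
 * within the context key buffer.
 */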
static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
                          u32 keylen)
{
        struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;
        const u8 *key1 = key;
        const u8 *key2 = key + (keylen / 2);

        err = xts_verify_key(cipher, key, keylen);
        if (err)
                return err;
        ctx->key_len = keylen;
        memcpy(ctx->enc_key, key1, keylen / 2);
        memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
        ctx->cipher_type = AES_XTS;
        switch (ctx->key_len) {
        case 32:
                ctx->key_type = AES_128_BIT;
                break;
        case 64:
                ctx->key_type = AES_256_BIT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

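/*
 * Map an AES key length to the hardware key-type encoding; for 3DES CBC
 * the AES key-type field is cleared instead.
 */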
static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
{
        if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
                ctx->key_len = keylen;
                switch (ctx->key_len) {
                case 16:
                        ctx->key_type = AES_128_BIT;
                        break;
                case 24:
                        ctx->key_type = AES_192_BIT;
                        break;
                case 32:
                        ctx->key_type = AES_256_BIT;
                        break;
                default:
                        return -EINVAL;
                }

                if (ctx->cipher_type == DES3_CBC)
                        ctx->key_type = 0;

                return 0;
        }

        return -EINVAL;
}

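/* Common setkey helper shared by the AES and 3DES variants below. */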
static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
                      u32 keylen, u8 cipher_type)
{
        struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher);

        ctx->cipher_type = cipher_type;
        if (!cvm_validate_keylen(ctx, keylen)) {
                memcpy(ctx->enc_key, key, keylen);
                return 0;
        } else {
                return -EINVAL;
        }
}

static int cvm_cbc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              u32 keylen)
{
        return cvm_setkey(cipher, key, keylen, AES_CBC);
}

static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              u32 keylen)
{
        return cvm_setkey(cipher, key, keylen, AES_ECB);
}

static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              u32 keylen)
{
        return cvm_setkey(cipher, key, keylen, AES_CFB);
}

static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
                               u32 keylen)
{
        return verify_skcipher_des3_key(cipher, key) ?:
               cvm_setkey(cipher, key, keylen, DES3_CBC);
}

static int cvm_ecb_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
                               u32 keylen)
{
        return verify_skcipher_des3_key(cipher, key) ?:
               cvm_setkey(cipher, key, keylen, DES3_ECB);
}

static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
{
        crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct cvm_req_ctx));

        return 0;
}

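/*
 * skcipher algorithms exported to the crypto API. All entries are
 * marked ASYNC since completion is signalled from the device callback.
 */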
static struct skcipher_alg algs[] = { {
        .base.cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_name = "xts(aes)",
        .base.cra_driver_name = "cavium-xts-aes",
        .base.cra_module = THIS_MODULE,

        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = 2 * AES_MIN_KEY_SIZE,
        .max_keysize = 2 * AES_MAX_KEY_SIZE,
        .setkey = cvm_xts_setkey,
        .encrypt = cvm_encrypt,
        .decrypt = cvm_decrypt,
        .init = cvm_enc_dec_init,
}, {
        .base.cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_name = "cbc(aes)",
        .base.cra_driver_name = "cavium-cbc-aes",
        .base.cra_module = THIS_MODULE,

        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = cvm_cbc_aes_setkey,
        .encrypt = cvm_encrypt,
        .decrypt = cvm_decrypt,
        .init = cvm_enc_dec_init,
}, {
        .base.cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_name = "ecb(aes)",
        .base.cra_driver_name = "cavium-ecb-aes",
        .base.cra_module = THIS_MODULE,

        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = cvm_ecb_aes_setkey,
        .encrypt = cvm_encrypt,
        .decrypt = cvm_decrypt,
        .init = cvm_enc_dec_init,
}, {
        .base.cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_name = "cfb(aes)",
        .base.cra_driver_name = "cavium-cfb-aes",
        .base.cra_module = THIS_MODULE,

        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = cvm_cfb_aes_setkey,
        .encrypt = cvm_encrypt,
        .decrypt = cvm_decrypt,
        .init = cvm_enc_dec_init,
}, {
        .base.cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_name = "cbc(des3_ede)",
        .base.cra_driver_name = "cavium-cbc-des3_ede",
        .base.cra_module = THIS_MODULE,

        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .setkey = cvm_cbc_des3_setkey,
        .encrypt = cvm_encrypt,
        .decrypt = cvm_decrypt,
        .init = cvm_enc_dec_init,
}, {
        .base.cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_name = "ecb(des3_ede)",
        .base.cra_driver_name = "cavium-ecb-des3_ede",
        .base.cra_module = THIS_MODULE,

        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .setkey = cvm_ecb_des3_setkey,
        .encrypt = cvm_encrypt,
        .decrypt = cvm_decrypt,
        .init = cvm_enc_dec_init,
} };

static inline int cav_register_algs(void)
{
        return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}

static inline void cav_unregister_algs(void)
{
        crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}

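/*
 * Record each probed VF in the device handle table; the algorithms are
 * registered once the fourth VF (dev_count == 3) is added.
 */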
int cvm_crypto_init(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;
        u32 dev_count;

        dev_count = dev_handle.dev_count;
        dev_handle.cdev[dev_count] = cptvf;
        dev_handle.dev_count++;

        if (dev_count == 3) {
                if (cav_register_algs()) {
                        dev_err(&pdev->dev, "Error in registering crypto algorithms\n");
                        return -EINVAL;
                }
        }

        return 0;
}

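/*
 * Drop one VF from the count and unregister the algorithms when the
 * last device goes away.
 */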
void cvm_crypto_exit(void)
{
        u32 dev_count;

        dev_count = --dev_handle.dev_count;
        if (!dev_count)
                cav_unregister_algs();
}