// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");

static LIST_HEAD(skcipher_algs);

static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}

static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
		return rctx->dst_nents;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;
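	/*
	 * The destination table is sized for the caller's dst entries plus
	 * one extra entry (the += 1 above) for the result buffer, which the
	 * engine fills with, among other things, the updated IV/counter that
	 * the completion handler copies back into rctx->iv.
	 */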
	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	/* dma_map_sg() returns 0, never a negative value, on failure */
	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	unsigned int __keylen;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	/*
	 * AES XTS key1 = key2 not supported by crypto engine.
	 * Revisit to request a fallback cipher in this case.
	 */
	if (IS_XTS(flags)) {
		__keylen = keylen >> 1;
		if (!memcmp(key, key + __keylen, __keylen))
			return -ENOKEY;
	} else {
		__keylen = keylen;
	}

	switch (__keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	case AES_KEYSIZE_192:
		/* not copied: AES-192 requests are served by the fallback */
		break;
	default:
		return -EINVAL;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}
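/*
 * Unlike AES, DES and 3DES keys are always programmed straight into the
 * engine: no fallback tfm is allocated for these algorithms (see
 * qce_skcipher_register_one()), so setkey only has to validate and store
 * the key material.
 */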
static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	u32 _key[6];
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	/*
	 * The crypto engine does not support any two keys
	 * being the same for triple des algorithms. The
	 * verify_skcipher_des3_key() check does not cover all of
	 * these cases, so return -ENOKEY if any two keys are the
	 * same. Revisit to see if a fallback cipher is needed to
	 * handle this condition.
	 */
	memcpy(_key, key, DES3_EDE_KEY_SIZE);
	if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
	    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
	    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
		return -ENOKEY;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	unsigned int blocksize = crypto_skcipher_blocksize(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/* CE does not handle 0 length messages */
	if (!req->cryptlen)
		return 0;

	/*
	 * ECB and CBC algorithms require message lengths to be
	 * multiples of block size.
	 */
	if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
		if (!IS_ALIGNED(req->cryptlen, blocksize))
			return -EINVAL;

	/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
	 * is not a multiple of it; pass such requests to the fallback
	 */
	if (IS_AES(rctx->flags) &&
	    (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	      req->cryptlen <= aes_sw_max_len) ||
	     (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
	      req->cryptlen % QCE_SECTOR_SIZE))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
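/*
 * The encrypt and decrypt entry points below differ only in the
 * direction flag they pass to qce_skcipher_crypt().
 */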
static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
		.blocksize = 1,
		.chunksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE * 2,
		.max_keysize = AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};
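/*
 * Allocate a qce_alg_template for one entry of skcipher_def[], fill in
 * the generic skcipher_alg fields and register it with the crypto API.
 * Registered templates are kept on the skcipher_algs list so that
 * qce_skcipher_unregister() can unregister and free them again.
 */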
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				     struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->min_keysize = def->min_keysize;
	alg->max_keysize = def->max_keysize;
	alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
		      IS_DES(def->flags) ? qce_des_setkey :
		      qce_skcipher_setkey;
	alg->encrypt = qce_skcipher_encrypt;
	alg->decrypt = qce_skcipher_decrypt;

	alg->base.cra_priority = 300;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init = qce_skcipher_init_fallback;
		alg->exit = qce_skcipher_exit;
	} else {
		alg->init = qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		/* log before freeing tmpl: alg points into it */
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};