// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");
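/*
 * Usage sketch (assuming the driver is built as its usual qcrypto.ko
 * module; the path below is an illustration, not taken from this file):
 * because the parameter above is writable (0644), the threshold can be
 * tuned at runtime, e.g.
 *
 *	echo 512 > /sys/module/qcrypto/parameters/aes_sw_max_len
 *
 * AES requests of at most that many bytes are then routed to the
 * software fallback in qce_skcipher_crypt() below.
 */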
static LIST_HEAD(skcipher_algs);

static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	/* propagate the updated IV (e.g. the CTR counter) back to the caller */
	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}

static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG.\n");
		return rctx->dst_nents;
	}

	/* one extra entry for the result buffer appended below */
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	/* dma_map_sg() returns 0 on failure, never a negative value */
	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	/*
	 * Only 128- and 256-bit AES keys are kept for the hardware; other
	 * sizes (i.e. AES-192) are served entirely by the fallback.
	 */
	switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}

static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/*
	 * The qce hardware hangs on an AES-XTS request whose length is
	 * larger than QCE_SECTOR_SIZE but not a multiple of it; pass such
	 * requests to the fallback.
	 */
	if (IS_AES(rctx->flags) &&
	    (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	      req->cryptlen <= aes_sw_max_len) ||
	     (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
	      req->cryptlen % QCE_SECTOR_SIZE))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
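/*
 * The fallback policy encoded in the condition above, summarized: an AES
 * request is handed to the software fallback when any of the following
 * holds:
 *
 *  - the key length (per half for XTS) is neither 128 nor 256 bits,
 *    since those are the only sizes the setkey path keeps for hardware;
 *  - req->cryptlen <= aes_sw_max_len, where the round trip to the
 *    hardware is expected to cost more than computing in software;
 *  - AES-XTS with a length above QCE_SECTOR_SIZE that is not a multiple
 *    of it, which hangs the hardware.
 *
 * DES and 3DES requests are always enqueued to the hardware.
 */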
static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= 1,
		.chunksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		/* XTS keys are two concatenated AES keys, hence twice the size */
		.min_keysize	= AES_MIN_KEY_SIZE * 2,
		.max_keysize	= AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};
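/*
 * A minimal, illustrative consumer (hypothetical, not part of this
 * driver): once the table above is registered, any kernel user can reach
 * these implementations through the generic skcipher API. Error handling
 * is elided, and key, src_sg, dst_sg and len are placeholders:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 iv[AES_BLOCK_SIZE] = { };
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 *
 * Whether "cbc-aes-qce" wins over other cbc(aes) providers depends on
 * its cra_priority (300, set below).
 */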
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				     struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->min_keysize = def->min_keysize;
	alg->max_keysize = def->max_keysize;
	alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
		      IS_DES(def->flags) ? qce_des_setkey :
		      qce_skcipher_setkey;
	alg->encrypt = qce_skcipher_encrypt;
	alg->decrypt = qce_skcipher_decrypt;

	alg->base.cra_priority = 300;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init = qce_skcipher_init_fallback;
		alg->exit = qce_skcipher_exit;
	} else {
		alg->init = qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		/* free only after the last use of alg, which points into tmpl */
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};
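/*
 * How this ops table is consumed (a sketch based on the rest of the qce
 * driver, not on this file alone): the QCE core is expected to walk its
 * list of qce_algo_ops at probe time, call ->register_algs() for each
 * entry, and later dispatch queued crypto_async_requests of .type
 * CRYPTO_ALG_TYPE_SKCIPHER to ->async_req_handle(), with
 * qce->async_req_done() signalling completion back to the request queue.
 */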