1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/device.h> 7 #include <linux/interrupt.h> 8 #include <linux/types.h> 9 #include <crypto/aes.h> 10 #include <crypto/internal/des.h> 11 #include <crypto/internal/skcipher.h> 12 13 #include "cipher.h" 14 15 static LIST_HEAD(skcipher_algs); 16 17 static void qce_skcipher_done(void *data) 18 { 19 struct crypto_async_request *async_req = data; 20 struct skcipher_request *req = skcipher_request_cast(async_req); 21 struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); 22 struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); 23 struct qce_device *qce = tmpl->qce; 24 enum dma_data_direction dir_src, dir_dst; 25 u32 status; 26 int error; 27 bool diff_dst; 28 29 diff_dst = (req->src != req->dst) ? true : false; 30 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; 31 dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; 32 33 error = qce_dma_terminate_all(&qce->dma); 34 if (error) 35 dev_dbg(qce->dev, "skcipher dma termination error (%d)\n", 36 error); 37 38 if (diff_dst) 39 dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src); 40 dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); 41 42 sg_free_table(&rctx->dst_tbl); 43 44 error = qce_check_status(qce, &status); 45 if (error < 0) 46 dev_dbg(qce->dev, "skcipher operation error (%x)\n", status); 47 48 qce->async_req_done(tmpl->qce, error); 49 } 50 51 static int 52 qce_skcipher_async_req_handle(struct crypto_async_request *async_req) 53 { 54 struct skcipher_request *req = skcipher_request_cast(async_req); 55 struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); 56 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 57 struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); 58 struct qce_device *qce = tmpl->qce; 59 enum dma_data_direction dir_src, dir_dst; 60 struct scatterlist *sg; 61 bool diff_dst; 62 gfp_t gfp; 63 int ret; 64 65 rctx->iv = req->iv; 66 rctx->ivsize = crypto_skcipher_ivsize(skcipher); 67 rctx->cryptlen = req->cryptlen; 68 69 diff_dst = (req->src != req->dst) ? true : false; 70 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; 71 dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; 72 73 rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen); 74 if (diff_dst) 75 rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen); 76 else 77 rctx->dst_nents = rctx->src_nents; 78 if (rctx->src_nents < 0) { 79 dev_err(qce->dev, "Invalid numbers of src SG.\n"); 80 return rctx->src_nents; 81 } 82 if (rctx->dst_nents < 0) { 83 dev_err(qce->dev, "Invalid numbers of dst SG.\n"); 84 return -rctx->dst_nents; 85 } 86 87 rctx->dst_nents += 1; 88 89 gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 90 GFP_KERNEL : GFP_ATOMIC; 91 92 ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp); 93 if (ret) 94 return ret; 95 96 sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); 97 98 sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); 99 if (IS_ERR(sg)) { 100 ret = PTR_ERR(sg); 101 goto error_free; 102 } 103 104 sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); 105 if (IS_ERR(sg)) { 106 ret = PTR_ERR(sg); 107 goto error_free; 108 } 109 110 sg_mark_end(sg); 111 rctx->dst_sg = rctx->dst_tbl.sgl; 112 113 ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); 114 if (ret < 0) 115 goto error_free; 116 117 if (diff_dst) { 118 ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src); 119 if (ret < 0) 120 goto error_unmap_dst; 121 rctx->src_sg = req->src; 122 } else { 123 rctx->src_sg = rctx->dst_sg; 124 } 125 126 ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents, 127 rctx->dst_sg, rctx->dst_nents, 128 qce_skcipher_done, async_req); 129 if (ret) 130 goto error_unmap_src; 131 132 qce_dma_issue_pending(&qce->dma); 133 134 ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0); 135 if (ret) 136 goto error_terminate; 137 138 return 0; 139 140 error_terminate: 141 qce_dma_terminate_all(&qce->dma); 142 error_unmap_src: 143 if (diff_dst) 144 dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src); 145 error_unmap_dst: 146 dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); 147 error_free: 148 sg_free_table(&rctx->dst_tbl); 149 return ret; 150 } 151 152 static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, 153 unsigned int keylen) 154 { 155 struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk); 156 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 157 int ret; 158 159 if (!key || !keylen) 160 return -EINVAL; 161 162 switch (keylen) { 163 case AES_KEYSIZE_128: 164 case AES_KEYSIZE_256: 165 break; 166 default: 167 goto fallback; 168 } 169 170 ctx->enc_keylen = keylen; 171 memcpy(ctx->enc_key, key, keylen); 172 return 0; 173 fallback: 174 ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 175 if (!ret) 176 ctx->enc_keylen = keylen; 177 return ret; 178 } 179 180 static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key, 181 unsigned int keylen) 182 { 183 struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk); 184 int err; 185 186 err = verify_skcipher_des_key(ablk, key); 187 if (err) 188 return err; 189 190 ctx->enc_keylen = keylen; 191 memcpy(ctx->enc_key, key, keylen); 192 return 0; 193 } 194 195 static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key, 196 unsigned int keylen) 197 { 198 struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk); 199 int err; 200 201 err = verify_skcipher_des3_key(ablk, key); 202 if (err) 203 return err; 204 205 ctx->enc_keylen = keylen; 206 memcpy(ctx->enc_key, key, keylen); 207 return 0; 208 } 209 210 static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) 211 { 212 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 213 struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); 214 struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); 215 struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); 216 int ret; 217 218 rctx->flags = tmpl->alg_flags; 219 rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; 220 221 if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && 222 ctx->enc_keylen != AES_KEYSIZE_256) { 223 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 224 225 skcipher_request_set_sync_tfm(subreq, ctx->fallback); 226 skcipher_request_set_callback(subreq, req->base.flags, 227 NULL, NULL); 228 skcipher_request_set_crypt(subreq, req->src, req->dst, 229 req->cryptlen, req->iv); 230 ret = encrypt ? crypto_skcipher_encrypt(subreq) : 231 crypto_skcipher_decrypt(subreq); 232 skcipher_request_zero(subreq); 233 return ret; 234 } 235 236 return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); 237 } 238 239 static int qce_skcipher_encrypt(struct skcipher_request *req) 240 { 241 return qce_skcipher_crypt(req, 1); 242 } 243 244 static int qce_skcipher_decrypt(struct skcipher_request *req) 245 { 246 return qce_skcipher_crypt(req, 0); 247 } 248 249 static int qce_skcipher_init(struct crypto_skcipher *tfm) 250 { 251 struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); 252 253 memset(ctx, 0, sizeof(*ctx)); 254 crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx)); 255 256 ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base), 257 0, CRYPTO_ALG_NEED_FALLBACK); 258 return PTR_ERR_OR_ZERO(ctx->fallback); 259 } 260 261 static void qce_skcipher_exit(struct crypto_skcipher *tfm) 262 { 263 struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); 264 265 crypto_free_sync_skcipher(ctx->fallback); 266 } 267 268 struct qce_skcipher_def { 269 unsigned long flags; 270 const char *name; 271 const char *drv_name; 272 unsigned int blocksize; 273 unsigned int ivsize; 274 unsigned int min_keysize; 275 unsigned int max_keysize; 276 }; 277 278 static const struct qce_skcipher_def skcipher_def[] = { 279 { 280 .flags = QCE_ALG_AES | QCE_MODE_ECB, 281 .name = "ecb(aes)", 282 .drv_name = "ecb-aes-qce", 283 .blocksize = AES_BLOCK_SIZE, 284 .ivsize = AES_BLOCK_SIZE, 285 .min_keysize = AES_MIN_KEY_SIZE, 286 .max_keysize = AES_MAX_KEY_SIZE, 287 }, 288 { 289 .flags = QCE_ALG_AES | QCE_MODE_CBC, 290 .name = "cbc(aes)", 291 .drv_name = "cbc-aes-qce", 292 .blocksize = AES_BLOCK_SIZE, 293 .ivsize = AES_BLOCK_SIZE, 294 .min_keysize = AES_MIN_KEY_SIZE, 295 .max_keysize = AES_MAX_KEY_SIZE, 296 }, 297 { 298 .flags = QCE_ALG_AES | QCE_MODE_CTR, 299 .name = "ctr(aes)", 300 .drv_name = "ctr-aes-qce", 301 .blocksize = AES_BLOCK_SIZE, 302 .ivsize = AES_BLOCK_SIZE, 303 .min_keysize = AES_MIN_KEY_SIZE, 304 .max_keysize = AES_MAX_KEY_SIZE, 305 }, 306 { 307 .flags = QCE_ALG_AES | QCE_MODE_XTS, 308 .name = "xts(aes)", 309 .drv_name = "xts-aes-qce", 310 .blocksize = AES_BLOCK_SIZE, 311 .ivsize = AES_BLOCK_SIZE, 312 .min_keysize = AES_MIN_KEY_SIZE, 313 .max_keysize = AES_MAX_KEY_SIZE, 314 }, 315 { 316 .flags = QCE_ALG_DES | QCE_MODE_ECB, 317 .name = "ecb(des)", 318 .drv_name = "ecb-des-qce", 319 .blocksize = DES_BLOCK_SIZE, 320 .ivsize = 0, 321 .min_keysize = DES_KEY_SIZE, 322 .max_keysize = DES_KEY_SIZE, 323 }, 324 { 325 .flags = QCE_ALG_DES | QCE_MODE_CBC, 326 .name = "cbc(des)", 327 .drv_name = "cbc-des-qce", 328 .blocksize = DES_BLOCK_SIZE, 329 .ivsize = DES_BLOCK_SIZE, 330 .min_keysize = DES_KEY_SIZE, 331 .max_keysize = DES_KEY_SIZE, 332 }, 333 { 334 .flags = QCE_ALG_3DES | QCE_MODE_ECB, 335 .name = "ecb(des3_ede)", 336 .drv_name = "ecb-3des-qce", 337 .blocksize = DES3_EDE_BLOCK_SIZE, 338 .ivsize = 0, 339 .min_keysize = DES3_EDE_KEY_SIZE, 340 .max_keysize = DES3_EDE_KEY_SIZE, 341 }, 342 { 343 .flags = QCE_ALG_3DES | QCE_MODE_CBC, 344 .name = "cbc(des3_ede)", 345 .drv_name = "cbc-3des-qce", 346 .blocksize = DES3_EDE_BLOCK_SIZE, 347 .ivsize = DES3_EDE_BLOCK_SIZE, 348 .min_keysize = DES3_EDE_KEY_SIZE, 349 .max_keysize = DES3_EDE_KEY_SIZE, 350 }, 351 }; 352 353 static int qce_skcipher_register_one(const struct qce_skcipher_def *def, 354 struct qce_device *qce) 355 { 356 struct qce_alg_template *tmpl; 357 struct skcipher_alg *alg; 358 int ret; 359 360 tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); 361 if (!tmpl) 362 return -ENOMEM; 363 364 alg = &tmpl->alg.skcipher; 365 366 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); 367 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 368 def->drv_name); 369 370 alg->base.cra_blocksize = def->blocksize; 371 alg->ivsize = def->ivsize; 372 alg->min_keysize = def->min_keysize; 373 alg->max_keysize = def->max_keysize; 374 alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey : 375 IS_DES(def->flags) ? qce_des_setkey : 376 qce_skcipher_setkey; 377 alg->encrypt = qce_skcipher_encrypt; 378 alg->decrypt = qce_skcipher_decrypt; 379 380 alg->base.cra_priority = 300; 381 alg->base.cra_flags = CRYPTO_ALG_ASYNC | 382 CRYPTO_ALG_NEED_FALLBACK | 383 CRYPTO_ALG_KERN_DRIVER_ONLY; 384 alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); 385 alg->base.cra_alignmask = 0; 386 alg->base.cra_module = THIS_MODULE; 387 388 alg->init = qce_skcipher_init; 389 alg->exit = qce_skcipher_exit; 390 391 INIT_LIST_HEAD(&tmpl->entry); 392 tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER; 393 tmpl->alg_flags = def->flags; 394 tmpl->qce = qce; 395 396 ret = crypto_register_skcipher(alg); 397 if (ret) { 398 kfree(tmpl); 399 dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name); 400 return ret; 401 } 402 403 list_add_tail(&tmpl->entry, &skcipher_algs); 404 dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name); 405 return 0; 406 } 407 408 static void qce_skcipher_unregister(struct qce_device *qce) 409 { 410 struct qce_alg_template *tmpl, *n; 411 412 list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) { 413 crypto_unregister_skcipher(&tmpl->alg.skcipher); 414 list_del(&tmpl->entry); 415 kfree(tmpl); 416 } 417 } 418 419 static int qce_skcipher_register(struct qce_device *qce) 420 { 421 int ret, i; 422 423 for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) { 424 ret = qce_skcipher_register_one(&skcipher_def[i], qce); 425 if (ret) 426 goto err; 427 } 428 429 return 0; 430 err: 431 qce_skcipher_unregister(qce); 432 return ret; 433 } 434 435 const struct qce_algo_ops skcipher_ops = { 436 .type = CRYPTO_ALG_TYPE_SKCIPHER, 437 .register_algs = qce_skcipher_register, 438 .unregister_algs = qce_skcipher_unregister, 439 .async_req_handle = qce_skcipher_async_req_handle, 440 }; 441