/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true);

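	/*
	 * Note: desc_inline_query() sets bit i of inl_mask when data_len[i]
	 * (the split authentication key for bit 0, the cipher key for bit 1)
	 * still fits in the shared descriptor once the fixed job I/O
	 * commands are accounted for. Keys that fit are inlined as immediate
	 * data; the others are referenced through their DMA addresses. The
	 * same query is repeated below for the decrypt and givencrypt
	 * descriptors, whose fixed lengths differ.
	 */
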
skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

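/*
 * Layout of ctx->key (also backing ctx->key_dma) after aead_setkey():
 *
 *	+--------------------------+----------------------------+
 *	| split auth key (padded)  | encryption key [ | nonce ] |
 *	+--------------------------+----------------------------+
 *	0                          ctx->adata.keylen_pad
 *
 * For rfc3686 the 4-byte nonce arrives at the tail of the encryption
 * key, which is where aead_set_sh_desc() picks it up from.
 */
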
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
	int ret = 0;

	memcpy(ctx->key, key, keylen);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
					ivsize, is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[GIVENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
					  ctx->sh_desc_givenc);
		if (ret) {
			dev_err(jrdev, "driver givenc context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

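/*
 * Unlike the AEAD path, plain blkcipher keys are small (at most
 * AES_MAX_KEY_SIZE plus nonce), so they are unconditionally inlined in the
 * shared descriptors (cdata.key_inline above) and no desc_inline_query()
 * round is needed here.
 */
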
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* xts ablkcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
#define CAAM_QI_MAX_AEAD_SG						\
	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /	\
	 sizeof(struct qm_sg_entry))
	struct qm_sg_entry sgt[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
#define CAAM_QI_MAX_ABLKCIPHER_SG					\
	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
	 sizeof(struct qm_sg_entry))
	struct qm_sg_entry sgt[0];
};

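/*
 * Each extended descriptor is carved out of a single qi_cache object, so
 * the trailing hardware S/G table gets whatever room CAAM_QI_MEMCACHE_SIZE
 * leaves after the fixed members. The CAAM_QI_MAX_*_SG bounds above encode
 * exactly that, and the *_edesc_alloc() routines below reject requests
 * whose S/G entry count would exceed them.
 */
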
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core initialized drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else if (type == DECRYPT)
				desc = ctx->sh_desc_dec;
			else /* (type == GIVENCRYPT) */
				desc = ctx->sh_desc_givenc;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

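/*
 * Driver contexts are created lazily, on the first request of each
 * operation type; the double-checked locking above keeps concurrent first
 * requests from racing. Once created they persist for the session, and the
 * setkey paths refresh them in place via caam_drv_ctx_update() instead of
 * tearing them down.
 */
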
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum optype op_type, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize,
				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
							 DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		caam_jr_strstatus(qidev, status);
		ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
		ivsize = crypto_aead_ivsize(aead);
		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, op_type, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
			qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

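/*
 * Resulting frame layout, as built above: fd_sgt[1] (input) points at the
 * qm_sg table [assoclen word | IV (if any) | src segments], while fd_sgt[0]
 * (output) points either straight at a single dst segment or at the dst
 * part of the same table. For in-place requests the output skips the
 * assoclen/IV entries and reuses the src entries.
 */
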
static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
	struct ablkcipher_request *req = drv_req->app_ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int ecode = 0;

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	/* translate the raw CAAM status word into an errno for the caller */
	if (status) {
		caam_jr_strstatus(qidev, status);
		ecode = -EIO;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(qidev, edesc, req);
	qi_cache_free(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
				 ivsize, 0);

	ablkcipher_request_complete(req, ecode);
}

static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, bool encrypt)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool in_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, qm_sg_ents;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;
		qm_sg_ents = 0;
	} else {
		in_contig = false;
		qm_sg_ents = 1 + mapped_src_nents;
	}
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (!in_contig) {
		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
	}

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (!in_contig)
		dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
					  ivsize + req->nbytes, 0);
	else
		dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
				      0);

	if (req->src == req->dst) {
		if (!in_contig)
			dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
					     sizeof(*sg_table), req->nbytes, 0);
		else
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 req->nbytes, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->nbytes, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->nbytes, 0);
	}

	return edesc;
}

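/*
 * The in_contig test above is a small optimization: when the mapped IV
 * happens to sit immediately before a single source segment in bus address
 * space, the input side can be described by one compound entry covering
 * IV + data, and the qm_sg table is skipped entirely for it.
 */
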
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool out_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct qm_sg_entry *sg_table, *fd_sgt;
	int dst_sg_idx, qm_sg_ents;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	}

	iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = qm_sg_ents;
	if (mapped_dst_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->dst)) {
		out_contig = true;
	} else {
		out_contig = false;
		qm_sg_ents += 1 + mapped_dst_nents;
	}

	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (!edesc) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (mapped_src_nents > 1)
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);

	if (!out_contig) {
		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx + 1, 0);
	}

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (mapped_src_nents > 1)
		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
				 req->nbytes, 0);

	if (!out_contig)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), ivsize + req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 ivsize + req->nbytes, 0);

	return edesc;
}

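/*
 * Mirror image of the in_contig case: here it is the *output* side that may
 * be contiguous, since the generated IV (creq->giv) is written by the device
 * right before the ciphertext. When iv_dma + ivsize lands exactly at the
 * single dst segment, one compound output entry suffices.
 */
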
static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, false);
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

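/*
 * caam_congested is the QI backend's congestion flag (exported from qi.c in
 * the usual layout of this driver); failing fast with -EAGAIN above pushes
 * back on callers instead of piling more frames onto a congested portal.
 */
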
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam-qi",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam-qi",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

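/*
 * CRYPTO_ALG_TYPE_GIVCIPHER entries ship their own IV generation through
 * ->givencrypt ("<built-in>"), while ctr(aes) and xts(aes) fall back to the
 * generic geniv templates named above (chainiv/eseqiv). The blocksize of 1
 * on the CTR entries reflects that counter mode acts as a stream cipher.
 */
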
static struct caam_aead_alg driver_aeads[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
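	/*
	 * The pattern repeats below for every digest/cipher combination:
	 * each plain authenc() entry is paired with an echainiv() twin whose
	 * .caam.geniv flag routes it through the givencrypt shared
	 * descriptor built in aead_set_sh_desc().
	 */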
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
};
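
/*
 * Each entry above maps a generic crypto API algorithm name onto CAAM
 * descriptor operation types: class1_alg_type selects the cipher (class 1
 * CHA) and class2_alg_type the authentication algorithm (class 2 CHA),
 * while .geniv = true marks the IV-generating (echainiv) variants, which
 * use the givencrypt shared descriptor instead of the plain encrypt one.
 */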

struct caam_crypto_alg {
	struct list_head entry;
	struct crypto_alg crypto_alg;
	struct caam_alg_entry caam;
};

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;
	ctx->drv_ctx[GIVENCRYPT] = NULL;

	return 0;
}
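
/*
 * Note: the three per-operation QI driver contexts are left NULL here on
 * purpose. They are expected to be created lazily, on the first request of
 * each type, serialized by ctx->lock (hence the spin_lock_init() above);
 * once a context exists, key changes only refresh its shared descriptor,
 * and teardown releases it via caam_drv_ctx_rel() in caam_exit_common().
 */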
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
							crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
			 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static struct list_head alg_list;
static void __exit caam_qi_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
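
/*
 * Module init below registers with the crypto API only what the hardware
 * can actually run: the CHA version/instantiation registers report which
 * accelerator blocks (DES, AES, MD) are present and of what flavor, so
 * DES/3DES and AES templates are skipped on devices lacking the block,
 * GCM is skipped on low-power (LP) AES blocks, and digest sizes above
 * md_limit are skipped on MD-LP256 parts.
 */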
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");
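
/*
 * Usage sketch (illustrative only, not part of this driver): once the
 * module is loaded, kernel users reach these algorithms through the
 * generic AEAD API; with CAAM_CRA_PRIORITY (2000) this implementation is
 * preferred over software ones. The key passed below is assumed to be in
 * the authenc() key format parsed by crypto_authenc_extractkeys().
 *
 *	struct crypto_aead *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(des3_ede))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 */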