/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |    (PDB)    |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
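	/*
	 * Note: the SHRD-conditional jump above targets the load that
	 * follows, so that load only runs when the descriptor was found
	 * already shared (keys already in place); on the non-shared path
	 * the unconditional jump just appended skips over it.
	 */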
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
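	/*
	 * The two loads above should route the ivsize bytes now sitting in
	 * the output FIFO into the class 2 (authentication) engine as
	 * message data, so the generated IV is covered by the ICV as well
	 * as being written out ahead of the ciphertext.
	 */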

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, key, authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif
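	/*
	 * At this point ctx->key holds the MDHA split key (padded to
	 * split_key_pad_len) immediately followed by the raw encryption
	 * key; the single mapping above covers both halves, which is what
	 * append_key_aead() relies on when it references the keys by DMA
	 * address instead of inlining them.
	 */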

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing error propagation */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
639 dev_err(jrdev, "unable to map shared descriptor\n"); 640 return -ENOMEM; 641 } 642 643 #ifdef DEBUG 644 print_hex_dump(KERN_ERR, 645 "ablkcipher dec shdesc@"__stringify(__LINE__)": ", 646 DUMP_PREFIX_ADDRESS, 16, 4, desc, 647 desc_bytes(desc), 1); 648 #endif 649 650 return ret; 651 } 652 653 /* 654 * aead_edesc - s/w-extended aead descriptor 655 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist 656 * @assoc_chained: if source is chained 657 * @src_nents: number of segments in input scatterlist 658 * @src_chained: if source is chained 659 * @dst_nents: number of segments in output scatterlist 660 * @dst_chained: if destination is chained 661 * @iv_dma: dma address of iv for checking continuity and link table 662 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 663 * @sec4_sg_bytes: length of dma mapped sec4_sg space 664 * @sec4_sg_dma: bus physical mapped address of h/w link table 665 * @hw_desc: the h/w job descriptor followed by any referenced link tables 666 */ 667 struct aead_edesc { 668 int assoc_nents; 669 bool assoc_chained; 670 int src_nents; 671 bool src_chained; 672 int dst_nents; 673 bool dst_chained; 674 dma_addr_t iv_dma; 675 int sec4_sg_bytes; 676 dma_addr_t sec4_sg_dma; 677 struct sec4_sg_entry *sec4_sg; 678 u32 hw_desc[0]; 679 }; 680 681 /* 682 * ablkcipher_edesc - s/w-extended ablkcipher descriptor 683 * @src_nents: number of segments in input scatterlist 684 * @src_chained: if source is chained 685 * @dst_nents: number of segments in output scatterlist 686 * @dst_chained: if destination is chained 687 * @iv_dma: dma address of iv for checking continuity and link table 688 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 689 * @sec4_sg_bytes: length of dma mapped sec4_sg space 690 * @sec4_sg_dma: bus physical mapped address of h/w link table 691 * @hw_desc: the h/w job descriptor followed by any referenced link tables 692 */ 693 struct ablkcipher_edesc { 694 int src_nents; 695 bool src_chained; 696 int dst_nents; 697 bool dst_chained; 698 dma_addr_t iv_dma; 699 int sec4_sg_bytes; 700 dma_addr_t sec4_sg_dma; 701 struct sec4_sg_entry *sec4_sg; 702 u32 hw_desc[0]; 703 }; 704 705 static void caam_unmap(struct device *dev, struct scatterlist *src, 706 struct scatterlist *dst, int src_nents, 707 bool src_chained, int dst_nents, bool dst_chained, 708 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, 709 int sec4_sg_bytes) 710 { 711 if (dst != src) { 712 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE, 713 src_chained); 714 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE, 715 dst_chained); 716 } else { 717 dma_unmap_sg_chained(dev, src, src_nents ? 
: 1, 718 DMA_BIDIRECTIONAL, src_chained); 719 } 720 721 if (iv_dma) 722 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 723 if (sec4_sg_bytes) 724 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, 725 DMA_TO_DEVICE); 726 } 727 728 static void aead_unmap(struct device *dev, 729 struct aead_edesc *edesc, 730 struct aead_request *req) 731 { 732 struct crypto_aead *aead = crypto_aead_reqtfm(req); 733 int ivsize = crypto_aead_ivsize(aead); 734 735 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents, 736 DMA_TO_DEVICE, edesc->assoc_chained); 737 738 caam_unmap(dev, req->src, req->dst, 739 edesc->src_nents, edesc->src_chained, edesc->dst_nents, 740 edesc->dst_chained, edesc->iv_dma, ivsize, 741 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 742 } 743 744 static void ablkcipher_unmap(struct device *dev, 745 struct ablkcipher_edesc *edesc, 746 struct ablkcipher_request *req) 747 { 748 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 749 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 750 751 caam_unmap(dev, req->src, req->dst, 752 edesc->src_nents, edesc->src_chained, edesc->dst_nents, 753 edesc->dst_chained, edesc->iv_dma, ivsize, 754 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 755 } 756 757 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 758 void *context) 759 { 760 struct aead_request *req = context; 761 struct aead_edesc *edesc; 762 #ifdef DEBUG 763 struct crypto_aead *aead = crypto_aead_reqtfm(req); 764 struct caam_ctx *ctx = crypto_aead_ctx(aead); 765 int ivsize = crypto_aead_ivsize(aead); 766 767 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 768 #endif 769 770 edesc = (struct aead_edesc *)((char *)desc - 771 offsetof(struct aead_edesc, hw_desc)); 772 773 if (err) { 774 char tmp[CAAM_ERROR_STR_MAX]; 775 776 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 777 } 778 779 aead_unmap(jrdev, edesc, req); 780 781 #ifdef DEBUG 782 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", 783 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 784 req->assoclen , 1); 785 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 786 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, 787 edesc->src_nents ? 100 : ivsize, 1); 788 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 789 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 790 edesc->src_nents ? 
100 : req->cryptlen + 791 ctx->authsize + 4, 1); 792 #endif 793 794 kfree(edesc); 795 796 aead_request_complete(req, err); 797 } 798 799 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 800 void *context) 801 { 802 struct aead_request *req = context; 803 struct aead_edesc *edesc; 804 #ifdef DEBUG 805 struct crypto_aead *aead = crypto_aead_reqtfm(req); 806 struct caam_ctx *ctx = crypto_aead_ctx(aead); 807 int ivsize = crypto_aead_ivsize(aead); 808 809 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 810 #endif 811 812 edesc = (struct aead_edesc *)((char *)desc - 813 offsetof(struct aead_edesc, hw_desc)); 814 815 #ifdef DEBUG 816 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 817 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 818 ivsize, 1); 819 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 820 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), 821 req->cryptlen - ctx->authsize, 1); 822 #endif 823 824 if (err) { 825 char tmp[CAAM_ERROR_STR_MAX]; 826 827 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 828 } 829 830 aead_unmap(jrdev, edesc, req); 831 832 /* 833 * verify hw auth check passed else return -EBADMSG 834 */ 835 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) 836 err = -EBADMSG; 837 838 #ifdef DEBUG 839 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ", 840 DUMP_PREFIX_ADDRESS, 16, 4, 841 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), 842 sizeof(struct iphdr) + req->assoclen + 843 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + 844 ctx->authsize + 36, 1); 845 if (!err && edesc->sec4_sg_bytes) { 846 struct scatterlist *sg = sg_last(req->src, edesc->src_nents); 847 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ", 848 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), 849 sg->length + ctx->authsize + 16, 1); 850 } 851 #endif 852 853 kfree(edesc); 854 855 aead_request_complete(req, err); 856 } 857 858 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 859 void *context) 860 { 861 struct ablkcipher_request *req = context; 862 struct ablkcipher_edesc *edesc; 863 #ifdef DEBUG 864 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 865 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 866 867 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 868 #endif 869 870 edesc = (struct ablkcipher_edesc *)((char *)desc - 871 offsetof(struct ablkcipher_edesc, hw_desc)); 872 873 if (err) { 874 char tmp[CAAM_ERROR_STR_MAX]; 875 876 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 877 } 878 879 #ifdef DEBUG 880 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 881 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 882 edesc->src_nents > 1 ? 100 : ivsize, 1); 883 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 884 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 885 edesc->dst_nents > 1 ? 
100 : req->nbytes, 1); 886 #endif 887 888 ablkcipher_unmap(jrdev, edesc, req); 889 kfree(edesc); 890 891 ablkcipher_request_complete(req, err); 892 } 893 894 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 895 void *context) 896 { 897 struct ablkcipher_request *req = context; 898 struct ablkcipher_edesc *edesc; 899 #ifdef DEBUG 900 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 901 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 902 903 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 904 #endif 905 906 edesc = (struct ablkcipher_edesc *)((char *)desc - 907 offsetof(struct ablkcipher_edesc, hw_desc)); 908 if (err) { 909 char tmp[CAAM_ERROR_STR_MAX]; 910 911 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 912 } 913 914 #ifdef DEBUG 915 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 916 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 917 ivsize, 1); 918 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 919 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 920 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 921 #endif 922 923 ablkcipher_unmap(jrdev, edesc, req); 924 kfree(edesc); 925 926 ablkcipher_request_complete(req, err); 927 } 928 929 /* 930 * Fill in aead job descriptor 931 */ 932 static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, 933 struct aead_edesc *edesc, 934 struct aead_request *req, 935 bool all_contig, bool encrypt) 936 { 937 struct crypto_aead *aead = crypto_aead_reqtfm(req); 938 struct caam_ctx *ctx = crypto_aead_ctx(aead); 939 int ivsize = crypto_aead_ivsize(aead); 940 int authsize = ctx->authsize; 941 u32 *desc = edesc->hw_desc; 942 u32 out_options = 0, in_options; 943 dma_addr_t dst_dma, src_dma; 944 int len, sec4_sg_index = 0; 945 946 #ifdef DEBUG 947 debug("assoclen %d cryptlen %d authsize %d\n", 948 req->assoclen, req->cryptlen, authsize); 949 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", 950 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 951 req->assoclen , 1); 952 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 953 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 954 edesc->src_nents ? 100 : ivsize, 1); 955 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", 956 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 957 edesc->src_nents ? 100 : req->cryptlen, 1); 958 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", 959 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 960 desc_bytes(sh_desc), 1); 961 #endif 962 963 len = desc_len(sh_desc); 964 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 965 966 if (all_contig) { 967 src_dma = sg_dma_address(req->assoc); 968 in_options = 0; 969 } else { 970 src_dma = edesc->sec4_sg_dma; 971 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 + 972 (edesc->src_nents ? : 1); 973 in_options = LDST_SGF; 974 } 975 976 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, 977 in_options); 978 979 if (likely(req->src == req->dst)) { 980 if (all_contig) { 981 dst_dma = sg_dma_address(req->src); 982 } else { 983 dst_dma = src_dma + sizeof(struct sec4_sg_entry) * 984 ((edesc->assoc_nents ? 
: 1) + 1); 985 out_options = LDST_SGF; 986 } 987 } else { 988 if (!edesc->dst_nents) { 989 dst_dma = sg_dma_address(req->dst); 990 } else { 991 dst_dma = edesc->sec4_sg_dma + 992 sec4_sg_index * 993 sizeof(struct sec4_sg_entry); 994 out_options = LDST_SGF; 995 } 996 } 997 if (encrypt) 998 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, 999 out_options); 1000 else 1001 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, 1002 out_options); 1003 } 1004 1005 /* 1006 * Fill in aead givencrypt job descriptor 1007 */ 1008 static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, 1009 struct aead_edesc *edesc, 1010 struct aead_request *req, 1011 int contig) 1012 { 1013 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1014 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1015 int ivsize = crypto_aead_ivsize(aead); 1016 int authsize = ctx->authsize; 1017 u32 *desc = edesc->hw_desc; 1018 u32 out_options = 0, in_options; 1019 dma_addr_t dst_dma, src_dma; 1020 int len, sec4_sg_index = 0; 1021 1022 #ifdef DEBUG 1023 debug("assoclen %d cryptlen %d authsize %d\n", 1024 req->assoclen, req->cryptlen, authsize); 1025 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", 1026 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 1027 req->assoclen , 1); 1028 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 1029 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 1030 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", 1031 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1032 edesc->src_nents > 1 ? 100 : req->cryptlen, 1); 1033 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", 1034 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 1035 desc_bytes(sh_desc), 1); 1036 #endif 1037 1038 len = desc_len(sh_desc); 1039 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1040 1041 if (contig & GIV_SRC_CONTIG) { 1042 src_dma = sg_dma_address(req->assoc); 1043 in_options = 0; 1044 } else { 1045 src_dma = edesc->sec4_sg_dma; 1046 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; 1047 in_options = LDST_SGF; 1048 } 1049 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, 1050 in_options); 1051 1052 if (contig & GIV_DST_CONTIG) { 1053 dst_dma = edesc->iv_dma; 1054 } else { 1055 if (likely(req->src == req->dst)) { 1056 dst_dma = src_dma + sizeof(struct sec4_sg_entry) * 1057 edesc->assoc_nents; 1058 out_options = LDST_SGF; 1059 } else { 1060 dst_dma = edesc->sec4_sg_dma + 1061 sec4_sg_index * 1062 sizeof(struct sec4_sg_entry); 1063 out_options = LDST_SGF; 1064 } 1065 } 1066 1067 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize, 1068 out_options); 1069 } 1070 1071 /* 1072 * Fill in ablkcipher job descriptor 1073 */ 1074 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, 1075 struct ablkcipher_edesc *edesc, 1076 struct ablkcipher_request *req, 1077 bool iv_contig) 1078 { 1079 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1080 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1081 u32 *desc = edesc->hw_desc; 1082 u32 out_options = 0, in_options; 1083 dma_addr_t dst_dma, src_dma; 1084 int len, sec4_sg_index = 0; 1085 1086 #ifdef DEBUG 1087 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 1088 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1089 ivsize, 1); 1090 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", 1091 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1092 edesc->src_nents ? 
100 : req->nbytes, 1); 1093 #endif 1094 1095 len = desc_len(sh_desc); 1096 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1097 1098 if (iv_contig) { 1099 src_dma = edesc->iv_dma; 1100 in_options = 0; 1101 } else { 1102 src_dma = edesc->sec4_sg_dma; 1103 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents; 1104 in_options = LDST_SGF; 1105 } 1106 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); 1107 1108 if (likely(req->src == req->dst)) { 1109 if (!edesc->src_nents && iv_contig) { 1110 dst_dma = sg_dma_address(req->src); 1111 } else { 1112 dst_dma = edesc->sec4_sg_dma + 1113 sizeof(struct sec4_sg_entry); 1114 out_options = LDST_SGF; 1115 } 1116 } else { 1117 if (!edesc->dst_nents) { 1118 dst_dma = sg_dma_address(req->dst); 1119 } else { 1120 dst_dma = edesc->sec4_sg_dma + 1121 sec4_sg_index * sizeof(struct sec4_sg_entry); 1122 out_options = LDST_SGF; 1123 } 1124 } 1125 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); 1126 } 1127 1128 /* 1129 * allocate and map the aead extended descriptor 1130 */ 1131 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 1132 int desc_bytes, bool *all_contig_ptr, 1133 bool encrypt) 1134 { 1135 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1136 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1137 struct device *jrdev = ctx->jrdev; 1138 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1139 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1140 int assoc_nents, src_nents, dst_nents = 0; 1141 struct aead_edesc *edesc; 1142 dma_addr_t iv_dma = 0; 1143 int sgc; 1144 bool all_contig = true; 1145 bool assoc_chained = false, src_chained = false, dst_chained = false; 1146 int ivsize = crypto_aead_ivsize(aead); 1147 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 1148 unsigned int authsize = ctx->authsize; 1149 1150 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); 1151 1152 if (unlikely(req->dst != req->src)) { 1153 src_nents = sg_count(req->src, req->cryptlen, &src_chained); 1154 dst_nents = sg_count(req->dst, 1155 req->cryptlen + 1156 (encrypt ? authsize : (-authsize)), 1157 &dst_chained); 1158 } else { 1159 src_nents = sg_count(req->src, 1160 req->cryptlen + 1161 (encrypt ? authsize : 0), 1162 &src_chained); 1163 } 1164 1165 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, 1166 DMA_TO_DEVICE, assoc_chained); 1167 if (likely(req->src == req->dst)) { 1168 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1169 DMA_BIDIRECTIONAL, src_chained); 1170 } else { 1171 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1172 DMA_TO_DEVICE, src_chained); 1173 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 1174 DMA_FROM_DEVICE, dst_chained); 1175 } 1176 1177 /* Check if data are contiguous */ 1178 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); 1179 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 1180 iv_dma || src_nents || iv_dma + ivsize != 1181 sg_dma_address(req->src)) { 1182 all_contig = false; 1183 assoc_nents = assoc_nents ? : 1; 1184 src_nents = src_nents ? 
: 1; 1185 sec4_sg_len = assoc_nents + 1 + src_nents; 1186 } 1187 sec4_sg_len += dst_nents; 1188 1189 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 1190 1191 /* allocate space for base edesc and hw desc commands, link tables */ 1192 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1193 sec4_sg_bytes, GFP_DMA | flags); 1194 if (!edesc) { 1195 dev_err(jrdev, "could not allocate extended descriptor\n"); 1196 return ERR_PTR(-ENOMEM); 1197 } 1198 1199 edesc->assoc_nents = assoc_nents; 1200 edesc->assoc_chained = assoc_chained; 1201 edesc->src_nents = src_nents; 1202 edesc->src_chained = src_chained; 1203 edesc->dst_nents = dst_nents; 1204 edesc->dst_chained = dst_chained; 1205 edesc->iv_dma = iv_dma; 1206 edesc->sec4_sg_bytes = sec4_sg_bytes; 1207 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + 1208 desc_bytes; 1209 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1210 sec4_sg_bytes, DMA_TO_DEVICE); 1211 *all_contig_ptr = all_contig; 1212 1213 sec4_sg_index = 0; 1214 if (!all_contig) { 1215 sg_to_sec4_sg(req->assoc, 1216 (assoc_nents ? : 1), 1217 edesc->sec4_sg + 1218 sec4_sg_index, 0); 1219 sec4_sg_index += assoc_nents ? : 1; 1220 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 1221 iv_dma, ivsize, 0); 1222 sec4_sg_index += 1; 1223 sg_to_sec4_sg_last(req->src, 1224 (src_nents ? : 1), 1225 edesc->sec4_sg + 1226 sec4_sg_index, 0); 1227 sec4_sg_index += src_nents ? : 1; 1228 } 1229 if (dst_nents) { 1230 sg_to_sec4_sg_last(req->dst, dst_nents, 1231 edesc->sec4_sg + sec4_sg_index, 0); 1232 } 1233 1234 return edesc; 1235 } 1236 1237 static int aead_encrypt(struct aead_request *req) 1238 { 1239 struct aead_edesc *edesc; 1240 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1241 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1242 struct device *jrdev = ctx->jrdev; 1243 bool all_contig; 1244 u32 *desc; 1245 int ret = 0; 1246 1247 /* allocate extended descriptor */ 1248 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1249 CAAM_CMD_SZ, &all_contig, true); 1250 if (IS_ERR(edesc)) 1251 return PTR_ERR(edesc); 1252 1253 /* Create and submit job descriptor */ 1254 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, 1255 all_contig, true); 1256 #ifdef DEBUG 1257 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1258 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1259 desc_bytes(edesc->hw_desc), 1); 1260 #endif 1261 1262 desc = edesc->hw_desc; 1263 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1264 if (!ret) { 1265 ret = -EINPROGRESS; 1266 } else { 1267 aead_unmap(jrdev, edesc, req); 1268 kfree(edesc); 1269 } 1270 1271 return ret; 1272 } 1273 1274 static int aead_decrypt(struct aead_request *req) 1275 { 1276 struct aead_edesc *edesc; 1277 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1278 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1279 struct device *jrdev = ctx->jrdev; 1280 bool all_contig; 1281 u32 *desc; 1282 int ret = 0; 1283 1284 /* allocate extended descriptor */ 1285 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1286 CAAM_CMD_SZ, &all_contig, false); 1287 if (IS_ERR(edesc)) 1288 return PTR_ERR(edesc); 1289 1290 #ifdef DEBUG 1291 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", 1292 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1293 req->cryptlen, 1); 1294 #endif 1295 1296 /* Create and submit job descriptor*/ 1297 init_aead_job(ctx->sh_desc_dec, 1298 ctx->sh_desc_dec_dma, edesc, req, all_contig, false); 1299 #ifdef DEBUG 1300 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": 
", 1301 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1302 desc_bytes(edesc->hw_desc), 1); 1303 #endif 1304 1305 desc = edesc->hw_desc; 1306 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 1307 if (!ret) { 1308 ret = -EINPROGRESS; 1309 } else { 1310 aead_unmap(jrdev, edesc, req); 1311 kfree(edesc); 1312 } 1313 1314 return ret; 1315 } 1316 1317 /* 1318 * allocate and map the aead extended descriptor for aead givencrypt 1319 */ 1320 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request 1321 *greq, int desc_bytes, 1322 u32 *contig_ptr) 1323 { 1324 struct aead_request *req = &greq->areq; 1325 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1326 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1327 struct device *jrdev = ctx->jrdev; 1328 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1329 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1330 int assoc_nents, src_nents, dst_nents = 0; 1331 struct aead_edesc *edesc; 1332 dma_addr_t iv_dma = 0; 1333 int sgc; 1334 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; 1335 int ivsize = crypto_aead_ivsize(aead); 1336 bool assoc_chained = false, src_chained = false, dst_chained = false; 1337 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 1338 1339 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); 1340 src_nents = sg_count(req->src, req->cryptlen, &src_chained); 1341 1342 if (unlikely(req->dst != req->src)) 1343 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize, 1344 &dst_chained); 1345 1346 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, 1347 DMA_TO_DEVICE, assoc_chained); 1348 if (likely(req->src == req->dst)) { 1349 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1350 DMA_BIDIRECTIONAL, src_chained); 1351 } else { 1352 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1353 DMA_TO_DEVICE, src_chained); 1354 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 1355 DMA_FROM_DEVICE, dst_chained); 1356 } 1357 1358 /* Check if data are contiguous */ 1359 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); 1360 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 1361 iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) 1362 contig &= ~GIV_SRC_CONTIG; 1363 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) 1364 contig &= ~GIV_DST_CONTIG; 1365 if (unlikely(req->src != req->dst)) { 1366 dst_nents = dst_nents ? : 1; 1367 sec4_sg_len += 1; 1368 } 1369 if (!(contig & GIV_SRC_CONTIG)) { 1370 assoc_nents = assoc_nents ? : 1; 1371 src_nents = src_nents ? 
: 1; 1372 sec4_sg_len += assoc_nents + 1 + src_nents; 1373 if (likely(req->src == req->dst)) 1374 contig &= ~GIV_DST_CONTIG; 1375 } 1376 sec4_sg_len += dst_nents; 1377 1378 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 1379 1380 /* allocate space for base edesc and hw desc commands, link tables */ 1381 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1382 sec4_sg_bytes, GFP_DMA | flags); 1383 if (!edesc) { 1384 dev_err(jrdev, "could not allocate extended descriptor\n"); 1385 return ERR_PTR(-ENOMEM); 1386 } 1387 1388 edesc->assoc_nents = assoc_nents; 1389 edesc->assoc_chained = assoc_chained; 1390 edesc->src_nents = src_nents; 1391 edesc->src_chained = src_chained; 1392 edesc->dst_nents = dst_nents; 1393 edesc->dst_chained = dst_chained; 1394 edesc->iv_dma = iv_dma; 1395 edesc->sec4_sg_bytes = sec4_sg_bytes; 1396 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + 1397 desc_bytes; 1398 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1399 sec4_sg_bytes, DMA_TO_DEVICE); 1400 *contig_ptr = contig; 1401 1402 sec4_sg_index = 0; 1403 if (!(contig & GIV_SRC_CONTIG)) { 1404 sg_to_sec4_sg(req->assoc, assoc_nents, 1405 edesc->sec4_sg + 1406 sec4_sg_index, 0); 1407 sec4_sg_index += assoc_nents; 1408 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 1409 iv_dma, ivsize, 0); 1410 sec4_sg_index += 1; 1411 sg_to_sec4_sg_last(req->src, src_nents, 1412 edesc->sec4_sg + 1413 sec4_sg_index, 0); 1414 sec4_sg_index += src_nents; 1415 } 1416 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { 1417 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 1418 iv_dma, ivsize, 0); 1419 sec4_sg_index += 1; 1420 sg_to_sec4_sg_last(req->dst, dst_nents, 1421 edesc->sec4_sg + sec4_sg_index, 0); 1422 } 1423 1424 return edesc; 1425 } 1426 1427 static int aead_givencrypt(struct aead_givcrypt_request *areq) 1428 { 1429 struct aead_request *req = &areq->areq; 1430 struct aead_edesc *edesc; 1431 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1432 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1433 struct device *jrdev = ctx->jrdev; 1434 u32 contig; 1435 u32 *desc; 1436 int ret = 0; 1437 1438 /* allocate extended descriptor */ 1439 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * 1440 CAAM_CMD_SZ, &contig); 1441 1442 if (IS_ERR(edesc)) 1443 return PTR_ERR(edesc); 1444 1445 #ifdef DEBUG 1446 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ", 1447 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1448 req->cryptlen, 1); 1449 #endif 1450 1451 /* Create and submit job descriptor*/ 1452 init_aead_giv_job(ctx->sh_desc_givenc, 1453 ctx->sh_desc_givenc_dma, edesc, req, contig); 1454 #ifdef DEBUG 1455 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1456 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1457 desc_bytes(edesc->hw_desc), 1); 1458 #endif 1459 1460 desc = edesc->hw_desc; 1461 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1462 if (!ret) { 1463 ret = -EINPROGRESS; 1464 } else { 1465 aead_unmap(jrdev, edesc, req); 1466 kfree(edesc); 1467 } 1468 1469 return ret; 1470 } 1471 1472 /* 1473 * allocate and map the ablkcipher extended descriptor for ablkcipher 1474 */ 1475 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request 1476 *req, int desc_bytes, 1477 bool *iv_contig_out) 1478 { 1479 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1480 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1481 struct device *jrdev = ctx->jrdev; 1482 gfp_t flags = (req->base.flags & 
(CRYPTO_TFM_REQ_MAY_BACKLOG | 1483 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 1484 GFP_KERNEL : GFP_ATOMIC; 1485 int src_nents, dst_nents = 0, sec4_sg_bytes; 1486 struct ablkcipher_edesc *edesc; 1487 dma_addr_t iv_dma = 0; 1488 bool iv_contig = false; 1489 int sgc; 1490 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1491 bool src_chained = false, dst_chained = false; 1492 int sec4_sg_index; 1493 1494 src_nents = sg_count(req->src, req->nbytes, &src_chained); 1495 1496 if (req->dst != req->src) 1497 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); 1498 1499 if (likely(req->src == req->dst)) { 1500 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1501 DMA_BIDIRECTIONAL, src_chained); 1502 } else { 1503 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1504 DMA_TO_DEVICE, src_chained); 1505 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 1506 DMA_FROM_DEVICE, dst_chained); 1507 } 1508 1509 /* 1510 * Check if iv can be contiguous with source and destination. 1511 * If so, include it. If not, create scatterlist. 1512 */ 1513 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); 1514 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) 1515 iv_contig = true; 1516 else 1517 src_nents = src_nents ? : 1; 1518 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * 1519 sizeof(struct sec4_sg_entry); 1520 1521 /* allocate space for base edesc and hw desc commands, link tables */ 1522 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + 1523 sec4_sg_bytes, GFP_DMA | flags); 1524 if (!edesc) { 1525 dev_err(jrdev, "could not allocate extended descriptor\n"); 1526 return ERR_PTR(-ENOMEM); 1527 } 1528 1529 edesc->src_nents = src_nents; 1530 edesc->src_chained = src_chained; 1531 edesc->dst_nents = dst_nents; 1532 edesc->dst_chained = dst_chained; 1533 edesc->sec4_sg_bytes = sec4_sg_bytes; 1534 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1535 desc_bytes; 1536 1537 sec4_sg_index = 0; 1538 if (!iv_contig) { 1539 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); 1540 sg_to_sec4_sg_last(req->src, src_nents, 1541 edesc->sec4_sg + 1, 0); 1542 sec4_sg_index += 1 + src_nents; 1543 } 1544 1545 if (dst_nents) { 1546 sg_to_sec4_sg_last(req->dst, dst_nents, 1547 edesc->sec4_sg + sec4_sg_index, 0); 1548 } 1549 1550 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1551 sec4_sg_bytes, DMA_TO_DEVICE); 1552 edesc->iv_dma = iv_dma; 1553 1554 #ifdef DEBUG 1555 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", 1556 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1557 sec4_sg_bytes, 1); 1558 #endif 1559 1560 *iv_contig_out = iv_contig; 1561 return edesc; 1562 } 1563 1564 static int ablkcipher_encrypt(struct ablkcipher_request *req) 1565 { 1566 struct ablkcipher_edesc *edesc; 1567 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1568 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1569 struct device *jrdev = ctx->jrdev; 1570 bool iv_contig; 1571 u32 *desc; 1572 int ret = 0; 1573 1574 /* allocate extended descriptor */ 1575 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1576 CAAM_CMD_SZ, &iv_contig); 1577 if (IS_ERR(edesc)) 1578 return PTR_ERR(edesc); 1579 1580 /* Create and submit job descriptor*/ 1581 init_ablkcipher_job(ctx->sh_desc_enc, 1582 ctx->sh_desc_enc_dma, edesc, req, iv_contig); 1583 #ifdef DEBUG 1584 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1585 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1586 desc_bytes(edesc->hw_desc), 
1); 1587 #endif 1588 desc = edesc->hw_desc; 1589 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); 1590 1591 if (!ret) { 1592 ret = -EINPROGRESS; 1593 } else { 1594 ablkcipher_unmap(jrdev, edesc, req); 1595 kfree(edesc); 1596 } 1597 1598 return ret; 1599 } 1600 1601 static int ablkcipher_decrypt(struct ablkcipher_request *req) 1602 { 1603 struct ablkcipher_edesc *edesc; 1604 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1605 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1606 struct device *jrdev = ctx->jrdev; 1607 bool iv_contig; 1608 u32 *desc; 1609 int ret = 0; 1610 1611 /* allocate extended descriptor */ 1612 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1613 CAAM_CMD_SZ, &iv_contig); 1614 if (IS_ERR(edesc)) 1615 return PTR_ERR(edesc); 1616 1617 /* Create and submit job descriptor*/ 1618 init_ablkcipher_job(ctx->sh_desc_dec, 1619 ctx->sh_desc_dec_dma, edesc, req, iv_contig); 1620 desc = edesc->hw_desc; 1621 #ifdef DEBUG 1622 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1623 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1624 desc_bytes(edesc->hw_desc), 1); 1625 #endif 1626 1627 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); 1628 if (!ret) { 1629 ret = -EINPROGRESS; 1630 } else { 1631 ablkcipher_unmap(jrdev, edesc, req); 1632 kfree(edesc); 1633 } 1634 1635 return ret; 1636 } 1637 1638 #define template_aead template_u.aead 1639 #define template_ablkcipher template_u.ablkcipher 1640 struct caam_alg_template { 1641 char name[CRYPTO_MAX_ALG_NAME]; 1642 char driver_name[CRYPTO_MAX_ALG_NAME]; 1643 unsigned int blocksize; 1644 u32 type; 1645 union { 1646 struct ablkcipher_alg ablkcipher; 1647 struct aead_alg aead; 1648 struct blkcipher_alg blkcipher; 1649 struct cipher_alg cipher; 1650 struct compress_alg compress; 1651 struct rng_alg rng; 1652 } template_u; 1653 u32 class1_alg_type; 1654 u32 class2_alg_type; 1655 u32 alg_op; 1656 }; 1657 1658 static struct caam_alg_template driver_algs[] = { 1659 /* single-pass ipsec_esp descriptor */ 1660 { 1661 .name = "authenc(hmac(md5),cbc(aes))", 1662 .driver_name = "authenc-hmac-md5-cbc-aes-caam", 1663 .blocksize = AES_BLOCK_SIZE, 1664 .type = CRYPTO_ALG_TYPE_AEAD, 1665 .template_aead = { 1666 .setkey = aead_setkey, 1667 .setauthsize = aead_setauthsize, 1668 .encrypt = aead_encrypt, 1669 .decrypt = aead_decrypt, 1670 .givencrypt = aead_givencrypt, 1671 .geniv = "<built-in>", 1672 .ivsize = AES_BLOCK_SIZE, 1673 .maxauthsize = MD5_DIGEST_SIZE, 1674 }, 1675 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1676 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, 1677 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 1678 }, 1679 { 1680 .name = "authenc(hmac(sha1),cbc(aes))", 1681 .driver_name = "authenc-hmac-sha1-cbc-aes-caam", 1682 .blocksize = AES_BLOCK_SIZE, 1683 .type = CRYPTO_ALG_TYPE_AEAD, 1684 .template_aead = { 1685 .setkey = aead_setkey, 1686 .setauthsize = aead_setauthsize, 1687 .encrypt = aead_encrypt, 1688 .decrypt = aead_decrypt, 1689 .givencrypt = aead_givencrypt, 1690 .geniv = "<built-in>", 1691 .ivsize = AES_BLOCK_SIZE, 1692 .maxauthsize = SHA1_DIGEST_SIZE, 1693 }, 1694 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1695 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, 1696 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 1697 }, 1698 { 1699 .name = "authenc(hmac(sha224),cbc(aes))", 1700 .driver_name = "authenc-hmac-sha224-cbc-aes-caam", 1701 .blocksize = AES_BLOCK_SIZE, 1702 .type = 
		CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};

struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
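
/*
 * caam_alg_alloc - wrap one driver_algs[] template in a crypto_alg
 *
 * Allocates a caam_crypto_alg, fills the generic crypto_alg fields from the
 * template and records the CAAM-specific OP_ALG_* selectors so that
 * caam_cra_init() can later copy them into the per-session caam_ctx.
 */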
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	/* hook up the type-specific ops supplied by the template */
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_init(void)
{
	int i = 0, err = 0;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &alg_list);
		}
	}
	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
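
/*
 * Illustrative only -- not part of this driver.  Once the algorithms above
 * are registered, a kernel consumer (an IPsec transform, for instance)
 * reaches the CAAM-backed implementation through the normal crypto API.
 * A minimal sketch, with "key"/"keylen" standing in for caller-provided
 * data:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	if (crypto_aead_setkey(tfm, key, keylen) ||
 *	    crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE))
 *		goto out_free;
 *
 *	...allocate an aead_request, set src/dst/iv and submit it...
 *
 * out_free:
 *	crypto_free_aead(tfm);
 *
 * Whether this driver actually services the request depends on
 * CAAM_CRA_PRIORITY winning over any other registered implementation of
 * the same algorithm name.
 */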