/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |    (PDB)    |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | LOAD (to DECO)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void
init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, 201 int keys_fit_inline) 202 { 203 u32 *key_jump_cmd; 204 205 init_sh_desc(desc, HDR_SHARE_WAIT); 206 207 /* Skip if already shared */ 208 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 209 JUMP_COND_SHRD); 210 211 append_key_aead(desc, ctx, keys_fit_inline); 212 213 set_jump_tgt_here(desc, key_jump_cmd); 214 215 /* Propagate errors from shared to job descriptor */ 216 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 217 } 218 219 static int aead_set_sh_desc(struct crypto_aead *aead) 220 { 221 struct aead_tfm *tfm = &aead->base.crt_aead; 222 struct caam_ctx *ctx = crypto_aead_ctx(aead); 223 struct device *jrdev = ctx->jrdev; 224 bool keys_fit_inline = 0; 225 u32 *key_jump_cmd, *jump_cmd; 226 u32 geniv, moveiv; 227 u32 *desc; 228 229 if (!ctx->enckeylen || !ctx->authsize) 230 return 0; 231 232 /* 233 * Job Descriptor and Shared Descriptors 234 * must all fit into the 64-word Descriptor h/w Buffer 235 */ 236 if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + 237 ctx->split_key_pad_len + ctx->enckeylen <= 238 CAAM_DESC_BYTES_MAX) 239 keys_fit_inline = 1; 240 241 /* aead_encrypt shared descriptor */ 242 desc = ctx->sh_desc_enc; 243 244 init_sh_desc_key_aead(desc, ctx, keys_fit_inline); 245 246 /* Class 2 operation */ 247 append_operation(desc, ctx->class2_alg_type | 248 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 249 250 /* cryptlen = seqoutlen - authsize */ 251 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); 252 253 /* assoclen + cryptlen = seqinlen - ivsize */ 254 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); 255 256 /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */ 257 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); 258 259 /* read assoc before reading payload */ 260 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 261 KEY_VLF); 262 aead_append_ld_iv(desc, tfm->ivsize); 263 264 /* Class 1 operation */ 265 append_operation(desc, ctx->class1_alg_type | 266 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 267 268 /* Read and write cryptlen bytes */ 269 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 270 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 271 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); 272 273 /* Write ICV */ 274 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | 275 LDST_SRCDST_BYTE_CONTEXT); 276 277 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 278 desc_bytes(desc), 279 DMA_TO_DEVICE); 280 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 281 dev_err(jrdev, "unable to map shared descriptor\n"); 282 return -ENOMEM; 283 } 284 #ifdef DEBUG 285 print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ", 286 DUMP_PREFIX_ADDRESS, 16, 4, desc, 287 desc_bytes(desc), 1); 288 #endif 289 290 /* 291 * Job Descriptor and Shared Descriptors 292 * must all fit into the 64-word Descriptor h/w Buffer 293 */ 294 if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + 295 ctx->split_key_pad_len + ctx->enckeylen <= 296 CAAM_DESC_BYTES_MAX) 297 keys_fit_inline = 1; 298 299 desc = ctx->sh_desc_dec; 300 301 /* aead_decrypt shared descriptor */ 302 init_sh_desc(desc, HDR_SHARE_WAIT); 303 304 /* Skip if already shared */ 305 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 306 JUMP_COND_SHRD); 307 308 append_key_aead(desc, ctx, keys_fit_inline); 309 310 /* Only propagate error immediately if shared */ 311 jump_cmd = append_jump(desc, JUMP_TEST_ALL); 312 set_jump_tgt_here(desc, key_jump_cmd); 313 append_cmd(desc, 
SET_OK_NO_PROP_ERRORS | CMD_LOAD); 314 set_jump_tgt_here(desc, jump_cmd); 315 316 /* Class 2 operation */ 317 append_operation(desc, ctx->class2_alg_type | 318 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); 319 320 /* assoclen + cryptlen = seqinlen - ivsize */ 321 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, 322 ctx->authsize + tfm->ivsize) 323 /* assoclen = (assoclen + cryptlen) - cryptlen */ 324 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); 325 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); 326 327 /* read assoc before reading payload */ 328 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 329 KEY_VLF); 330 331 aead_append_ld_iv(desc, tfm->ivsize); 332 333 append_dec_op1(desc, ctx->class1_alg_type); 334 335 /* Read and write cryptlen bytes */ 336 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); 337 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); 338 aead_append_src_dst(desc, FIFOLD_TYPE_MSG); 339 340 /* Load ICV */ 341 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | 342 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); 343 append_dec_shr_done(desc); 344 345 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 346 desc_bytes(desc), 347 DMA_TO_DEVICE); 348 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 349 dev_err(jrdev, "unable to map shared descriptor\n"); 350 return -ENOMEM; 351 } 352 #ifdef DEBUG 353 print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ", 354 DUMP_PREFIX_ADDRESS, 16, 4, desc, 355 desc_bytes(desc), 1); 356 #endif 357 358 /* 359 * Job Descriptor and Shared Descriptors 360 * must all fit into the 64-word Descriptor h/w Buffer 361 */ 362 if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + 363 ctx->split_key_pad_len + ctx->enckeylen <= 364 CAAM_DESC_BYTES_MAX) 365 keys_fit_inline = 1; 366 367 /* aead_givencrypt shared descriptor */ 368 desc = ctx->sh_desc_givenc; 369 370 init_sh_desc_key_aead(desc, ctx, keys_fit_inline); 371 372 /* Generate IV */ 373 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | 374 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | 375 NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); 376 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | 377 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); 378 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); 379 append_move(desc, MOVE_SRC_INFIFO | 380 MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT)); 381 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); 382 383 /* Copy IV to class 1 context */ 384 append_move(desc, MOVE_SRC_CLASS1CTX | 385 MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT)); 386 387 /* Return to encryption */ 388 append_operation(desc, ctx->class2_alg_type | 389 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 390 391 /* ivsize + cryptlen = seqoutlen - authsize */ 392 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); 393 394 /* assoclen = seqinlen - (ivsize + cryptlen) */ 395 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); 396 397 /* read assoc before reading payload */ 398 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 399 KEY_VLF); 400 401 /* Copy iv from class 1 ctx to class 2 fifo*/ 402 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | 403 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); 404 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | 405 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); 406 append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | 407 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); 408 409 /* Class 1 
operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

struct split_key_result {
	struct completion completion;
	int err;
};

static void split_key_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct split_key_result *res = context;

#ifdef DEBUG
	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	res->err = err;

	complete(&res->completion);
}

/*
 * get a split ipad/opad key
 *
 * Split key generation-----------------------------------------------
 *
 * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
 * [01] 0x04000014        key: class2->keyreg len=20
 *                             @0xffe01000
 * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
 * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
 * [05] 0xa4000001       jump: class2 local all ->1 [06]
 * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
 *                             @0xffe04000
 */
static int gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t dma_addr_in, dma_addr_out;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_in)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
		   KEY_DEST_CLASS_REG);

	/* Sets MDHA up into an HMAC-INIT */
	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
			 OP_ALG_AS_INIT);

	/*
	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
	 * into both pads inside MDHA
	 */
	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);

	/*
	 * FIFO_STORE with the explicit split-key content store
	 * (0x26 output type)
	 */
	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_out)) {
534 dev_err(jrdev, "unable to map key output memory\n"); 535 kfree(desc); 536 return -ENOMEM; 537 } 538 append_fifo_store(desc, dma_addr_out, ctx->split_key_len, 539 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); 540 541 #ifdef DEBUG 542 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", 543 DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1); 544 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 545 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 546 #endif 547 548 result.err = 0; 549 init_completion(&result.completion); 550 551 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); 552 if (!ret) { 553 /* in progress */ 554 wait_for_completion_interruptible(&result.completion); 555 ret = result.err; 556 #ifdef DEBUG 557 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", 558 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 559 ctx->split_key_pad_len, 1); 560 #endif 561 } 562 563 dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len, 564 DMA_FROM_DEVICE); 565 dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE); 566 567 kfree(desc); 568 569 return ret; 570 } 571 572 static int aead_setkey(struct crypto_aead *aead, 573 const u8 *key, unsigned int keylen) 574 { 575 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ 576 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; 577 struct caam_ctx *ctx = crypto_aead_ctx(aead); 578 struct device *jrdev = ctx->jrdev; 579 struct rtattr *rta = (void *)key; 580 struct crypto_authenc_key_param *param; 581 unsigned int authkeylen; 582 unsigned int enckeylen; 583 int ret = 0; 584 585 param = RTA_DATA(rta); 586 enckeylen = be32_to_cpu(param->enckeylen); 587 588 key += RTA_ALIGN(rta->rta_len); 589 keylen -= RTA_ALIGN(rta->rta_len); 590 591 if (keylen < enckeylen) 592 goto badkey; 593 594 authkeylen = keylen - enckeylen; 595 596 if (keylen > CAAM_MAX_KEY_SIZE) 597 goto badkey; 598 599 /* Pick class 2 key length from algorithm submask */ 600 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> 601 OP_ALG_ALGSEL_SHIFT] * 2; 602 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); 603 604 #ifdef DEBUG 605 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", 606 keylen, enckeylen, authkeylen); 607 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", 608 ctx->split_key_len, ctx->split_key_pad_len); 609 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", 610 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 611 #endif 612 613 ret = gen_split_key(ctx, key, authkeylen); 614 if (ret) { 615 goto badkey; 616 } 617 618 /* postpend encryption key to auth split key */ 619 memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen); 620 621 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + 622 enckeylen, DMA_TO_DEVICE); 623 if (dma_mapping_error(jrdev, ctx->key_dma)) { 624 dev_err(jrdev, "unable to map key i/o memory\n"); 625 return -ENOMEM; 626 } 627 #ifdef DEBUG 628 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", 629 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 630 ctx->split_key_pad_len + enckeylen, 1); 631 #endif 632 633 ctx->enckeylen = enckeylen; 634 635 ret = aead_set_sh_desc(aead); 636 if (ret) { 637 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + 638 enckeylen, DMA_TO_DEVICE); 639 } 640 641 return ret; 642 badkey: 643 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 644 return -EINVAL; 645 } 646 647 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 648 const u8 *key, unsigned int keylen) 649 { 650 struct caam_ctx 
*ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing errors to propagate */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

struct link_tbl_entry {
	u64 ptr;
	u32 len;
	u8 reserved;
	u8 buf_pool_id;
	u16 offset;
};

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents:
number of segments in associated data (SPI+Seq) scatterlist 771 * @src_nents: number of segments in input scatterlist 772 * @dst_nents: number of segments in output scatterlist 773 * @iv_dma: dma address of iv for checking continuity and link table 774 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 775 * @link_tbl_bytes: length of dma mapped link_tbl space 776 * @link_tbl_dma: bus physical mapped address of h/w link table 777 * @hw_desc: the h/w job descriptor followed by any referenced link tables 778 */ 779 struct aead_edesc { 780 int assoc_nents; 781 int src_nents; 782 int dst_nents; 783 dma_addr_t iv_dma; 784 int link_tbl_bytes; 785 dma_addr_t link_tbl_dma; 786 struct link_tbl_entry *link_tbl; 787 u32 hw_desc[0]; 788 }; 789 790 /* 791 * ablkcipher_edesc - s/w-extended ablkcipher descriptor 792 * @src_nents: number of segments in input scatterlist 793 * @dst_nents: number of segments in output scatterlist 794 * @iv_dma: dma address of iv for checking continuity and link table 795 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 796 * @link_tbl_bytes: length of dma mapped link_tbl space 797 * @link_tbl_dma: bus physical mapped address of h/w link table 798 * @hw_desc: the h/w job descriptor followed by any referenced link tables 799 */ 800 struct ablkcipher_edesc { 801 int src_nents; 802 int dst_nents; 803 dma_addr_t iv_dma; 804 int link_tbl_bytes; 805 dma_addr_t link_tbl_dma; 806 struct link_tbl_entry *link_tbl; 807 u32 hw_desc[0]; 808 }; 809 810 static void caam_unmap(struct device *dev, struct scatterlist *src, 811 struct scatterlist *dst, int src_nents, int dst_nents, 812 dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma, 813 int link_tbl_bytes) 814 { 815 if (unlikely(dst != src)) { 816 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 817 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 818 } else { 819 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); 820 } 821 822 if (iv_dma) 823 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 824 if (link_tbl_bytes) 825 dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes, 826 DMA_TO_DEVICE); 827 } 828 829 static void aead_unmap(struct device *dev, 830 struct aead_edesc *edesc, 831 struct aead_request *req) 832 { 833 struct crypto_aead *aead = crypto_aead_reqtfm(req); 834 int ivsize = crypto_aead_ivsize(aead); 835 836 dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE); 837 838 caam_unmap(dev, req->src, req->dst, 839 edesc->src_nents, edesc->dst_nents, 840 edesc->iv_dma, ivsize, edesc->link_tbl_dma, 841 edesc->link_tbl_bytes); 842 } 843 844 static void ablkcipher_unmap(struct device *dev, 845 struct ablkcipher_edesc *edesc, 846 struct ablkcipher_request *req) 847 { 848 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 849 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 850 851 caam_unmap(dev, req->src, req->dst, 852 edesc->src_nents, edesc->dst_nents, 853 edesc->iv_dma, ivsize, edesc->link_tbl_dma, 854 edesc->link_tbl_bytes); 855 } 856 857 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 858 void *context) 859 { 860 struct aead_request *req = context; 861 struct aead_edesc *edesc; 862 #ifdef DEBUG 863 struct crypto_aead *aead = crypto_aead_reqtfm(req); 864 struct caam_ctx *ctx = crypto_aead_ctx(aead); 865 int ivsize = crypto_aead_ivsize(aead); 866 867 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 868 #endif 869 870 edesc = (struct aead_edesc *)((char *)desc - 871 offsetof(struct aead_edesc, 
hw_desc)); 872 873 if (err) { 874 char tmp[CAAM_ERROR_STR_MAX]; 875 876 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 877 } 878 879 aead_unmap(jrdev, edesc, req); 880 881 #ifdef DEBUG 882 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", 883 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 884 req->assoclen , 1); 885 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 886 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, 887 edesc->src_nents ? 100 : ivsize, 1); 888 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 889 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 890 edesc->src_nents ? 100 : req->cryptlen + 891 ctx->authsize + 4, 1); 892 #endif 893 894 kfree(edesc); 895 896 aead_request_complete(req, err); 897 } 898 899 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 900 void *context) 901 { 902 struct aead_request *req = context; 903 struct aead_edesc *edesc; 904 #ifdef DEBUG 905 struct crypto_aead *aead = crypto_aead_reqtfm(req); 906 struct caam_ctx *ctx = crypto_aead_ctx(aead); 907 int ivsize = crypto_aead_ivsize(aead); 908 909 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 910 #endif 911 912 edesc = (struct aead_edesc *)((char *)desc - 913 offsetof(struct aead_edesc, hw_desc)); 914 915 #ifdef DEBUG 916 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 917 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 918 ivsize, 1); 919 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 920 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), 921 req->cryptlen, 1); 922 #endif 923 924 if (err) { 925 char tmp[CAAM_ERROR_STR_MAX]; 926 927 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 928 } 929 930 aead_unmap(jrdev, edesc, req); 931 932 /* 933 * verify hw auth check passed else return -EBADMSG 934 */ 935 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) 936 err = -EBADMSG; 937 938 #ifdef DEBUG 939 print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", 940 DUMP_PREFIX_ADDRESS, 16, 4, 941 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), 942 sizeof(struct iphdr) + req->assoclen + 943 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + 944 ctx->authsize + 36, 1); 945 if (!err && edesc->link_tbl_bytes) { 946 struct scatterlist *sg = sg_last(req->src, edesc->src_nents); 947 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", 948 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), 949 sg->length + ctx->authsize + 16, 1); 950 } 951 #endif 952 953 kfree(edesc); 954 955 aead_request_complete(req, err); 956 } 957 958 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 959 void *context) 960 { 961 struct ablkcipher_request *req = context; 962 struct ablkcipher_edesc *edesc; 963 #ifdef DEBUG 964 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 965 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 966 967 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 968 #endif 969 970 edesc = (struct ablkcipher_edesc *)((char *)desc - 971 offsetof(struct ablkcipher_edesc, hw_desc)); 972 973 if (err) { 974 char tmp[CAAM_ERROR_STR_MAX]; 975 976 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 977 } 978 979 #ifdef DEBUG 980 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 981 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 982 edesc->src_nents > 1 ? 100 : ivsize, 1); 983 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 984 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 985 edesc->dst_nents > 1 ? 
100 : req->nbytes, 1); 986 #endif 987 988 ablkcipher_unmap(jrdev, edesc, req); 989 kfree(edesc); 990 991 ablkcipher_request_complete(req, err); 992 } 993 994 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 995 void *context) 996 { 997 struct ablkcipher_request *req = context; 998 struct ablkcipher_edesc *edesc; 999 #ifdef DEBUG 1000 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1001 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1002 1003 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1004 #endif 1005 1006 edesc = (struct ablkcipher_edesc *)((char *)desc - 1007 offsetof(struct ablkcipher_edesc, hw_desc)); 1008 if (err) { 1009 char tmp[CAAM_ERROR_STR_MAX]; 1010 1011 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 1012 } 1013 1014 #ifdef DEBUG 1015 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 1016 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1017 ivsize, 1); 1018 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 1019 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1020 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 1021 #endif 1022 1023 ablkcipher_unmap(jrdev, edesc, req); 1024 kfree(edesc); 1025 1026 ablkcipher_request_complete(req, err); 1027 } 1028 1029 static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr, 1030 dma_addr_t dma, u32 len, u32 offset) 1031 { 1032 link_tbl_ptr->ptr = dma; 1033 link_tbl_ptr->len = len; 1034 link_tbl_ptr->reserved = 0; 1035 link_tbl_ptr->buf_pool_id = 0; 1036 link_tbl_ptr->offset = offset; 1037 #ifdef DEBUG 1038 print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ", 1039 DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr, 1040 sizeof(struct link_tbl_entry), 1); 1041 #endif 1042 } 1043 1044 /* 1045 * convert scatterlist to h/w link table format 1046 * but does not have final bit; instead, returns last entry 1047 */ 1048 static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg, 1049 int sg_count, struct link_tbl_entry 1050 *link_tbl_ptr, u32 offset) 1051 { 1052 while (sg_count) { 1053 sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg), 1054 sg_dma_len(sg), offset); 1055 link_tbl_ptr++; 1056 sg = sg_next(sg); 1057 sg_count--; 1058 } 1059 return link_tbl_ptr - 1; 1060 } 1061 1062 /* 1063 * convert scatterlist to h/w link table format 1064 * scatterlist must have been previously dma mapped 1065 */ 1066 static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count, 1067 struct link_tbl_entry *link_tbl_ptr, u32 offset) 1068 { 1069 link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset); 1070 link_tbl_ptr->len |= 0x40000000; 1071 } 1072 1073 /* 1074 * Fill in aead job descriptor 1075 */ 1076 static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, 1077 struct aead_edesc *edesc, 1078 struct aead_request *req, 1079 bool all_contig, bool encrypt) 1080 { 1081 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1082 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1083 int ivsize = crypto_aead_ivsize(aead); 1084 int authsize = ctx->authsize; 1085 u32 *desc = edesc->hw_desc; 1086 u32 out_options = 0, in_options; 1087 dma_addr_t dst_dma, src_dma; 1088 int len, link_tbl_index = 0; 1089 1090 #ifdef DEBUG 1091 debug("assoclen %d cryptlen %d authsize %d\n", 1092 req->assoclen, req->cryptlen, authsize); 1093 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", 1094 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 1095 req->assoclen , 1); 1096 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 1097 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1098 
edesc->src_nents ? 100 : ivsize, 1); 1099 print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", 1100 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1101 edesc->src_nents ? 100 : req->cryptlen, 1); 1102 print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", 1103 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 1104 desc_bytes(sh_desc), 1); 1105 #endif 1106 1107 len = desc_len(sh_desc); 1108 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1109 1110 if (all_contig) { 1111 src_dma = sg_dma_address(req->assoc); 1112 in_options = 0; 1113 } else { 1114 src_dma = edesc->link_tbl_dma; 1115 link_tbl_index += (edesc->assoc_nents ? : 1) + 1 + 1116 (edesc->src_nents ? : 1); 1117 in_options = LDST_SGF; 1118 } 1119 if (encrypt) 1120 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 1121 req->cryptlen - authsize, in_options); 1122 else 1123 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 1124 req->cryptlen, in_options); 1125 1126 if (likely(req->src == req->dst)) { 1127 if (all_contig) { 1128 dst_dma = sg_dma_address(req->src); 1129 } else { 1130 dst_dma = src_dma + sizeof(struct link_tbl_entry) * 1131 ((edesc->assoc_nents ? : 1) + 1); 1132 out_options = LDST_SGF; 1133 } 1134 } else { 1135 if (!edesc->dst_nents) { 1136 dst_dma = sg_dma_address(req->dst); 1137 } else { 1138 dst_dma = edesc->link_tbl_dma + 1139 link_tbl_index * 1140 sizeof(struct link_tbl_entry); 1141 out_options = LDST_SGF; 1142 } 1143 } 1144 if (encrypt) 1145 append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); 1146 else 1147 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, 1148 out_options); 1149 } 1150 1151 /* 1152 * Fill in aead givencrypt job descriptor 1153 */ 1154 static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, 1155 struct aead_edesc *edesc, 1156 struct aead_request *req, 1157 int contig) 1158 { 1159 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1160 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1161 int ivsize = crypto_aead_ivsize(aead); 1162 int authsize = ctx->authsize; 1163 u32 *desc = edesc->hw_desc; 1164 u32 out_options = 0, in_options; 1165 dma_addr_t dst_dma, src_dma; 1166 int len, link_tbl_index = 0; 1167 1168 #ifdef DEBUG 1169 debug("assoclen %d cryptlen %d authsize %d\n", 1170 req->assoclen, req->cryptlen, authsize); 1171 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", 1172 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 1173 req->assoclen , 1); 1174 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 1175 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 1176 print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", 1177 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1178 edesc->src_nents > 1 ? 
100 : req->cryptlen, 1); 1179 print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", 1180 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 1181 desc_bytes(sh_desc), 1); 1182 #endif 1183 1184 len = desc_len(sh_desc); 1185 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1186 1187 if (contig & GIV_SRC_CONTIG) { 1188 src_dma = sg_dma_address(req->assoc); 1189 in_options = 0; 1190 } else { 1191 src_dma = edesc->link_tbl_dma; 1192 link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents; 1193 in_options = LDST_SGF; 1194 } 1195 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 1196 req->cryptlen - authsize, in_options); 1197 1198 if (contig & GIV_DST_CONTIG) { 1199 dst_dma = edesc->iv_dma; 1200 } else { 1201 if (likely(req->src == req->dst)) { 1202 dst_dma = src_dma + sizeof(struct link_tbl_entry) * 1203 edesc->assoc_nents; 1204 out_options = LDST_SGF; 1205 } else { 1206 dst_dma = edesc->link_tbl_dma + 1207 link_tbl_index * 1208 sizeof(struct link_tbl_entry); 1209 out_options = LDST_SGF; 1210 } 1211 } 1212 1213 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options); 1214 } 1215 1216 /* 1217 * Fill in ablkcipher job descriptor 1218 */ 1219 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, 1220 struct ablkcipher_edesc *edesc, 1221 struct ablkcipher_request *req, 1222 bool iv_contig) 1223 { 1224 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1225 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1226 u32 *desc = edesc->hw_desc; 1227 u32 out_options = 0, in_options; 1228 dma_addr_t dst_dma, src_dma; 1229 int len, link_tbl_index = 0; 1230 1231 #ifdef DEBUG 1232 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 1233 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1234 ivsize, 1); 1235 print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", 1236 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1237 edesc->src_nents ? 100 : req->nbytes, 1); 1238 #endif 1239 1240 len = desc_len(sh_desc); 1241 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1242 1243 if (iv_contig) { 1244 src_dma = edesc->iv_dma; 1245 in_options = 0; 1246 } else { 1247 src_dma = edesc->link_tbl_dma; 1248 link_tbl_index += (iv_contig ? 
0 : 1) + edesc->src_nents; 1249 in_options = LDST_SGF; 1250 } 1251 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); 1252 1253 if (likely(req->src == req->dst)) { 1254 if (!edesc->src_nents && iv_contig) { 1255 dst_dma = sg_dma_address(req->src); 1256 } else { 1257 dst_dma = edesc->link_tbl_dma + 1258 sizeof(struct link_tbl_entry); 1259 out_options = LDST_SGF; 1260 } 1261 } else { 1262 if (!edesc->dst_nents) { 1263 dst_dma = sg_dma_address(req->dst); 1264 } else { 1265 dst_dma = edesc->link_tbl_dma + 1266 link_tbl_index * sizeof(struct link_tbl_entry); 1267 out_options = LDST_SGF; 1268 } 1269 } 1270 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); 1271 } 1272 1273 /* 1274 * derive number of elements in scatterlist 1275 */ 1276 static int sg_count(struct scatterlist *sg_list, int nbytes) 1277 { 1278 struct scatterlist *sg = sg_list; 1279 int sg_nents = 0; 1280 1281 while (nbytes > 0) { 1282 sg_nents++; 1283 nbytes -= sg->length; 1284 if (!sg_is_last(sg) && (sg + 1)->length == 0) 1285 BUG(); /* Not support chaining */ 1286 sg = scatterwalk_sg_next(sg); 1287 } 1288 1289 if (likely(sg_nents == 1)) 1290 return 0; 1291 1292 return sg_nents; 1293 } 1294 1295 /* 1296 * allocate and map the aead extended descriptor 1297 */ 1298 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 1299 int desc_bytes, bool *all_contig_ptr) 1300 { 1301 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1302 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1303 struct device *jrdev = ctx->jrdev; 1304 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1305 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1306 int assoc_nents, src_nents, dst_nents = 0; 1307 struct aead_edesc *edesc; 1308 dma_addr_t iv_dma = 0; 1309 int sgc; 1310 bool all_contig = true; 1311 int ivsize = crypto_aead_ivsize(aead); 1312 int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; 1313 1314 assoc_nents = sg_count(req->assoc, req->assoclen); 1315 src_nents = sg_count(req->src, req->cryptlen); 1316 1317 if (unlikely(req->dst != req->src)) 1318 dst_nents = sg_count(req->dst, req->cryptlen); 1319 1320 sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, 1321 DMA_BIDIRECTIONAL); 1322 if (likely(req->src == req->dst)) { 1323 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1324 DMA_BIDIRECTIONAL); 1325 } else { 1326 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1327 DMA_TO_DEVICE); 1328 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1329 DMA_FROM_DEVICE); 1330 } 1331 1332 /* Check if data are contiguous */ 1333 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); 1334 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 1335 iv_dma || src_nents || iv_dma + ivsize != 1336 sg_dma_address(req->src)) { 1337 all_contig = false; 1338 assoc_nents = assoc_nents ? : 1; 1339 src_nents = src_nents ? 
: 1; 1340 link_tbl_len = assoc_nents + 1 + src_nents; 1341 } 1342 link_tbl_len += dst_nents; 1343 1344 link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); 1345 1346 /* allocate space for base edesc and hw desc commands, link tables */ 1347 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1348 link_tbl_bytes, GFP_DMA | flags); 1349 if (!edesc) { 1350 dev_err(jrdev, "could not allocate extended descriptor\n"); 1351 return ERR_PTR(-ENOMEM); 1352 } 1353 1354 edesc->assoc_nents = assoc_nents; 1355 edesc->src_nents = src_nents; 1356 edesc->dst_nents = dst_nents; 1357 edesc->iv_dma = iv_dma; 1358 edesc->link_tbl_bytes = link_tbl_bytes; 1359 edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + 1360 desc_bytes; 1361 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1362 link_tbl_bytes, DMA_TO_DEVICE); 1363 *all_contig_ptr = all_contig; 1364 1365 link_tbl_index = 0; 1366 if (!all_contig) { 1367 sg_to_link_tbl(req->assoc, 1368 (assoc_nents ? : 1), 1369 edesc->link_tbl + 1370 link_tbl_index, 0); 1371 link_tbl_index += assoc_nents ? : 1; 1372 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 1373 iv_dma, ivsize, 0); 1374 link_tbl_index += 1; 1375 sg_to_link_tbl_last(req->src, 1376 (src_nents ? : 1), 1377 edesc->link_tbl + 1378 link_tbl_index, 0); 1379 link_tbl_index += src_nents ? : 1; 1380 } 1381 if (dst_nents) { 1382 sg_to_link_tbl_last(req->dst, dst_nents, 1383 edesc->link_tbl + link_tbl_index, 0); 1384 } 1385 1386 return edesc; 1387 } 1388 1389 static int aead_encrypt(struct aead_request *req) 1390 { 1391 struct aead_edesc *edesc; 1392 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1393 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1394 struct device *jrdev = ctx->jrdev; 1395 bool all_contig; 1396 u32 *desc; 1397 int ret = 0; 1398 1399 req->cryptlen += ctx->authsize; 1400 1401 /* allocate extended descriptor */ 1402 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1403 CAAM_CMD_SZ, &all_contig); 1404 if (IS_ERR(edesc)) 1405 return PTR_ERR(edesc); 1406 1407 /* Create and submit job descriptor */ 1408 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, 1409 all_contig, true); 1410 #ifdef DEBUG 1411 print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", 1412 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1413 desc_bytes(edesc->hw_desc), 1); 1414 #endif 1415 1416 desc = edesc->hw_desc; 1417 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1418 if (!ret) { 1419 ret = -EINPROGRESS; 1420 } else { 1421 aead_unmap(jrdev, edesc, req); 1422 kfree(edesc); 1423 } 1424 1425 return ret; 1426 } 1427 1428 static int aead_decrypt(struct aead_request *req) 1429 { 1430 struct aead_edesc *edesc; 1431 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1432 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1433 struct device *jrdev = ctx->jrdev; 1434 bool all_contig; 1435 u32 *desc; 1436 int ret = 0; 1437 1438 /* allocate extended descriptor */ 1439 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1440 CAAM_CMD_SZ, &all_contig); 1441 if (IS_ERR(edesc)) 1442 return PTR_ERR(edesc); 1443 1444 #ifdef DEBUG 1445 print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ", 1446 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1447 req->cryptlen, 1); 1448 #endif 1449 1450 /* Create and submit job descriptor*/ 1451 init_aead_job(ctx->sh_desc_dec, 1452 ctx->sh_desc_dec_dma, edesc, req, all_contig, false); 1453 #ifdef DEBUG 1454 print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", 1455 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1456 desc_bytes(edesc->hw_desc), 
1); 1457 #endif 1458 1459 desc = edesc->hw_desc; 1460 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 1461 if (!ret) { 1462 ret = -EINPROGRESS; 1463 } else { 1464 aead_unmap(jrdev, edesc, req); 1465 kfree(edesc); 1466 } 1467 1468 return ret; 1469 } 1470 1471 /* 1472 * allocate and map the aead extended descriptor for aead givencrypt 1473 */ 1474 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request 1475 *greq, int desc_bytes, 1476 u32 *contig_ptr) 1477 { 1478 struct aead_request *req = &greq->areq; 1479 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1480 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1481 struct device *jrdev = ctx->jrdev; 1482 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1483 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1484 int assoc_nents, src_nents, dst_nents = 0; 1485 struct aead_edesc *edesc; 1486 dma_addr_t iv_dma = 0; 1487 int sgc; 1488 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; 1489 int ivsize = crypto_aead_ivsize(aead); 1490 int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; 1491 1492 assoc_nents = sg_count(req->assoc, req->assoclen); 1493 src_nents = sg_count(req->src, req->cryptlen); 1494 1495 if (unlikely(req->dst != req->src)) 1496 dst_nents = sg_count(req->dst, req->cryptlen); 1497 1498 sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, 1499 DMA_BIDIRECTIONAL); 1500 if (likely(req->src == req->dst)) { 1501 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1502 DMA_BIDIRECTIONAL); 1503 } else { 1504 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1505 DMA_TO_DEVICE); 1506 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1507 DMA_FROM_DEVICE); 1508 } 1509 1510 /* Check if data are contiguous */ 1511 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); 1512 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 1513 iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) 1514 contig &= ~GIV_SRC_CONTIG; 1515 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) 1516 contig &= ~GIV_DST_CONTIG; 1517 if (unlikely(req->src != req->dst)) { 1518 dst_nents = dst_nents ? : 1; 1519 link_tbl_len += 1; 1520 } 1521 if (!(contig & GIV_SRC_CONTIG)) { 1522 assoc_nents = assoc_nents ? : 1; 1523 src_nents = src_nents ? 
: 1; 1524 link_tbl_len += assoc_nents + 1 + src_nents; 1525 if (likely(req->src == req->dst)) 1526 contig &= ~GIV_DST_CONTIG; 1527 } 1528 link_tbl_len += dst_nents; 1529 1530 link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); 1531 1532 /* allocate space for base edesc and hw desc commands, link tables */ 1533 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1534 link_tbl_bytes, GFP_DMA | flags); 1535 if (!edesc) { 1536 dev_err(jrdev, "could not allocate extended descriptor\n"); 1537 return ERR_PTR(-ENOMEM); 1538 } 1539 1540 edesc->assoc_nents = assoc_nents; 1541 edesc->src_nents = src_nents; 1542 edesc->dst_nents = dst_nents; 1543 edesc->iv_dma = iv_dma; 1544 edesc->link_tbl_bytes = link_tbl_bytes; 1545 edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + 1546 desc_bytes; 1547 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1548 link_tbl_bytes, DMA_TO_DEVICE); 1549 *contig_ptr = contig; 1550 1551 link_tbl_index = 0; 1552 if (!(contig & GIV_SRC_CONTIG)) { 1553 sg_to_link_tbl(req->assoc, assoc_nents, 1554 edesc->link_tbl + 1555 link_tbl_index, 0); 1556 link_tbl_index += assoc_nents; 1557 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 1558 iv_dma, ivsize, 0); 1559 link_tbl_index += 1; 1560 sg_to_link_tbl_last(req->src, src_nents, 1561 edesc->link_tbl + 1562 link_tbl_index, 0); 1563 link_tbl_index += src_nents; 1564 } 1565 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { 1566 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 1567 iv_dma, ivsize, 0); 1568 link_tbl_index += 1; 1569 sg_to_link_tbl_last(req->dst, dst_nents, 1570 edesc->link_tbl + link_tbl_index, 0); 1571 } 1572 1573 return edesc; 1574 } 1575 1576 static int aead_givencrypt(struct aead_givcrypt_request *areq) 1577 { 1578 struct aead_request *req = &areq->areq; 1579 struct aead_edesc *edesc; 1580 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1581 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1582 struct device *jrdev = ctx->jrdev; 1583 u32 contig; 1584 u32 *desc; 1585 int ret = 0; 1586 1587 req->cryptlen += ctx->authsize; 1588 1589 /* allocate extended descriptor */ 1590 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * 1591 CAAM_CMD_SZ, &contig); 1592 1593 if (IS_ERR(edesc)) 1594 return PTR_ERR(edesc); 1595 1596 #ifdef DEBUG 1597 print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ", 1598 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1599 req->cryptlen, 1); 1600 #endif 1601 1602 /* Create and submit job descriptor*/ 1603 init_aead_giv_job(ctx->sh_desc_givenc, 1604 ctx->sh_desc_givenc_dma, edesc, req, contig); 1605 #ifdef DEBUG 1606 print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", 1607 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1608 desc_bytes(edesc->hw_desc), 1); 1609 #endif 1610 1611 desc = edesc->hw_desc; 1612 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1613 if (!ret) { 1614 ret = -EINPROGRESS; 1615 } else { 1616 aead_unmap(jrdev, edesc, req); 1617 kfree(edesc); 1618 } 1619 1620 return ret; 1621 } 1622 1623 /* 1624 * allocate and map the ablkcipher extended descriptor for ablkcipher 1625 */ 1626 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request 1627 *req, int desc_bytes, 1628 bool *iv_contig_out) 1629 { 1630 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1631 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1632 struct device *jrdev = ctx->jrdev; 1633 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1634 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
1635 GFP_KERNEL : GFP_ATOMIC; 1636 int src_nents, dst_nents = 0, link_tbl_bytes; 1637 struct ablkcipher_edesc *edesc; 1638 dma_addr_t iv_dma = 0; 1639 bool iv_contig = false; 1640 int sgc; 1641 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1642 int link_tbl_index; 1643 1644 src_nents = sg_count(req->src, req->nbytes); 1645 1646 if (unlikely(req->dst != req->src)) 1647 dst_nents = sg_count(req->dst, req->nbytes); 1648 1649 if (likely(req->src == req->dst)) { 1650 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1651 DMA_BIDIRECTIONAL); 1652 } else { 1653 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1654 DMA_TO_DEVICE); 1655 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1656 DMA_FROM_DEVICE); 1657 } 1658 1659 /* 1660 * Check if iv can be contiguous with source and destination. 1661 * If so, include it. If not, create scatterlist. 1662 */ 1663 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); 1664 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) 1665 iv_contig = true; 1666 else 1667 src_nents = src_nents ? : 1; 1668 link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * 1669 sizeof(struct link_tbl_entry); 1670 1671 /* allocate space for base edesc and hw desc commands, link tables */ 1672 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + 1673 link_tbl_bytes, GFP_DMA | flags); 1674 if (!edesc) { 1675 dev_err(jrdev, "could not allocate extended descriptor\n"); 1676 return ERR_PTR(-ENOMEM); 1677 } 1678 1679 edesc->src_nents = src_nents; 1680 edesc->dst_nents = dst_nents; 1681 edesc->link_tbl_bytes = link_tbl_bytes; 1682 edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1683 desc_bytes; 1684 1685 link_tbl_index = 0; 1686 if (!iv_contig) { 1687 sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0); 1688 sg_to_link_tbl_last(req->src, src_nents, 1689 edesc->link_tbl + 1, 0); 1690 link_tbl_index += 1 + src_nents; 1691 } 1692 1693 if (unlikely(dst_nents)) { 1694 sg_to_link_tbl_last(req->dst, dst_nents, 1695 edesc->link_tbl + link_tbl_index, 0); 1696 } 1697 1698 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1699 link_tbl_bytes, DMA_TO_DEVICE); 1700 edesc->iv_dma = iv_dma; 1701 1702 #ifdef DEBUG 1703 print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ", 1704 DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, 1705 link_tbl_bytes, 1); 1706 #endif 1707 1708 *iv_contig_out = iv_contig; 1709 return edesc; 1710 } 1711 1712 static int ablkcipher_encrypt(struct ablkcipher_request *req) 1713 { 1714 struct ablkcipher_edesc *edesc; 1715 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1716 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1717 struct device *jrdev = ctx->jrdev; 1718 bool iv_contig; 1719 u32 *desc; 1720 int ret = 0; 1721 1722 /* allocate extended descriptor */ 1723 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1724 CAAM_CMD_SZ, &iv_contig); 1725 if (IS_ERR(edesc)) 1726 return PTR_ERR(edesc); 1727 1728 /* Create and submit job descriptor*/ 1729 init_ablkcipher_job(ctx->sh_desc_enc, 1730 ctx->sh_desc_enc_dma, edesc, req, iv_contig); 1731 #ifdef DEBUG 1732 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", 1733 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1734 desc_bytes(edesc->hw_desc), 1); 1735 #endif 1736 desc = edesc->hw_desc; 1737 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); 1738 1739 if (!ret) { 1740 ret = -EINPROGRESS; 1741 } else { 1742 ablkcipher_unmap(jrdev, edesc, req); 1743 kfree(edesc); 1744 } 1745 1746 
return ret; 1747 } 1748 1749 static int ablkcipher_decrypt(struct ablkcipher_request *req) 1750 { 1751 struct ablkcipher_edesc *edesc; 1752 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1753 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1754 struct device *jrdev = ctx->jrdev; 1755 bool iv_contig; 1756 u32 *desc; 1757 int ret = 0; 1758 1759 /* allocate extended descriptor */ 1760 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1761 CAAM_CMD_SZ, &iv_contig); 1762 if (IS_ERR(edesc)) 1763 return PTR_ERR(edesc); 1764 1765 /* Create and submit job descriptor*/ 1766 init_ablkcipher_job(ctx->sh_desc_dec, 1767 ctx->sh_desc_dec_dma, edesc, req, iv_contig); 1768 desc = edesc->hw_desc; 1769 #ifdef DEBUG 1770 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", 1771 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1772 desc_bytes(edesc->hw_desc), 1); 1773 #endif 1774 1775 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); 1776 if (!ret) { 1777 ret = -EINPROGRESS; 1778 } else { 1779 ablkcipher_unmap(jrdev, edesc, req); 1780 kfree(edesc); 1781 } 1782 1783 return ret; 1784 } 1785 1786 #define template_aead template_u.aead 1787 #define template_ablkcipher template_u.ablkcipher 1788 struct caam_alg_template { 1789 char name[CRYPTO_MAX_ALG_NAME]; 1790 char driver_name[CRYPTO_MAX_ALG_NAME]; 1791 unsigned int blocksize; 1792 u32 type; 1793 union { 1794 struct ablkcipher_alg ablkcipher; 1795 struct aead_alg aead; 1796 struct blkcipher_alg blkcipher; 1797 struct cipher_alg cipher; 1798 struct compress_alg compress; 1799 struct rng_alg rng; 1800 } template_u; 1801 u32 class1_alg_type; 1802 u32 class2_alg_type; 1803 u32 alg_op; 1804 }; 1805 1806 static struct caam_alg_template driver_algs[] = { 1807 /* single-pass ipsec_esp descriptor */ 1808 { 1809 .name = "authenc(hmac(md5),cbc(aes))", 1810 .driver_name = "authenc-hmac-md5-cbc-aes-caam", 1811 .blocksize = AES_BLOCK_SIZE, 1812 .type = CRYPTO_ALG_TYPE_AEAD, 1813 .template_aead = { 1814 .setkey = aead_setkey, 1815 .setauthsize = aead_setauthsize, 1816 .encrypt = aead_encrypt, 1817 .decrypt = aead_decrypt, 1818 .givencrypt = aead_givencrypt, 1819 .geniv = "<built-in>", 1820 .ivsize = AES_BLOCK_SIZE, 1821 .maxauthsize = MD5_DIGEST_SIZE, 1822 }, 1823 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1824 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, 1825 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 1826 }, 1827 { 1828 .name = "authenc(hmac(sha1),cbc(aes))", 1829 .driver_name = "authenc-hmac-sha1-cbc-aes-caam", 1830 .blocksize = AES_BLOCK_SIZE, 1831 .type = CRYPTO_ALG_TYPE_AEAD, 1832 .template_aead = { 1833 .setkey = aead_setkey, 1834 .setauthsize = aead_setauthsize, 1835 .encrypt = aead_encrypt, 1836 .decrypt = aead_decrypt, 1837 .givencrypt = aead_givencrypt, 1838 .geniv = "<built-in>", 1839 .ivsize = AES_BLOCK_SIZE, 1840 .maxauthsize = SHA1_DIGEST_SIZE, 1841 }, 1842 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1843 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, 1844 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 1845 }, 1846 { 1847 .name = "authenc(hmac(sha224),cbc(aes))", 1848 .driver_name = "authenc-hmac-sha224-cbc-aes-caam", 1849 .blocksize = AES_BLOCK_SIZE, 1850 .template_aead = { 1851 .setkey = aead_setkey, 1852 .setauthsize = aead_setauthsize, 1853 .encrypt = aead_encrypt, 1854 .decrypt = aead_decrypt, 1855 .givencrypt = aead_givencrypt, 1856 .geniv = "<built-in>", 1857 .ivsize = AES_BLOCK_SIZE, 1858 .maxauthsize = SHA224_DIGEST_SIZE, 1859 }, 
static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
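	/* 3DES (des3_ede) CBC variants of the single-pass ipsec_esp descriptor */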
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
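	/* single-DES CBC variants of the single-pass ipsec_esp descriptor */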
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
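	/*
	 * The ablkcipher templates below use the generic "eseqiv" IV
	 * generator, unlike the AEAD templates above, which provide a
	 * built-in givencrypt.
	 */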
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};

struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
}
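/*
 * Module teardown: unregister each algorithm added to priv->alg_list at init
 * time, then deregister the job rings claimed for algapi use.
 */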
static void __exit caam_algapi_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_crypto_alg *t_alg, *n;
	int i, err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_deregister(priv->algapi_jr[i]);
		if (err < 0)
			break;
	}
	kfree(priv->algapi_jr);
}

static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
					      struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev, **jrdev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return -ENODEV;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->alg_list);

	jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
	if (!jrdev)
		return -ENOMEM;

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_register(ctrldev, &jrdev[i]);
		if (err < 0)
			break;
	}
	if (err < 0 && i == 0) {
		dev_err(ctrldev, "algapi error in job ring registration: %d\n",
			err);
		kfree(jrdev);
		return err;
	}

	priv->num_jrs_for_algapi = i;
	priv->algapi_jr = jrdev;
	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->alg_list);
	}
	if (!list_empty(&priv->alg_list))
		dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(dev_node, "compatible", NULL));

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");