18e8ec596SKim Phillips /* 28e8ec596SKim Phillips * caam - Freescale FSL CAAM support for crypto API 38e8ec596SKim Phillips * 48e8ec596SKim Phillips * Copyright 2008-2011 Freescale Semiconductor, Inc. 58e8ec596SKim Phillips * 68e8ec596SKim Phillips * Based on talitos crypto API driver. 78e8ec596SKim Phillips * 88e8ec596SKim Phillips * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008): 98e8ec596SKim Phillips * 108e8ec596SKim Phillips * --------------- --------------- 118e8ec596SKim Phillips * | JobDesc #1 |-------------------->| ShareDesc | 128e8ec596SKim Phillips * | *(packet 1) | | (PDB) | 138e8ec596SKim Phillips * --------------- |------------->| (hashKey) | 148e8ec596SKim Phillips * . | | (cipherKey) | 158e8ec596SKim Phillips * . | |-------->| (operation) | 168e8ec596SKim Phillips * --------------- | | --------------- 178e8ec596SKim Phillips * | JobDesc #2 |------| | 188e8ec596SKim Phillips * | *(packet 2) | | 198e8ec596SKim Phillips * --------------- | 208e8ec596SKim Phillips * . | 218e8ec596SKim Phillips * . | 228e8ec596SKim Phillips * --------------- | 238e8ec596SKim Phillips * | JobDesc #3 |------------ 248e8ec596SKim Phillips * | *(packet 3) | 258e8ec596SKim Phillips * --------------- 268e8ec596SKim Phillips * 278e8ec596SKim Phillips * The SharedDesc never changes for a connection unless rekeyed, but 288e8ec596SKim Phillips * each packet will likely be in a different place. So all we need 298e8ec596SKim Phillips * to know to process the packet is where the input is, where the 308e8ec596SKim Phillips * output goes, and what context we want to process with. Context is 318e8ec596SKim Phillips * in the SharedDesc, packet references in the JobDesc. 
328e8ec596SKim Phillips * 338e8ec596SKim Phillips * So, a job desc looks like: 348e8ec596SKim Phillips * 358e8ec596SKim Phillips * --------------------- 368e8ec596SKim Phillips * | Header | 378e8ec596SKim Phillips * | ShareDesc Pointer | 388e8ec596SKim Phillips * | SEQ_OUT_PTR | 398e8ec596SKim Phillips * | (output buffer) | 408e8ec596SKim Phillips * | SEQ_IN_PTR | 418e8ec596SKim Phillips * | (input buffer) | 428e8ec596SKim Phillips * | LOAD (to DECO) | 438e8ec596SKim Phillips * --------------------- 448e8ec596SKim Phillips */ 458e8ec596SKim Phillips 468e8ec596SKim Phillips #include "compat.h" 478e8ec596SKim Phillips 488e8ec596SKim Phillips #include "regs.h" 498e8ec596SKim Phillips #include "intern.h" 508e8ec596SKim Phillips #include "desc_constr.h" 518e8ec596SKim Phillips #include "jr.h" 528e8ec596SKim Phillips #include "error.h" 538e8ec596SKim Phillips 548e8ec596SKim Phillips /* 558e8ec596SKim Phillips * crypto alg 568e8ec596SKim Phillips */ 578e8ec596SKim Phillips #define CAAM_CRA_PRIORITY 3000 588e8ec596SKim Phillips /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ 598e8ec596SKim Phillips #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ 608e8ec596SKim Phillips SHA512_DIGEST_SIZE * 2) 618e8ec596SKim Phillips /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 628e8ec596SKim Phillips #define CAAM_MAX_IV_LENGTH 16 638e8ec596SKim Phillips 644427b1b4SKim Phillips /* length of descriptors text */ 651acebad3SYuan Kang #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3) 661acebad3SYuan Kang 671acebad3SYuan Kang #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) 681acebad3SYuan Kang #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) 691acebad3SYuan Kang #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ) 701acebad3SYuan Kang #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) 711acebad3SYuan Kang 72acdca31dSYuan Kang #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) 73acdca31dSYuan Kang #define DESC_ABLKCIPHER_ENC_LEN 
#define DESC_ABLKCIPHER_ENC_LEN	(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN	(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

/* Largest shared descriptor we ever build (givencrypt) plus inlined keys */
#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/*
 * Append a class 1 decrypt OPERATION, choosing between two variants at
 * run time: when the descriptor is entered with its keys already shared
 * (SHRD condition true), the OPERATION is issued with the DK (Decrypt
 * Key) bit set; otherwise the plain decrypt form is used.  The two
 * jump commands select exactly one of the two OPERATION commands.
 */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* Taken when shared: skip to the DK variant below */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	/* Non-shared path: jump over the DK variant */
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	/* Class 1 "jump 0": acts as a barrier on class 1 completion */
	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	/* Re-enable propagation of shared-descriptor errors */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL); 1151acebad3SYuan Kang set_jump_tgt_here(desc, jump_cmd); 116a2ecb155SKim Phillips append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 1171acebad3SYuan Kang } 1181acebad3SYuan Kang 1191acebad3SYuan Kang /* 1201acebad3SYuan Kang * For aead functions, read payload and write payload, 1211acebad3SYuan Kang * both of which are specified in req->src and req->dst 1221acebad3SYuan Kang */ 1231acebad3SYuan Kang static inline void aead_append_src_dst(u32 *desc, u32 msg_type) 1241acebad3SYuan Kang { 1251acebad3SYuan Kang append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | 1261acebad3SYuan Kang KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); 1271acebad3SYuan Kang append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); 1281acebad3SYuan Kang } 1291acebad3SYuan Kang 1301acebad3SYuan Kang /* 1311acebad3SYuan Kang * For aead encrypt and decrypt, read iv for both classes 1321acebad3SYuan Kang */ 1331acebad3SYuan Kang static inline void aead_append_ld_iv(u32 *desc, int ivsize) 1341acebad3SYuan Kang { 1351acebad3SYuan Kang append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 1361acebad3SYuan Kang LDST_CLASS_1_CCB | ivsize); 1371acebad3SYuan Kang append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize); 1381acebad3SYuan Kang } 1391acebad3SYuan Kang 1401acebad3SYuan Kang /* 141acdca31dSYuan Kang * For ablkcipher encrypt and decrypt, read from req->src and 142acdca31dSYuan Kang * write to req->dst 143acdca31dSYuan Kang */ 144acdca31dSYuan Kang static inline void ablkcipher_append_src_dst(u32 *desc) 145acdca31dSYuan Kang { 146acdca31dSYuan Kang append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ 147acdca31dSYuan Kang append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ 148acdca31dSYuan Kang append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | \ 149acdca31dSYuan Kang KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); \ 150acdca31dSYuan Kang append_seq_fifo_store(desc, 0, 
/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;	/* job ring used to submit this tfm's jobs */
	/* Pre-built shared descriptors, one per direction/operation */
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	/* DMA handles for the shared descriptors above */
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;	/* OP_ALG_* bits for the cipher (class 1) */
	u32 class2_alg_type;	/* OP_ALG_* bits for the auth alg (class 2) */
	u32 alg_op;		/* MDHA operation used for split key gen */
	/* split (ipad/opad) auth key, then the encryption key appended */
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;	/* DMA mapping of @key */
	unsigned int enckeylen;		/* bytes of encryption key in @key */
	unsigned int split_key_len;	/* actual split key length */
	unsigned int split_key_pad_len;	/* split key length padded to 16 */
	unsigned int authsize;		/* ICV length in bytes */
};

/*
 * Append the class 2 split authentication key and the class 1
 * encryption key to a shared descriptor: inlined as immediate data
 * when they fit in the descriptor buffer, otherwise referenced via
 * ctx->key_dma (split key at offset 0, enc key at split_key_pad_len).
 */
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}
/*
 * Begin an AEAD shared descriptor: emit the shared-descriptor header,
 * the key commands (skipped entirely when the descriptor is entered
 * already shared, i.e. the keys are still loaded from a prior job),
 * and finally re-enable error propagation once the keys are in place.
 */
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
2351acebad3SYuan Kang */ 2361acebad3SYuan Kang if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + 2371acebad3SYuan Kang ctx->split_key_pad_len + ctx->enckeylen <= 2381acebad3SYuan Kang CAAM_DESC_BYTES_MAX) 2391acebad3SYuan Kang keys_fit_inline = 1; 2401acebad3SYuan Kang 2411acebad3SYuan Kang /* aead_encrypt shared descriptor */ 2421acebad3SYuan Kang desc = ctx->sh_desc_enc; 2431acebad3SYuan Kang 2441acebad3SYuan Kang init_sh_desc_key_aead(desc, ctx, keys_fit_inline); 2451acebad3SYuan Kang 2461acebad3SYuan Kang /* Class 2 operation */ 2471acebad3SYuan Kang append_operation(desc, ctx->class2_alg_type | 2481acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 2491acebad3SYuan Kang 2501acebad3SYuan Kang /* cryptlen = seqoutlen - authsize */ 2511acebad3SYuan Kang append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); 2521acebad3SYuan Kang 2531acebad3SYuan Kang /* assoclen + cryptlen = seqinlen - ivsize */ 2541acebad3SYuan Kang append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); 2551acebad3SYuan Kang 2561acebad3SYuan Kang /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */ 2571acebad3SYuan Kang append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); 2581acebad3SYuan Kang 2591acebad3SYuan Kang /* read assoc before reading payload */ 2601acebad3SYuan Kang append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 2611acebad3SYuan Kang KEY_VLF); 2621acebad3SYuan Kang aead_append_ld_iv(desc, tfm->ivsize); 2631acebad3SYuan Kang 2641acebad3SYuan Kang /* Class 1 operation */ 2651acebad3SYuan Kang append_operation(desc, ctx->class1_alg_type | 2661acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 2671acebad3SYuan Kang 2681acebad3SYuan Kang /* Read and write cryptlen bytes */ 2691acebad3SYuan Kang append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 2701acebad3SYuan Kang append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 2711acebad3SYuan Kang aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); 2721acebad3SYuan 
Kang 2731acebad3SYuan Kang /* Write ICV */ 2741acebad3SYuan Kang append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | 2751acebad3SYuan Kang LDST_SRCDST_BYTE_CONTEXT); 2761acebad3SYuan Kang 2771acebad3SYuan Kang ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 2781acebad3SYuan Kang desc_bytes(desc), 2791acebad3SYuan Kang DMA_TO_DEVICE); 2801acebad3SYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 2811acebad3SYuan Kang dev_err(jrdev, "unable to map shared descriptor\n"); 2821acebad3SYuan Kang return -ENOMEM; 2831acebad3SYuan Kang } 2841acebad3SYuan Kang #ifdef DEBUG 2851acebad3SYuan Kang print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ", 2861acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 2871acebad3SYuan Kang desc_bytes(desc), 1); 2881acebad3SYuan Kang #endif 2891acebad3SYuan Kang 2901acebad3SYuan Kang /* 2911acebad3SYuan Kang * Job Descriptor and Shared Descriptors 2921acebad3SYuan Kang * must all fit into the 64-word Descriptor h/w Buffer 2931acebad3SYuan Kang */ 2941acebad3SYuan Kang if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + 2951acebad3SYuan Kang ctx->split_key_pad_len + ctx->enckeylen <= 2961acebad3SYuan Kang CAAM_DESC_BYTES_MAX) 2971acebad3SYuan Kang keys_fit_inline = 1; 2981acebad3SYuan Kang 2991acebad3SYuan Kang desc = ctx->sh_desc_dec; 3001acebad3SYuan Kang 3011acebad3SYuan Kang /* aead_decrypt shared descriptor */ 3021acebad3SYuan Kang init_sh_desc(desc, HDR_SHARE_WAIT); 3031acebad3SYuan Kang 3041acebad3SYuan Kang /* Skip if already shared */ 3051acebad3SYuan Kang key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 3061acebad3SYuan Kang JUMP_COND_SHRD); 3071acebad3SYuan Kang 3081acebad3SYuan Kang append_key_aead(desc, ctx, keys_fit_inline); 3091acebad3SYuan Kang 3101acebad3SYuan Kang /* Only propagate error immediately if shared */ 3111acebad3SYuan Kang jump_cmd = append_jump(desc, JUMP_TEST_ALL); 3121acebad3SYuan Kang set_jump_tgt_here(desc, key_jump_cmd); 313a2ecb155SKim Phillips append_cmd(desc, 
SET_OK_NO_PROP_ERRORS | CMD_LOAD); 3141acebad3SYuan Kang set_jump_tgt_here(desc, jump_cmd); 3151acebad3SYuan Kang 3161acebad3SYuan Kang /* Class 2 operation */ 3171acebad3SYuan Kang append_operation(desc, ctx->class2_alg_type | 3181acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); 3191acebad3SYuan Kang 3201acebad3SYuan Kang /* assoclen + cryptlen = seqinlen - ivsize */ 3211acebad3SYuan Kang append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, 3221acebad3SYuan Kang ctx->authsize + tfm->ivsize) 3231acebad3SYuan Kang /* assoclen = (assoclen + cryptlen) - cryptlen */ 3241acebad3SYuan Kang append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); 3251acebad3SYuan Kang append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); 3261acebad3SYuan Kang 3271acebad3SYuan Kang /* read assoc before reading payload */ 3281acebad3SYuan Kang append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 3291acebad3SYuan Kang KEY_VLF); 3301acebad3SYuan Kang 3311acebad3SYuan Kang aead_append_ld_iv(desc, tfm->ivsize); 3321acebad3SYuan Kang 3331acebad3SYuan Kang append_dec_op1(desc, ctx->class1_alg_type); 3341acebad3SYuan Kang 3351acebad3SYuan Kang /* Read and write cryptlen bytes */ 3361acebad3SYuan Kang append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); 3371acebad3SYuan Kang append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); 3381acebad3SYuan Kang aead_append_src_dst(desc, FIFOLD_TYPE_MSG); 3391acebad3SYuan Kang 3401acebad3SYuan Kang /* Load ICV */ 3411acebad3SYuan Kang append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | 3421acebad3SYuan Kang FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); 3431acebad3SYuan Kang append_dec_shr_done(desc); 3441acebad3SYuan Kang 3451acebad3SYuan Kang ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 3461acebad3SYuan Kang desc_bytes(desc), 3471acebad3SYuan Kang DMA_TO_DEVICE); 3481acebad3SYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 3491acebad3SYuan Kang dev_err(jrdev, "unable to 
map shared descriptor\n"); 3501acebad3SYuan Kang return -ENOMEM; 3511acebad3SYuan Kang } 3521acebad3SYuan Kang #ifdef DEBUG 3531acebad3SYuan Kang print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ", 3541acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 3551acebad3SYuan Kang desc_bytes(desc), 1); 3561acebad3SYuan Kang #endif 3571acebad3SYuan Kang 3581acebad3SYuan Kang /* 3591acebad3SYuan Kang * Job Descriptor and Shared Descriptors 3601acebad3SYuan Kang * must all fit into the 64-word Descriptor h/w Buffer 3611acebad3SYuan Kang */ 3621acebad3SYuan Kang if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + 3631acebad3SYuan Kang ctx->split_key_pad_len + ctx->enckeylen <= 3641acebad3SYuan Kang CAAM_DESC_BYTES_MAX) 3651acebad3SYuan Kang keys_fit_inline = 1; 3661acebad3SYuan Kang 3671acebad3SYuan Kang /* aead_givencrypt shared descriptor */ 3681acebad3SYuan Kang desc = ctx->sh_desc_givenc; 3691acebad3SYuan Kang 3701acebad3SYuan Kang init_sh_desc_key_aead(desc, ctx, keys_fit_inline); 3711acebad3SYuan Kang 3721acebad3SYuan Kang /* Generate IV */ 3731acebad3SYuan Kang geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | 3741acebad3SYuan Kang NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | 3751acebad3SYuan Kang NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); 3761acebad3SYuan Kang append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | 3771acebad3SYuan Kang LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); 3781acebad3SYuan Kang append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); 3791acebad3SYuan Kang append_move(desc, MOVE_SRC_INFIFO | 3801acebad3SYuan Kang MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT)); 3811acebad3SYuan Kang append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); 3821acebad3SYuan Kang 3831acebad3SYuan Kang /* Copy IV to class 1 context */ 3841acebad3SYuan Kang append_move(desc, MOVE_SRC_CLASS1CTX | 3851acebad3SYuan Kang MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT)); 3861acebad3SYuan Kang 3871acebad3SYuan Kang /* Return to encryption */ 
3881acebad3SYuan Kang append_operation(desc, ctx->class2_alg_type | 3891acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 3901acebad3SYuan Kang 3911acebad3SYuan Kang /* ivsize + cryptlen = seqoutlen - authsize */ 3921acebad3SYuan Kang append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); 3931acebad3SYuan Kang 3941acebad3SYuan Kang /* assoclen = seqinlen - (ivsize + cryptlen) */ 3951acebad3SYuan Kang append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); 3961acebad3SYuan Kang 3971acebad3SYuan Kang /* read assoc before reading payload */ 3981acebad3SYuan Kang append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 3991acebad3SYuan Kang KEY_VLF); 4001acebad3SYuan Kang 4011acebad3SYuan Kang /* Copy iv from class 1 ctx to class 2 fifo*/ 4021acebad3SYuan Kang moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | 4031acebad3SYuan Kang NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); 4041acebad3SYuan Kang append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | 4051acebad3SYuan Kang LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); 4061acebad3SYuan Kang append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | 4071acebad3SYuan Kang LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); 4081acebad3SYuan Kang 4091acebad3SYuan Kang /* Class 1 operation */ 4101acebad3SYuan Kang append_operation(desc, ctx->class1_alg_type | 4111acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 4121acebad3SYuan Kang 4131acebad3SYuan Kang /* Will write ivsize + cryptlen */ 4141acebad3SYuan Kang append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 4151acebad3SYuan Kang 4161acebad3SYuan Kang /* Not need to reload iv */ 4171acebad3SYuan Kang append_seq_fifo_load(desc, tfm->ivsize, 4181acebad3SYuan Kang FIFOLD_CLASS_SKIP); 4191acebad3SYuan Kang 4201acebad3SYuan Kang /* Will read cryptlen */ 4211acebad3SYuan Kang append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 4221acebad3SYuan Kang aead_append_src_dst(desc, 
FIFOLD_TYPE_MSG1OUT2); 4231acebad3SYuan Kang 4241acebad3SYuan Kang /* Write ICV */ 4251acebad3SYuan Kang append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | 4261acebad3SYuan Kang LDST_SRCDST_BYTE_CONTEXT); 4271acebad3SYuan Kang 4281acebad3SYuan Kang ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, 4291acebad3SYuan Kang desc_bytes(desc), 4301acebad3SYuan Kang DMA_TO_DEVICE); 4311acebad3SYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { 4321acebad3SYuan Kang dev_err(jrdev, "unable to map shared descriptor\n"); 4331acebad3SYuan Kang return -ENOMEM; 4341acebad3SYuan Kang } 4351acebad3SYuan Kang #ifdef DEBUG 4361acebad3SYuan Kang print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ", 4371acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 4381acebad3SYuan Kang desc_bytes(desc), 1); 4391acebad3SYuan Kang #endif 4401acebad3SYuan Kang 4411acebad3SYuan Kang return 0; 4421acebad3SYuan Kang } 4431acebad3SYuan Kang 4440e479300SYuan Kang static int aead_setauthsize(struct crypto_aead *authenc, 4458e8ec596SKim Phillips unsigned int authsize) 4468e8ec596SKim Phillips { 4478e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(authenc); 4488e8ec596SKim Phillips 4498e8ec596SKim Phillips ctx->authsize = authsize; 4501acebad3SYuan Kang aead_set_sh_desc(authenc); 4518e8ec596SKim Phillips 4528e8ec596SKim Phillips return 0; 4538e8ec596SKim Phillips } 4548e8ec596SKim Phillips 4558e8ec596SKim Phillips struct split_key_result { 4568e8ec596SKim Phillips struct completion completion; 4578e8ec596SKim Phillips int err; 4588e8ec596SKim Phillips }; 4598e8ec596SKim Phillips 4608e8ec596SKim Phillips static void split_key_done(struct device *dev, u32 *desc, u32 err, 4618e8ec596SKim Phillips void *context) 4628e8ec596SKim Phillips { 4638e8ec596SKim Phillips struct split_key_result *res = context; 4648e8ec596SKim Phillips 4658e8ec596SKim Phillips #ifdef DEBUG 4668e8ec596SKim Phillips dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 
4678e8ec596SKim Phillips #endif 4681acebad3SYuan Kang 4698e8ec596SKim Phillips if (err) { 470de2954d6SKim Phillips char tmp[CAAM_ERROR_STR_MAX]; 4718e8ec596SKim Phillips 4728e8ec596SKim Phillips dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 4738e8ec596SKim Phillips } 4748e8ec596SKim Phillips 4758e8ec596SKim Phillips res->err = err; 4768e8ec596SKim Phillips 4778e8ec596SKim Phillips complete(&res->completion); 4788e8ec596SKim Phillips } 4798e8ec596SKim Phillips 4808e8ec596SKim Phillips /* 4818e8ec596SKim Phillips get a split ipad/opad key 4828e8ec596SKim Phillips 4838e8ec596SKim Phillips Split key generation----------------------------------------------- 4848e8ec596SKim Phillips 4858e8ec596SKim Phillips [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 4868e8ec596SKim Phillips [01] 0x04000014 key: class2->keyreg len=20 4878e8ec596SKim Phillips @0xffe01000 4888e8ec596SKim Phillips [03] 0x84410014 operation: cls2-op sha1 hmac init dec 4898e8ec596SKim Phillips [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm 4908e8ec596SKim Phillips [05] 0xa4000001 jump: class2 local all ->1 [06] 4918e8ec596SKim Phillips [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 4928e8ec596SKim Phillips @0xffe04000 4938e8ec596SKim Phillips */ 4948e8ec596SKim Phillips static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen) 4958e8ec596SKim Phillips { 4968e8ec596SKim Phillips struct device *jrdev = ctx->jrdev; 4978e8ec596SKim Phillips u32 *desc; 4988e8ec596SKim Phillips struct split_key_result result; 4998e8ec596SKim Phillips dma_addr_t dma_addr_in, dma_addr_out; 5008e8ec596SKim Phillips int ret = 0; 5018e8ec596SKim Phillips 5028e8ec596SKim Phillips desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 5038e8ec596SKim Phillips 5048e8ec596SKim Phillips init_job_desc(desc, 0); 5058e8ec596SKim Phillips 5068e8ec596SKim Phillips dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen, 5078e8ec596SKim Phillips DMA_TO_DEVICE); 
5088e8ec596SKim Phillips if (dma_mapping_error(jrdev, dma_addr_in)) { 5098e8ec596SKim Phillips dev_err(jrdev, "unable to map key input memory\n"); 5108e8ec596SKim Phillips kfree(desc); 5118e8ec596SKim Phillips return -ENOMEM; 5128e8ec596SKim Phillips } 5138e8ec596SKim Phillips append_key(desc, dma_addr_in, authkeylen, CLASS_2 | 5148e8ec596SKim Phillips KEY_DEST_CLASS_REG); 5158e8ec596SKim Phillips 5168e8ec596SKim Phillips /* Sets MDHA up into an HMAC-INIT */ 5178e8ec596SKim Phillips append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT | 5188e8ec596SKim Phillips OP_ALG_AS_INIT); 5198e8ec596SKim Phillips 5208e8ec596SKim Phillips /* 5218e8ec596SKim Phillips * do a FIFO_LOAD of zero, this will trigger the internal key expansion 5228e8ec596SKim Phillips into both pads inside MDHA 5238e8ec596SKim Phillips */ 5248e8ec596SKim Phillips append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | 5258e8ec596SKim Phillips FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); 5268e8ec596SKim Phillips 5278e8ec596SKim Phillips /* 5288e8ec596SKim Phillips * FIFO_STORE with the explicit split-key content store 5298e8ec596SKim Phillips * (0x26 output type) 5308e8ec596SKim Phillips */ 5318e8ec596SKim Phillips dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, 5328e8ec596SKim Phillips DMA_FROM_DEVICE); 5338e8ec596SKim Phillips if (dma_mapping_error(jrdev, dma_addr_out)) { 5348e8ec596SKim Phillips dev_err(jrdev, "unable to map key output memory\n"); 5358e8ec596SKim Phillips kfree(desc); 5368e8ec596SKim Phillips return -ENOMEM; 5378e8ec596SKim Phillips } 5388e8ec596SKim Phillips append_fifo_store(desc, dma_addr_out, ctx->split_key_len, 5398e8ec596SKim Phillips LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); 5408e8ec596SKim Phillips 5418e8ec596SKim Phillips #ifdef DEBUG 5428e8ec596SKim Phillips print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", 5438e8ec596SKim Phillips DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1); 5448e8ec596SKim Phillips print_hex_dump(KERN_ERR, 
/*
 * crypto API setkey entry point for authenc AEADs: parse the
 * crypto_authenc_key_param blob into an authentication key and an
 * encryption key, derive the MDHA split key from the former into
 * ctx->key, append the encryption key after it, DMA-map the whole
 * buffer and rebuild the shared descriptors.
 *
 * Returns 0 on success, -EINVAL for a malformed/oversized key blob,
 * or -ENOMEM on mapping failure.
 */
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	/* Step past the rtattr header; the concatenated keys follow */
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	/* split key is stored 16-byte aligned in ctx->key */
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/* Derive the ipad/opad split key into ctx->key (HW job, blocks) */
	ret = gen_split_key(ctx, key, authkeylen);
	if (ret) {
		goto badkey;
	}

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		/* descriptor build failed: undo the key mapping */
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
ctx->split_key_pad_len + 6228e8ec596SKim Phillips enckeylen, DMA_TO_DEVICE); 623885e9e2fSYuan Kang if (dma_mapping_error(jrdev, ctx->key_dma)) { 6248e8ec596SKim Phillips dev_err(jrdev, "unable to map key i/o memory\n"); 6258e8ec596SKim Phillips return -ENOMEM; 6268e8ec596SKim Phillips } 6278e8ec596SKim Phillips #ifdef DEBUG 6288e8ec596SKim Phillips print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", 6298e8ec596SKim Phillips DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 6308e8ec596SKim Phillips ctx->split_key_pad_len + enckeylen, 1); 6318e8ec596SKim Phillips #endif 6328e8ec596SKim Phillips 6338e8ec596SKim Phillips ctx->enckeylen = enckeylen; 6348e8ec596SKim Phillips 6351acebad3SYuan Kang ret = aead_set_sh_desc(aead); 6368e8ec596SKim Phillips if (ret) { 637885e9e2fSYuan Kang dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + 6388e8ec596SKim Phillips enckeylen, DMA_TO_DEVICE); 6398e8ec596SKim Phillips } 6408e8ec596SKim Phillips 6418e8ec596SKim Phillips return ret; 6428e8ec596SKim Phillips badkey: 6438e8ec596SKim Phillips crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 6448e8ec596SKim Phillips return -EINVAL; 6458e8ec596SKim Phillips } 6468e8ec596SKim Phillips 647acdca31dSYuan Kang static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 648acdca31dSYuan Kang const u8 *key, unsigned int keylen) 649acdca31dSYuan Kang { 650acdca31dSYuan Kang struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 651acdca31dSYuan Kang struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher; 652acdca31dSYuan Kang struct device *jrdev = ctx->jrdev; 653acdca31dSYuan Kang int ret = 0; 654acdca31dSYuan Kang u32 *key_jump_cmd, *jump_cmd; 655acdca31dSYuan Kang u32 *desc; 656acdca31dSYuan Kang 657acdca31dSYuan Kang #ifdef DEBUG 658acdca31dSYuan Kang print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", 659acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 660acdca31dSYuan Kang #endif 661acdca31dSYuan Kang 662acdca31dSYuan Kang memcpy(ctx->key, key, 
keylen); 663acdca31dSYuan Kang ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, 664acdca31dSYuan Kang DMA_TO_DEVICE); 665acdca31dSYuan Kang if (dma_mapping_error(jrdev, ctx->key_dma)) { 666acdca31dSYuan Kang dev_err(jrdev, "unable to map key i/o memory\n"); 667acdca31dSYuan Kang return -ENOMEM; 668acdca31dSYuan Kang } 669acdca31dSYuan Kang ctx->enckeylen = keylen; 670acdca31dSYuan Kang 671acdca31dSYuan Kang /* ablkcipher_encrypt shared descriptor */ 672acdca31dSYuan Kang desc = ctx->sh_desc_enc; 673acdca31dSYuan Kang init_sh_desc(desc, HDR_SHARE_WAIT); 674acdca31dSYuan Kang /* Skip if already shared */ 675acdca31dSYuan Kang key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 676acdca31dSYuan Kang JUMP_COND_SHRD); 677acdca31dSYuan Kang 678acdca31dSYuan Kang /* Load class1 key only */ 679acdca31dSYuan Kang append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, 680acdca31dSYuan Kang ctx->enckeylen, CLASS_1 | 681acdca31dSYuan Kang KEY_DEST_CLASS_REG); 682acdca31dSYuan Kang 683acdca31dSYuan Kang set_jump_tgt_here(desc, key_jump_cmd); 684acdca31dSYuan Kang 685acdca31dSYuan Kang /* Propagate errors from shared to job descriptor */ 686a2ecb155SKim Phillips append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 687acdca31dSYuan Kang 688acdca31dSYuan Kang /* Load iv */ 689acdca31dSYuan Kang append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 690acdca31dSYuan Kang LDST_CLASS_1_CCB | tfm->ivsize); 691acdca31dSYuan Kang 692acdca31dSYuan Kang /* Load operation */ 693acdca31dSYuan Kang append_operation(desc, ctx->class1_alg_type | 694acdca31dSYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 695acdca31dSYuan Kang 696acdca31dSYuan Kang /* Perform operation */ 697acdca31dSYuan Kang ablkcipher_append_src_dst(desc); 698acdca31dSYuan Kang 699acdca31dSYuan Kang ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 700acdca31dSYuan Kang desc_bytes(desc), 701acdca31dSYuan Kang DMA_TO_DEVICE); 702acdca31dSYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 
703acdca31dSYuan Kang dev_err(jrdev, "unable to map shared descriptor\n"); 704acdca31dSYuan Kang return -ENOMEM; 705acdca31dSYuan Kang } 706acdca31dSYuan Kang #ifdef DEBUG 707acdca31dSYuan Kang print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ", 708acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 709acdca31dSYuan Kang desc_bytes(desc), 1); 710acdca31dSYuan Kang #endif 711acdca31dSYuan Kang /* ablkcipher_decrypt shared descriptor */ 712acdca31dSYuan Kang desc = ctx->sh_desc_dec; 713acdca31dSYuan Kang 714acdca31dSYuan Kang init_sh_desc(desc, HDR_SHARE_WAIT); 715acdca31dSYuan Kang /* Skip if already shared */ 716acdca31dSYuan Kang key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 717acdca31dSYuan Kang JUMP_COND_SHRD); 718acdca31dSYuan Kang 719acdca31dSYuan Kang /* Load class1 key only */ 720acdca31dSYuan Kang append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, 721acdca31dSYuan Kang ctx->enckeylen, CLASS_1 | 722acdca31dSYuan Kang KEY_DEST_CLASS_REG); 723acdca31dSYuan Kang 724acdca31dSYuan Kang /* For ablkcipher, only propagate error immediately if shared */ 725acdca31dSYuan Kang jump_cmd = append_jump(desc, JUMP_TEST_ALL); 726acdca31dSYuan Kang set_jump_tgt_here(desc, key_jump_cmd); 727a2ecb155SKim Phillips append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 728acdca31dSYuan Kang set_jump_tgt_here(desc, jump_cmd); 729acdca31dSYuan Kang 730acdca31dSYuan Kang /* load IV */ 731acdca31dSYuan Kang append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 732acdca31dSYuan Kang LDST_CLASS_1_CCB | tfm->ivsize); 733acdca31dSYuan Kang 734acdca31dSYuan Kang /* Choose operation */ 735acdca31dSYuan Kang append_dec_op1(desc, ctx->class1_alg_type); 736acdca31dSYuan Kang 737acdca31dSYuan Kang /* Perform operation */ 738acdca31dSYuan Kang ablkcipher_append_src_dst(desc); 739acdca31dSYuan Kang 740acdca31dSYuan Kang /* Wait for key to load before allowing propagating error */ 741acdca31dSYuan Kang append_dec_shr_done(desc); 742acdca31dSYuan Kang 
743acdca31dSYuan Kang ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 744acdca31dSYuan Kang desc_bytes(desc), 745acdca31dSYuan Kang DMA_TO_DEVICE); 746acdca31dSYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { /* fix: was sh_desc_enc_dma — must check the handle just mapped */ 747acdca31dSYuan Kang dev_err(jrdev, "unable to map shared descriptor\n"); 748acdca31dSYuan Kang return -ENOMEM; 749acdca31dSYuan Kang } 750acdca31dSYuan Kang 751acdca31dSYuan Kang #ifdef DEBUG 752acdca31dSYuan Kang print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ", 753acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 754acdca31dSYuan Kang desc_bytes(desc), 1); 755acdca31dSYuan Kang #endif 756acdca31dSYuan Kang 757acdca31dSYuan Kang return ret; 758acdca31dSYuan Kang } 759acdca31dSYuan Kang 7608e8ec596SKim Phillips struct link_tbl_entry { 7618e8ec596SKim Phillips u64 ptr; 7628e8ec596SKim Phillips u32 len; 7638e8ec596SKim Phillips u8 reserved; 7648e8ec596SKim Phillips u8 buf_pool_id; 7658e8ec596SKim Phillips u16 offset; 7668e8ec596SKim Phillips }; 7678e8ec596SKim Phillips 7688e8ec596SKim Phillips /* 7691acebad3SYuan Kang * aead_edesc - s/w-extended aead descriptor 7701acebad3SYuan Kang * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist 7718e8ec596SKim Phillips * @src_nents: number of segments in input scatterlist 7728e8ec596SKim Phillips * @dst_nents: number of segments in output scatterlist 7731acebad3SYuan Kang * @iv_dma: dma address of iv for checking continuity and link table 7748e8ec596SKim Phillips * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 7758e8ec596SKim Phillips * @link_tbl_bytes: length of dma mapped link_tbl space 7768e8ec596SKim Phillips * @link_tbl_dma: bus physical mapped address of h/w link table 7778e8ec596SKim Phillips * @hw_desc: the h/w job descriptor followed by any referenced link tables 7788e8ec596SKim Phillips */ 7790e479300SYuan Kang struct aead_edesc { 7808e8ec596SKim Phillips int assoc_nents; 7818e8ec596SKim Phillips int src_nents; 
7828e8ec596SKim Phillips int dst_nents; 7831acebad3SYuan Kang dma_addr_t iv_dma; 7848e8ec596SKim Phillips int link_tbl_bytes; 7858e8ec596SKim Phillips dma_addr_t link_tbl_dma; 7868e8ec596SKim Phillips struct link_tbl_entry *link_tbl; 7878e8ec596SKim Phillips u32 hw_desc[0]; 7888e8ec596SKim Phillips }; 7898e8ec596SKim Phillips 790acdca31dSYuan Kang /* 791acdca31dSYuan Kang * ablkcipher_edesc - s/w-extended ablkcipher descriptor 792acdca31dSYuan Kang * @src_nents: number of segments in input scatterlist 793acdca31dSYuan Kang * @dst_nents: number of segments in output scatterlist 794acdca31dSYuan Kang * @iv_dma: dma address of iv for checking continuity and link table 795acdca31dSYuan Kang * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 796acdca31dSYuan Kang * @link_tbl_bytes: length of dma mapped link_tbl space 797acdca31dSYuan Kang * @link_tbl_dma: bus physical mapped address of h/w link table 798acdca31dSYuan Kang * @hw_desc: the h/w job descriptor followed by any referenced link tables 799acdca31dSYuan Kang */ 800acdca31dSYuan Kang struct ablkcipher_edesc { 801acdca31dSYuan Kang int src_nents; 802acdca31dSYuan Kang int dst_nents; 803acdca31dSYuan Kang dma_addr_t iv_dma; 804acdca31dSYuan Kang int link_tbl_bytes; 805acdca31dSYuan Kang dma_addr_t link_tbl_dma; 806acdca31dSYuan Kang struct link_tbl_entry *link_tbl; 807acdca31dSYuan Kang u32 hw_desc[0]; 808acdca31dSYuan Kang }; 809acdca31dSYuan Kang 8101acebad3SYuan Kang static void caam_unmap(struct device *dev, struct scatterlist *src, 8111acebad3SYuan Kang struct scatterlist *dst, int src_nents, int dst_nents, 8121acebad3SYuan Kang dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma, 8131acebad3SYuan Kang int link_tbl_bytes) 8141acebad3SYuan Kang { 8151acebad3SYuan Kang if (unlikely(dst != src)) { 8161acebad3SYuan Kang dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 8171acebad3SYuan Kang dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 8181acebad3SYuan Kang } else { 
8191acebad3SYuan Kang dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); 8201acebad3SYuan Kang } 8211acebad3SYuan Kang 8221acebad3SYuan Kang if (iv_dma) 8231acebad3SYuan Kang dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 8241acebad3SYuan Kang if (link_tbl_bytes) 8251acebad3SYuan Kang dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes, 8261acebad3SYuan Kang DMA_TO_DEVICE); 8271acebad3SYuan Kang } 8281acebad3SYuan Kang 8290e479300SYuan Kang static void aead_unmap(struct device *dev, 8300e479300SYuan Kang struct aead_edesc *edesc, 8310e479300SYuan Kang struct aead_request *req) 8328e8ec596SKim Phillips { 8331acebad3SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 8341acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 8351acebad3SYuan Kang 8360e479300SYuan Kang dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE); 8378e8ec596SKim Phillips 8381acebad3SYuan Kang caam_unmap(dev, req->src, req->dst, 8391acebad3SYuan Kang edesc->src_nents, edesc->dst_nents, 8401acebad3SYuan Kang edesc->iv_dma, ivsize, edesc->link_tbl_dma, 8411acebad3SYuan Kang edesc->link_tbl_bytes); 8428e8ec596SKim Phillips } 8438e8ec596SKim Phillips 844acdca31dSYuan Kang static void ablkcipher_unmap(struct device *dev, 845acdca31dSYuan Kang struct ablkcipher_edesc *edesc, 846acdca31dSYuan Kang struct ablkcipher_request *req) 847acdca31dSYuan Kang { 848acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 849acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 850acdca31dSYuan Kang 851acdca31dSYuan Kang caam_unmap(dev, req->src, req->dst, 852acdca31dSYuan Kang edesc->src_nents, edesc->dst_nents, 853acdca31dSYuan Kang edesc->iv_dma, ivsize, edesc->link_tbl_dma, 854acdca31dSYuan Kang edesc->link_tbl_bytes); 855acdca31dSYuan Kang } 856acdca31dSYuan Kang 8570e479300SYuan Kang static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 8588e8ec596SKim Phillips void *context) 8598e8ec596SKim Phillips { 
8600e479300SYuan Kang struct aead_request *req = context; 8610e479300SYuan Kang struct aead_edesc *edesc; 8628e8ec596SKim Phillips #ifdef DEBUG 8630e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 8648e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 8651acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 8668e8ec596SKim Phillips 8678e8ec596SKim Phillips dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 8688e8ec596SKim Phillips #endif 8691acebad3SYuan Kang 8700e479300SYuan Kang edesc = (struct aead_edesc *)((char *)desc - 8710e479300SYuan Kang offsetof(struct aead_edesc, hw_desc)); 8728e8ec596SKim Phillips 8738e8ec596SKim Phillips if (err) { 874de2954d6SKim Phillips char tmp[CAAM_ERROR_STR_MAX]; 8758e8ec596SKim Phillips 8768e8ec596SKim Phillips dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 8778e8ec596SKim Phillips } 8788e8ec596SKim Phillips 8790e479300SYuan Kang aead_unmap(jrdev, edesc, req); 8808e8ec596SKim Phillips 8818e8ec596SKim Phillips #ifdef DEBUG 8828e8ec596SKim Phillips print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", 8830e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 8840e479300SYuan Kang req->assoclen , 1); 8858e8ec596SKim Phillips print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 8860e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, 8878e8ec596SKim Phillips edesc->src_nents ? 100 : ivsize, 1); 8888e8ec596SKim Phillips print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 8890e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 8900e479300SYuan Kang edesc->src_nents ? 
100 : req->cryptlen + 8918e8ec596SKim Phillips ctx->authsize + 4, 1); 8928e8ec596SKim Phillips #endif 8938e8ec596SKim Phillips 8948e8ec596SKim Phillips kfree(edesc); 8958e8ec596SKim Phillips 8960e479300SYuan Kang aead_request_complete(req, err); 8978e8ec596SKim Phillips } 8988e8ec596SKim Phillips 8990e479300SYuan Kang static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 9008e8ec596SKim Phillips void *context) 9018e8ec596SKim Phillips { 9020e479300SYuan Kang struct aead_request *req = context; 9030e479300SYuan Kang struct aead_edesc *edesc; 9048e8ec596SKim Phillips #ifdef DEBUG 9050e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 9068e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 9071acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 9088e8ec596SKim Phillips 9098e8ec596SKim Phillips dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 9108e8ec596SKim Phillips #endif 9111acebad3SYuan Kang 9120e479300SYuan Kang edesc = (struct aead_edesc *)((char *)desc - 9130e479300SYuan Kang offsetof(struct aead_edesc, hw_desc)); 9148e8ec596SKim Phillips 9151acebad3SYuan Kang #ifdef DEBUG 9161acebad3SYuan Kang print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 9171acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 9181acebad3SYuan Kang ivsize, 1); 9191acebad3SYuan Kang print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 9201acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), 9211acebad3SYuan Kang req->cryptlen, 1); 9221acebad3SYuan Kang #endif 9231acebad3SYuan Kang 9248e8ec596SKim Phillips if (err) { 925de2954d6SKim Phillips char tmp[CAAM_ERROR_STR_MAX]; 9268e8ec596SKim Phillips 9278e8ec596SKim Phillips dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 9288e8ec596SKim Phillips } 9298e8ec596SKim Phillips 9300e479300SYuan Kang aead_unmap(jrdev, edesc, req); 9318e8ec596SKim Phillips 9328e8ec596SKim Phillips /* 9338e8ec596SKim Phillips * verify hw auth check passed else return 
-EBADMSG 9348e8ec596SKim Phillips */ 9358e8ec596SKim Phillips if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) 9368e8ec596SKim Phillips err = -EBADMSG; 9378e8ec596SKim Phillips 9388e8ec596SKim Phillips #ifdef DEBUG 9398e8ec596SKim Phillips print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", 9408e8ec596SKim Phillips DUMP_PREFIX_ADDRESS, 16, 4, 9410e479300SYuan Kang ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), 9420e479300SYuan Kang sizeof(struct iphdr) + req->assoclen + 9430e479300SYuan Kang ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + 9448e8ec596SKim Phillips ctx->authsize + 36, 1); 9458e8ec596SKim Phillips if (!err && edesc->link_tbl_bytes) { 9460e479300SYuan Kang struct scatterlist *sg = sg_last(req->src, edesc->src_nents); 9478e8ec596SKim Phillips print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", 9488e8ec596SKim Phillips DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), 9498e8ec596SKim Phillips sg->length + ctx->authsize + 16, 1); 9508e8ec596SKim Phillips } 9518e8ec596SKim Phillips #endif 9521acebad3SYuan Kang 9538e8ec596SKim Phillips kfree(edesc); 9548e8ec596SKim Phillips 9550e479300SYuan Kang aead_request_complete(req, err); 9568e8ec596SKim Phillips } 9578e8ec596SKim Phillips 958acdca31dSYuan Kang static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 959acdca31dSYuan Kang void *context) 960acdca31dSYuan Kang { 961acdca31dSYuan Kang struct ablkcipher_request *req = context; 962acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 963acdca31dSYuan Kang #ifdef DEBUG 964acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 965acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 966acdca31dSYuan Kang 967acdca31dSYuan Kang dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 968acdca31dSYuan Kang #endif 969acdca31dSYuan Kang 970acdca31dSYuan Kang edesc = (struct ablkcipher_edesc *)((char *)desc - 971acdca31dSYuan Kang offsetof(struct ablkcipher_edesc, 
hw_desc)); 972acdca31dSYuan Kang 973acdca31dSYuan Kang if (err) { 974acdca31dSYuan Kang char tmp[CAAM_ERROR_STR_MAX]; 975acdca31dSYuan Kang 976acdca31dSYuan Kang dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 977acdca31dSYuan Kang } 978acdca31dSYuan Kang 979acdca31dSYuan Kang #ifdef DEBUG 980acdca31dSYuan Kang print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 981acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->info, 982acdca31dSYuan Kang edesc->src_nents > 1 ? 100 : ivsize, 1); 983acdca31dSYuan Kang print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 984acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 985acdca31dSYuan Kang edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 986acdca31dSYuan Kang #endif 987acdca31dSYuan Kang 988acdca31dSYuan Kang ablkcipher_unmap(jrdev, edesc, req); 989acdca31dSYuan Kang kfree(edesc); 990acdca31dSYuan Kang 991acdca31dSYuan Kang ablkcipher_request_complete(req, err); 992acdca31dSYuan Kang } 993acdca31dSYuan Kang 994acdca31dSYuan Kang static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 995acdca31dSYuan Kang void *context) 996acdca31dSYuan Kang { 997acdca31dSYuan Kang struct ablkcipher_request *req = context; 998acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 999acdca31dSYuan Kang #ifdef DEBUG 1000acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1001acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1002acdca31dSYuan Kang 1003acdca31dSYuan Kang dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1004acdca31dSYuan Kang #endif 1005acdca31dSYuan Kang 1006acdca31dSYuan Kang edesc = (struct ablkcipher_edesc *)((char *)desc - 1007acdca31dSYuan Kang offsetof(struct ablkcipher_edesc, hw_desc)); 1008acdca31dSYuan Kang if (err) { 1009acdca31dSYuan Kang char tmp[CAAM_ERROR_STR_MAX]; 1010acdca31dSYuan Kang 1011acdca31dSYuan Kang dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 1012acdca31dSYuan Kang } 
1013acdca31dSYuan Kang 1014acdca31dSYuan Kang #ifdef DEBUG 1015acdca31dSYuan Kang print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 1016acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1017acdca31dSYuan Kang ivsize, 1); 1018acdca31dSYuan Kang print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 1019acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1020acdca31dSYuan Kang edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 1021acdca31dSYuan Kang #endif 1022acdca31dSYuan Kang 1023acdca31dSYuan Kang ablkcipher_unmap(jrdev, edesc, req); 1024acdca31dSYuan Kang kfree(edesc); 1025acdca31dSYuan Kang 1026acdca31dSYuan Kang ablkcipher_request_complete(req, err); 1027acdca31dSYuan Kang } 1028acdca31dSYuan Kang 10291acebad3SYuan Kang static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr, 10301acebad3SYuan Kang dma_addr_t dma, u32 len, u32 offset) 10311acebad3SYuan Kang { 10321acebad3SYuan Kang link_tbl_ptr->ptr = dma; 10331acebad3SYuan Kang link_tbl_ptr->len = len; 10341acebad3SYuan Kang link_tbl_ptr->reserved = 0; 10351acebad3SYuan Kang link_tbl_ptr->buf_pool_id = 0; 10361acebad3SYuan Kang link_tbl_ptr->offset = offset; 10371acebad3SYuan Kang #ifdef DEBUG 10381acebad3SYuan Kang print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ", 10391acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr, 10401acebad3SYuan Kang sizeof(struct link_tbl_entry), 1); 10411acebad3SYuan Kang #endif 10421acebad3SYuan Kang } 10431acebad3SYuan Kang 10441acebad3SYuan Kang /* 10451acebad3SYuan Kang * convert scatterlist to h/w link table format 10461acebad3SYuan Kang * but does not have final bit; instead, returns last entry 10471acebad3SYuan Kang */ 10481acebad3SYuan Kang static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg, 10491acebad3SYuan Kang int sg_count, struct link_tbl_entry 10501acebad3SYuan Kang *link_tbl_ptr, u32 offset) 10511acebad3SYuan Kang { 10521acebad3SYuan Kang while (sg_count) { 10531acebad3SYuan Kang 
sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg), 10541acebad3SYuan Kang sg_dma_len(sg), offset); 10551acebad3SYuan Kang link_tbl_ptr++; 10561acebad3SYuan Kang sg = sg_next(sg); 10571acebad3SYuan Kang sg_count--; 10581acebad3SYuan Kang } 10591acebad3SYuan Kang return link_tbl_ptr - 1; 10601acebad3SYuan Kang } 10611acebad3SYuan Kang 10628e8ec596SKim Phillips /* 10638e8ec596SKim Phillips * convert scatterlist to h/w link table format 10648e8ec596SKim Phillips * scatterlist must have been previously dma mapped 10658e8ec596SKim Phillips */ 10661acebad3SYuan Kang static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count, 10678e8ec596SKim Phillips struct link_tbl_entry *link_tbl_ptr, u32 offset) 10688e8ec596SKim Phillips { 10691acebad3SYuan Kang link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset); 10708e8ec596SKim Phillips link_tbl_ptr->len |= 0x40000000; 10718e8ec596SKim Phillips } 10728e8ec596SKim Phillips 10738e8ec596SKim Phillips /* 10741acebad3SYuan Kang * Fill in aead job descriptor 10758e8ec596SKim Phillips */ 10761acebad3SYuan Kang static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, 10771acebad3SYuan Kang struct aead_edesc *edesc, 10781acebad3SYuan Kang struct aead_request *req, 10791acebad3SYuan Kang bool all_contig, bool encrypt) 10808e8ec596SKim Phillips { 10810e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 10828e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 10838e8ec596SKim Phillips int ivsize = crypto_aead_ivsize(aead); 10848e8ec596SKim Phillips int authsize = ctx->authsize; 10851acebad3SYuan Kang u32 *desc = edesc->hw_desc; 10861acebad3SYuan Kang u32 out_options = 0, in_options; 10871acebad3SYuan Kang dma_addr_t dst_dma, src_dma; 10881acebad3SYuan Kang int len, link_tbl_index = 0; 10898e8ec596SKim Phillips 10901acebad3SYuan Kang #ifdef DEBUG 10918e8ec596SKim Phillips debug("assoclen %d cryptlen %d authsize %d\n", 10920e479300SYuan Kang req->assoclen, req->cryptlen, authsize); 
10938e8ec596SKim Phillips print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", 10940e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 10950e479300SYuan Kang req->assoclen , 1); 10968e8ec596SKim Phillips print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 10971acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 10988e8ec596SKim Phillips edesc->src_nents ? 100 : ivsize, 1); 10998e8ec596SKim Phillips print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", 11000e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 11011acebad3SYuan Kang edesc->src_nents ? 100 : req->cryptlen, 1); 11028e8ec596SKim Phillips print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", 11038e8ec596SKim Phillips DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 11048e8ec596SKim Phillips desc_bytes(sh_desc), 1); 11058e8ec596SKim Phillips #endif 11061acebad3SYuan Kang 11071acebad3SYuan Kang len = desc_len(sh_desc); 11081acebad3SYuan Kang init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 11091acebad3SYuan Kang 11101acebad3SYuan Kang if (all_contig) { 11111acebad3SYuan Kang src_dma = sg_dma_address(req->assoc); 11121acebad3SYuan Kang in_options = 0; 11131acebad3SYuan Kang } else { 11141acebad3SYuan Kang src_dma = edesc->link_tbl_dma; 11151acebad3SYuan Kang link_tbl_index += (edesc->assoc_nents ? : 1) + 1 + 11161acebad3SYuan Kang (edesc->src_nents ? 
: 1); 11171acebad3SYuan Kang in_options = LDST_SGF; 11181acebad3SYuan Kang } 11191acebad3SYuan Kang if (encrypt) 11201acebad3SYuan Kang append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 11211acebad3SYuan Kang req->cryptlen - authsize, in_options); 11228e8ec596SKim Phillips else 11231acebad3SYuan Kang append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 11241acebad3SYuan Kang req->cryptlen, in_options); 11258e8ec596SKim Phillips 11261acebad3SYuan Kang if (likely(req->src == req->dst)) { 11271acebad3SYuan Kang if (all_contig) { 11281acebad3SYuan Kang dst_dma = sg_dma_address(req->src); 11298e8ec596SKim Phillips } else { 11301acebad3SYuan Kang dst_dma = src_dma + sizeof(struct link_tbl_entry) * 11311acebad3SYuan Kang ((edesc->assoc_nents ? : 1) + 1); 11321acebad3SYuan Kang out_options = LDST_SGF; 11338e8ec596SKim Phillips } 11348e8ec596SKim Phillips } else { 11358e8ec596SKim Phillips if (!edesc->dst_nents) { 11360e479300SYuan Kang dst_dma = sg_dma_address(req->dst); 11378e8ec596SKim Phillips } else { 11381acebad3SYuan Kang dst_dma = edesc->link_tbl_dma + 11391acebad3SYuan Kang link_tbl_index * 11408e8ec596SKim Phillips sizeof(struct link_tbl_entry); 11411acebad3SYuan Kang out_options = LDST_SGF; 11428e8ec596SKim Phillips } 11438e8ec596SKim Phillips } 11448e8ec596SKim Phillips if (encrypt) 11451acebad3SYuan Kang append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); 11468e8ec596SKim Phillips else 11471acebad3SYuan Kang append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, 11481acebad3SYuan Kang out_options); 11491acebad3SYuan Kang } 11501acebad3SYuan Kang 11511acebad3SYuan Kang /* 11521acebad3SYuan Kang * Fill in aead givencrypt job descriptor 11531acebad3SYuan Kang */ 11541acebad3SYuan Kang static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, 11551acebad3SYuan Kang struct aead_edesc *edesc, 11561acebad3SYuan Kang struct aead_request *req, 11571acebad3SYuan Kang int contig) 11581acebad3SYuan Kang { 11591acebad3SYuan Kang struct 
crypto_aead *aead = crypto_aead_reqtfm(req); 11601acebad3SYuan Kang struct caam_ctx *ctx = crypto_aead_ctx(aead); 11611acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 11621acebad3SYuan Kang int authsize = ctx->authsize; 11631acebad3SYuan Kang u32 *desc = edesc->hw_desc; 11641acebad3SYuan Kang u32 out_options = 0, in_options; 11651acebad3SYuan Kang dma_addr_t dst_dma, src_dma; 11661acebad3SYuan Kang int len, link_tbl_index = 0; 11678e8ec596SKim Phillips 11688e8ec596SKim Phillips #ifdef DEBUG 11691acebad3SYuan Kang debug("assoclen %d cryptlen %d authsize %d\n", 11701acebad3SYuan Kang req->assoclen, req->cryptlen, authsize); 11711acebad3SYuan Kang print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", 11721acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 11731acebad3SYuan Kang req->assoclen , 1); 11741acebad3SYuan Kang print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 11751acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 11761acebad3SYuan Kang print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", 11771acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 11781acebad3SYuan Kang edesc->src_nents > 1 ? 
100 : req->cryptlen, 1); 11791acebad3SYuan Kang print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", 11801acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 11811acebad3SYuan Kang desc_bytes(sh_desc), 1); 11828e8ec596SKim Phillips #endif 11838e8ec596SKim Phillips 11841acebad3SYuan Kang len = desc_len(sh_desc); 11851acebad3SYuan Kang init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 11861acebad3SYuan Kang 11871acebad3SYuan Kang if (contig & GIV_SRC_CONTIG) { 11881acebad3SYuan Kang src_dma = sg_dma_address(req->assoc); 11891acebad3SYuan Kang in_options = 0; 11901acebad3SYuan Kang } else { 11911acebad3SYuan Kang src_dma = edesc->link_tbl_dma; 11921acebad3SYuan Kang link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents; 11931acebad3SYuan Kang in_options = LDST_SGF; 11941acebad3SYuan Kang } 11951acebad3SYuan Kang append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 11961acebad3SYuan Kang req->cryptlen - authsize, in_options); 11971acebad3SYuan Kang 11981acebad3SYuan Kang if (contig & GIV_DST_CONTIG) { 11991acebad3SYuan Kang dst_dma = edesc->iv_dma; 12001acebad3SYuan Kang } else { 12011acebad3SYuan Kang if (likely(req->src == req->dst)) { 12021acebad3SYuan Kang dst_dma = src_dma + sizeof(struct link_tbl_entry) * 12031acebad3SYuan Kang edesc->assoc_nents; 12041acebad3SYuan Kang out_options = LDST_SGF; 12051acebad3SYuan Kang } else { 12061acebad3SYuan Kang dst_dma = edesc->link_tbl_dma + 12071acebad3SYuan Kang link_tbl_index * 12081acebad3SYuan Kang sizeof(struct link_tbl_entry); 12091acebad3SYuan Kang out_options = LDST_SGF; 12101acebad3SYuan Kang } 12118e8ec596SKim Phillips } 12128e8ec596SKim Phillips 12131acebad3SYuan Kang append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options); 12148e8ec596SKim Phillips } 12158e8ec596SKim Phillips 12168e8ec596SKim Phillips /* 1217acdca31dSYuan Kang * Fill in ablkcipher job descriptor 1218acdca31dSYuan Kang */ 1219acdca31dSYuan Kang static void init_ablkcipher_job(u32 *sh_desc, 
dma_addr_t ptr, 1220acdca31dSYuan Kang struct ablkcipher_edesc *edesc, 1221acdca31dSYuan Kang struct ablkcipher_request *req, 1222acdca31dSYuan Kang bool iv_contig) 1223acdca31dSYuan Kang { 1224acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1225acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1226acdca31dSYuan Kang u32 *desc = edesc->hw_desc; 1227acdca31dSYuan Kang u32 out_options = 0, in_options; 1228acdca31dSYuan Kang dma_addr_t dst_dma, src_dma; 1229acdca31dSYuan Kang int len, link_tbl_index = 0; 1230acdca31dSYuan Kang 1231acdca31dSYuan Kang #ifdef DEBUG 1232acdca31dSYuan Kang print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 1233acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1234acdca31dSYuan Kang ivsize, 1); 1235acdca31dSYuan Kang print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", 1236acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1237acdca31dSYuan Kang edesc->src_nents ? 100 : req->nbytes, 1); 1238acdca31dSYuan Kang #endif 1239acdca31dSYuan Kang 1240acdca31dSYuan Kang len = desc_len(sh_desc); 1241acdca31dSYuan Kang init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1242acdca31dSYuan Kang 1243acdca31dSYuan Kang if (iv_contig) { 1244acdca31dSYuan Kang src_dma = edesc->iv_dma; 1245acdca31dSYuan Kang in_options = 0; 1246acdca31dSYuan Kang } else { 1247acdca31dSYuan Kang src_dma = edesc->link_tbl_dma; 1248acdca31dSYuan Kang link_tbl_index += (iv_contig ? 
0 : 1) + edesc->src_nents; 1249acdca31dSYuan Kang in_options = LDST_SGF; 1250acdca31dSYuan Kang } 1251acdca31dSYuan Kang append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); 1252acdca31dSYuan Kang 1253acdca31dSYuan Kang if (likely(req->src == req->dst)) { 1254acdca31dSYuan Kang if (!edesc->src_nents && iv_contig) { 1255acdca31dSYuan Kang dst_dma = sg_dma_address(req->src); 1256acdca31dSYuan Kang } else { 1257acdca31dSYuan Kang dst_dma = edesc->link_tbl_dma + 1258acdca31dSYuan Kang sizeof(struct link_tbl_entry); 1259acdca31dSYuan Kang out_options = LDST_SGF; 1260acdca31dSYuan Kang } 1261acdca31dSYuan Kang } else { 1262acdca31dSYuan Kang if (!edesc->dst_nents) { 1263acdca31dSYuan Kang dst_dma = sg_dma_address(req->dst); 1264acdca31dSYuan Kang } else { 1265acdca31dSYuan Kang dst_dma = edesc->link_tbl_dma + 1266acdca31dSYuan Kang link_tbl_index * sizeof(struct link_tbl_entry); 1267acdca31dSYuan Kang out_options = LDST_SGF; 1268acdca31dSYuan Kang } 1269acdca31dSYuan Kang } 1270acdca31dSYuan Kang append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); 1271acdca31dSYuan Kang } 1272acdca31dSYuan Kang 1273acdca31dSYuan Kang /* 12748e8ec596SKim Phillips * derive number of elements in scatterlist 12758e8ec596SKim Phillips */ 12761acebad3SYuan Kang static int sg_count(struct scatterlist *sg_list, int nbytes) 12778e8ec596SKim Phillips { 12788e8ec596SKim Phillips struct scatterlist *sg = sg_list; 12798e8ec596SKim Phillips int sg_nents = 0; 12808e8ec596SKim Phillips 12818e8ec596SKim Phillips while (nbytes > 0) { 12828e8ec596SKim Phillips sg_nents++; 12838e8ec596SKim Phillips nbytes -= sg->length; 12848e8ec596SKim Phillips if (!sg_is_last(sg) && (sg + 1)->length == 0) 12851acebad3SYuan Kang BUG(); /* Not support chaining */ 12868e8ec596SKim Phillips sg = scatterwalk_sg_next(sg); 12878e8ec596SKim Phillips } 12888e8ec596SKim Phillips 12891acebad3SYuan Kang if (likely(sg_nents == 1)) 12901acebad3SYuan Kang return 0; 12911acebad3SYuan Kang 12928e8ec596SKim Phillips 
return sg_nents; 12938e8ec596SKim Phillips } 12948e8ec596SKim Phillips 12958e8ec596SKim Phillips /* 12961acebad3SYuan Kang * allocate and map the aead extended descriptor 12978e8ec596SKim Phillips */ 12980e479300SYuan Kang static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 12991acebad3SYuan Kang int desc_bytes, bool *all_contig_ptr) 13008e8ec596SKim Phillips { 13010e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 13028e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 13038e8ec596SKim Phillips struct device *jrdev = ctx->jrdev; 13041acebad3SYuan Kang gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 13051acebad3SYuan Kang CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 13061acebad3SYuan Kang int assoc_nents, src_nents, dst_nents = 0; 13070e479300SYuan Kang struct aead_edesc *edesc; 13081acebad3SYuan Kang dma_addr_t iv_dma = 0; 13091acebad3SYuan Kang int sgc; 13101acebad3SYuan Kang bool all_contig = true; 13111acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 13121acebad3SYuan Kang int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; 13138e8ec596SKim Phillips 13141acebad3SYuan Kang assoc_nents = sg_count(req->assoc, req->assoclen); 13151acebad3SYuan Kang src_nents = sg_count(req->src, req->cryptlen); 13168e8ec596SKim Phillips 13171acebad3SYuan Kang if (unlikely(req->dst != req->src)) 13181acebad3SYuan Kang dst_nents = sg_count(req->dst, req->cryptlen); 13198e8ec596SKim Phillips 13201acebad3SYuan Kang sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, 13211acebad3SYuan Kang DMA_BIDIRECTIONAL); 13221acebad3SYuan Kang if (likely(req->src == req->dst)) { 13231acebad3SYuan Kang sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 13241acebad3SYuan Kang DMA_BIDIRECTIONAL); 13251acebad3SYuan Kang } else { 13261acebad3SYuan Kang sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 13271acebad3SYuan Kang DMA_TO_DEVICE); 13281acebad3SYuan Kang sgc = dma_map_sg(jrdev, req->dst, dst_nents ? 
: 1, 13291acebad3SYuan Kang DMA_FROM_DEVICE); 13308e8ec596SKim Phillips } 13318e8ec596SKim Phillips 13321acebad3SYuan Kang /* Check if data are contiguous */ 13331acebad3SYuan Kang iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); 13341acebad3SYuan Kang if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 13351acebad3SYuan Kang iv_dma || src_nents || iv_dma + ivsize != 13361acebad3SYuan Kang sg_dma_address(req->src)) { 13371acebad3SYuan Kang all_contig = false; 13381acebad3SYuan Kang assoc_nents = assoc_nents ? : 1; 13391acebad3SYuan Kang src_nents = src_nents ? : 1; 13401acebad3SYuan Kang link_tbl_len = assoc_nents + 1 + src_nents; 13411acebad3SYuan Kang } 13421acebad3SYuan Kang link_tbl_len += dst_nents; 13431acebad3SYuan Kang 13441acebad3SYuan Kang link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); 13458e8ec596SKim Phillips 13468e8ec596SKim Phillips /* allocate space for base edesc and hw desc commands, link tables */ 13470e479300SYuan Kang edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 13488e8ec596SKim Phillips link_tbl_bytes, GFP_DMA | flags); 13498e8ec596SKim Phillips if (!edesc) { 13508e8ec596SKim Phillips dev_err(jrdev, "could not allocate extended descriptor\n"); 13518e8ec596SKim Phillips return ERR_PTR(-ENOMEM); 13528e8ec596SKim Phillips } 13538e8ec596SKim Phillips 13548e8ec596SKim Phillips edesc->assoc_nents = assoc_nents; 13558e8ec596SKim Phillips edesc->src_nents = src_nents; 13568e8ec596SKim Phillips edesc->dst_nents = dst_nents; 13571acebad3SYuan Kang edesc->iv_dma = iv_dma; 13581acebad3SYuan Kang edesc->link_tbl_bytes = link_tbl_bytes; 13590e479300SYuan Kang edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + 13608e8ec596SKim Phillips desc_bytes; 13618e8ec596SKim Phillips edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 13628e8ec596SKim Phillips link_tbl_bytes, DMA_TO_DEVICE); 13631acebad3SYuan Kang *all_contig_ptr = all_contig; 13641acebad3SYuan Kang 13651acebad3SYuan Kang 
link_tbl_index = 0; 13661acebad3SYuan Kang if (!all_contig) { 13671acebad3SYuan Kang sg_to_link_tbl(req->assoc, 13681acebad3SYuan Kang (assoc_nents ? : 1), 13691acebad3SYuan Kang edesc->link_tbl + 13701acebad3SYuan Kang link_tbl_index, 0); 13711acebad3SYuan Kang link_tbl_index += assoc_nents ? : 1; 13721acebad3SYuan Kang sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 13731acebad3SYuan Kang iv_dma, ivsize, 0); 13741acebad3SYuan Kang link_tbl_index += 1; 13751acebad3SYuan Kang sg_to_link_tbl_last(req->src, 13761acebad3SYuan Kang (src_nents ? : 1), 13771acebad3SYuan Kang edesc->link_tbl + 13781acebad3SYuan Kang link_tbl_index, 0); 13791acebad3SYuan Kang link_tbl_index += src_nents ? : 1; 13801acebad3SYuan Kang } 13811acebad3SYuan Kang if (dst_nents) { 13821acebad3SYuan Kang sg_to_link_tbl_last(req->dst, dst_nents, 13831acebad3SYuan Kang edesc->link_tbl + link_tbl_index, 0); 13841acebad3SYuan Kang } 13858e8ec596SKim Phillips 13868e8ec596SKim Phillips return edesc; 13878e8ec596SKim Phillips } 13888e8ec596SKim Phillips 13890e479300SYuan Kang static int aead_encrypt(struct aead_request *req) 13908e8ec596SKim Phillips { 13910e479300SYuan Kang struct aead_edesc *edesc; 13928e8ec596SKim Phillips struct crypto_aead *aead = crypto_aead_reqtfm(req); 13938e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 13948e8ec596SKim Phillips struct device *jrdev = ctx->jrdev; 13951acebad3SYuan Kang bool all_contig; 13968e8ec596SKim Phillips u32 *desc; 13971acebad3SYuan Kang int ret = 0; 13981acebad3SYuan Kang 13991acebad3SYuan Kang req->cryptlen += ctx->authsize; 14008e8ec596SKim Phillips 14018e8ec596SKim Phillips /* allocate extended descriptor */ 14021acebad3SYuan Kang edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 14031acebad3SYuan Kang CAAM_CMD_SZ, &all_contig); 14048e8ec596SKim Phillips if (IS_ERR(edesc)) 14058e8ec596SKim Phillips return PTR_ERR(edesc); 14068e8ec596SKim Phillips 14071acebad3SYuan Kang /* Create and submit job descriptor */ 14081acebad3SYuan Kang 
init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, 14091acebad3SYuan Kang all_contig, true); 14101acebad3SYuan Kang #ifdef DEBUG 14111acebad3SYuan Kang print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", 14121acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 14131acebad3SYuan Kang desc_bytes(edesc->hw_desc), 1); 14141acebad3SYuan Kang #endif 14151acebad3SYuan Kang 14168e8ec596SKim Phillips desc = edesc->hw_desc; 14171acebad3SYuan Kang ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 14181acebad3SYuan Kang if (!ret) { 14191acebad3SYuan Kang ret = -EINPROGRESS; 14201acebad3SYuan Kang } else { 14211acebad3SYuan Kang aead_unmap(jrdev, edesc, req); 14221acebad3SYuan Kang kfree(edesc); 14231acebad3SYuan Kang } 14248e8ec596SKim Phillips 14251acebad3SYuan Kang return ret; 14268e8ec596SKim Phillips } 14278e8ec596SKim Phillips 14280e479300SYuan Kang static int aead_decrypt(struct aead_request *req) 14298e8ec596SKim Phillips { 14301acebad3SYuan Kang struct aead_edesc *edesc; 14310e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 14320e479300SYuan Kang struct caam_ctx *ctx = crypto_aead_ctx(aead); 14330e479300SYuan Kang struct device *jrdev = ctx->jrdev; 14341acebad3SYuan Kang bool all_contig; 14350e479300SYuan Kang u32 *desc; 14361acebad3SYuan Kang int ret = 0; 14370e479300SYuan Kang 14380e479300SYuan Kang /* allocate extended descriptor */ 14391acebad3SYuan Kang edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 14401acebad3SYuan Kang CAAM_CMD_SZ, &all_contig); 14410e479300SYuan Kang if (IS_ERR(edesc)) 14420e479300SYuan Kang return PTR_ERR(edesc); 14430e479300SYuan Kang 14441acebad3SYuan Kang #ifdef DEBUG 14451acebad3SYuan Kang print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ", 14461acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 14471acebad3SYuan Kang req->cryptlen, 1); 14481acebad3SYuan Kang #endif 14491acebad3SYuan Kang 14501acebad3SYuan Kang /* Create and submit job descriptor*/ 
14511acebad3SYuan Kang init_aead_job(ctx->sh_desc_dec, 14521acebad3SYuan Kang ctx->sh_desc_dec_dma, edesc, req, all_contig, false); 14531acebad3SYuan Kang #ifdef DEBUG 14541acebad3SYuan Kang print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", 14551acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 14561acebad3SYuan Kang desc_bytes(edesc->hw_desc), 1); 14571acebad3SYuan Kang #endif 14581acebad3SYuan Kang 14590e479300SYuan Kang desc = edesc->hw_desc; 14601acebad3SYuan Kang ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 14611acebad3SYuan Kang if (!ret) { 14621acebad3SYuan Kang ret = -EINPROGRESS; 14631acebad3SYuan Kang } else { 14641acebad3SYuan Kang aead_unmap(jrdev, edesc, req); 14651acebad3SYuan Kang kfree(edesc); 14661acebad3SYuan Kang } 14670e479300SYuan Kang 14681acebad3SYuan Kang return ret; 14691acebad3SYuan Kang } 14700e479300SYuan Kang 14711acebad3SYuan Kang /* 14721acebad3SYuan Kang * allocate and map the aead extended descriptor for aead givencrypt 14731acebad3SYuan Kang */ 14741acebad3SYuan Kang static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request 14751acebad3SYuan Kang *greq, int desc_bytes, 14761acebad3SYuan Kang u32 *contig_ptr) 14771acebad3SYuan Kang { 14781acebad3SYuan Kang struct aead_request *req = &greq->areq; 14791acebad3SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 14801acebad3SYuan Kang struct caam_ctx *ctx = crypto_aead_ctx(aead); 14811acebad3SYuan Kang struct device *jrdev = ctx->jrdev; 14821acebad3SYuan Kang gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 14831acebad3SYuan Kang CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; 14841acebad3SYuan Kang int assoc_nents, src_nents, dst_nents = 0; 14851acebad3SYuan Kang struct aead_edesc *edesc; 14861acebad3SYuan Kang dma_addr_t iv_dma = 0; 14871acebad3SYuan Kang int sgc; 14881acebad3SYuan Kang u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; 14891acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 14901acebad3SYuan Kang int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; 14910e479300SYuan Kang 14921acebad3SYuan Kang assoc_nents = sg_count(req->assoc, req->assoclen); 14931acebad3SYuan Kang src_nents = sg_count(req->src, req->cryptlen); 14940e479300SYuan Kang 14951acebad3SYuan Kang if (unlikely(req->dst != req->src)) 14961acebad3SYuan Kang dst_nents = sg_count(req->dst, req->cryptlen); 14971acebad3SYuan Kang 14981acebad3SYuan Kang sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, 14991acebad3SYuan Kang DMA_BIDIRECTIONAL); 15001acebad3SYuan Kang if (likely(req->src == req->dst)) { 15011acebad3SYuan Kang sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 15021acebad3SYuan Kang DMA_BIDIRECTIONAL); 15031acebad3SYuan Kang } else { 15041acebad3SYuan Kang sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 15051acebad3SYuan Kang DMA_TO_DEVICE); 15061acebad3SYuan Kang sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 15071acebad3SYuan Kang DMA_FROM_DEVICE); 15081acebad3SYuan Kang } 15091acebad3SYuan Kang 15101acebad3SYuan Kang /* Check if data are contiguous */ 15111acebad3SYuan Kang iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); 15121acebad3SYuan Kang if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 15131acebad3SYuan Kang iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) 15141acebad3SYuan Kang contig &= ~GIV_SRC_CONTIG; 15151acebad3SYuan Kang if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) 15161acebad3SYuan Kang contig &= ~GIV_DST_CONTIG; 15171acebad3SYuan Kang if (unlikely(req->src != req->dst)) { 15181acebad3SYuan Kang dst_nents = dst_nents ? 
: 1; 15191acebad3SYuan Kang link_tbl_len += 1; 15201acebad3SYuan Kang } 15211acebad3SYuan Kang if (!(contig & GIV_SRC_CONTIG)) { 15221acebad3SYuan Kang assoc_nents = assoc_nents ? : 1; 15231acebad3SYuan Kang src_nents = src_nents ? : 1; 15241acebad3SYuan Kang link_tbl_len += assoc_nents + 1 + src_nents; 15251acebad3SYuan Kang if (likely(req->src == req->dst)) 15261acebad3SYuan Kang contig &= ~GIV_DST_CONTIG; 15271acebad3SYuan Kang } 15281acebad3SYuan Kang link_tbl_len += dst_nents; 15291acebad3SYuan Kang 15301acebad3SYuan Kang link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); 15311acebad3SYuan Kang 15321acebad3SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 15331acebad3SYuan Kang edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 15341acebad3SYuan Kang link_tbl_bytes, GFP_DMA | flags); 15351acebad3SYuan Kang if (!edesc) { 15361acebad3SYuan Kang dev_err(jrdev, "could not allocate extended descriptor\n"); 15371acebad3SYuan Kang return ERR_PTR(-ENOMEM); 15381acebad3SYuan Kang } 15391acebad3SYuan Kang 15401acebad3SYuan Kang edesc->assoc_nents = assoc_nents; 15411acebad3SYuan Kang edesc->src_nents = src_nents; 15421acebad3SYuan Kang edesc->dst_nents = dst_nents; 15431acebad3SYuan Kang edesc->iv_dma = iv_dma; 15441acebad3SYuan Kang edesc->link_tbl_bytes = link_tbl_bytes; 15451acebad3SYuan Kang edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + 15461acebad3SYuan Kang desc_bytes; 15471acebad3SYuan Kang edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 15481acebad3SYuan Kang link_tbl_bytes, DMA_TO_DEVICE); 15491acebad3SYuan Kang *contig_ptr = contig; 15501acebad3SYuan Kang 15511acebad3SYuan Kang link_tbl_index = 0; 15521acebad3SYuan Kang if (!(contig & GIV_SRC_CONTIG)) { 15531acebad3SYuan Kang sg_to_link_tbl(req->assoc, assoc_nents, 15541acebad3SYuan Kang edesc->link_tbl + 15551acebad3SYuan Kang link_tbl_index, 0); 15561acebad3SYuan Kang link_tbl_index += assoc_nents; 15571acebad3SYuan Kang 
sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 15581acebad3SYuan Kang iv_dma, ivsize, 0); 15591acebad3SYuan Kang link_tbl_index += 1; 15601acebad3SYuan Kang sg_to_link_tbl_last(req->src, src_nents, 15611acebad3SYuan Kang edesc->link_tbl + 15621acebad3SYuan Kang link_tbl_index, 0); 15631acebad3SYuan Kang link_tbl_index += src_nents; 15641acebad3SYuan Kang } 15651acebad3SYuan Kang if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { 15661acebad3SYuan Kang sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 15671acebad3SYuan Kang iv_dma, ivsize, 0); 15681acebad3SYuan Kang link_tbl_index += 1; 15691acebad3SYuan Kang sg_to_link_tbl_last(req->dst, dst_nents, 15701acebad3SYuan Kang edesc->link_tbl + link_tbl_index, 0); 15711acebad3SYuan Kang } 15721acebad3SYuan Kang 15731acebad3SYuan Kang return edesc; 15740e479300SYuan Kang } 15750e479300SYuan Kang 15760e479300SYuan Kang static int aead_givencrypt(struct aead_givcrypt_request *areq) 15770e479300SYuan Kang { 15780e479300SYuan Kang struct aead_request *req = &areq->areq; 15790e479300SYuan Kang struct aead_edesc *edesc; 15800e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 15818e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 15828e8ec596SKim Phillips struct device *jrdev = ctx->jrdev; 15831acebad3SYuan Kang u32 contig; 15848e8ec596SKim Phillips u32 *desc; 15851acebad3SYuan Kang int ret = 0; 15868e8ec596SKim Phillips 15871acebad3SYuan Kang req->cryptlen += ctx->authsize; 15888e8ec596SKim Phillips 15898e8ec596SKim Phillips /* allocate extended descriptor */ 15901acebad3SYuan Kang edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * 15911acebad3SYuan Kang CAAM_CMD_SZ, &contig); 15921acebad3SYuan Kang 15938e8ec596SKim Phillips if (IS_ERR(edesc)) 15948e8ec596SKim Phillips return PTR_ERR(edesc); 15958e8ec596SKim Phillips 15961acebad3SYuan Kang #ifdef DEBUG 15971acebad3SYuan Kang print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ", 15981acebad3SYuan Kang 
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 15991acebad3SYuan Kang req->cryptlen, 1); 16001acebad3SYuan Kang #endif 16011acebad3SYuan Kang 16021acebad3SYuan Kang /* Create and submit job descriptor*/ 16031acebad3SYuan Kang init_aead_giv_job(ctx->sh_desc_givenc, 16041acebad3SYuan Kang ctx->sh_desc_givenc_dma, edesc, req, contig); 16051acebad3SYuan Kang #ifdef DEBUG 16061acebad3SYuan Kang print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", 16071acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 16081acebad3SYuan Kang desc_bytes(edesc->hw_desc), 1); 16091acebad3SYuan Kang #endif 16101acebad3SYuan Kang 16118e8ec596SKim Phillips desc = edesc->hw_desc; 16121acebad3SYuan Kang ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 16131acebad3SYuan Kang if (!ret) { 16141acebad3SYuan Kang ret = -EINPROGRESS; 16151acebad3SYuan Kang } else { 16161acebad3SYuan Kang aead_unmap(jrdev, edesc, req); 16171acebad3SYuan Kang kfree(edesc); 16181acebad3SYuan Kang } 16198e8ec596SKim Phillips 16201acebad3SYuan Kang return ret; 16218e8ec596SKim Phillips } 16228e8ec596SKim Phillips 1623acdca31dSYuan Kang /* 1624acdca31dSYuan Kang * allocate and map the ablkcipher extended descriptor for ablkcipher 1625acdca31dSYuan Kang */ 1626acdca31dSYuan Kang static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request 1627acdca31dSYuan Kang *req, int desc_bytes, 1628acdca31dSYuan Kang bool *iv_contig_out) 1629acdca31dSYuan Kang { 1630acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1631acdca31dSYuan Kang struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1632acdca31dSYuan Kang struct device *jrdev = ctx->jrdev; 1633acdca31dSYuan Kang gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1634acdca31dSYuan Kang CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
1635acdca31dSYuan Kang GFP_KERNEL : GFP_ATOMIC; 1636acdca31dSYuan Kang int src_nents, dst_nents = 0, link_tbl_bytes; 1637acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 1638acdca31dSYuan Kang dma_addr_t iv_dma = 0; 1639acdca31dSYuan Kang bool iv_contig = false; 1640acdca31dSYuan Kang int sgc; 1641acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1642acdca31dSYuan Kang int link_tbl_index; 1643acdca31dSYuan Kang 1644acdca31dSYuan Kang src_nents = sg_count(req->src, req->nbytes); 1645acdca31dSYuan Kang 1646acdca31dSYuan Kang if (unlikely(req->dst != req->src)) 1647acdca31dSYuan Kang dst_nents = sg_count(req->dst, req->nbytes); 1648acdca31dSYuan Kang 1649acdca31dSYuan Kang if (likely(req->src == req->dst)) { 1650acdca31dSYuan Kang sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1651acdca31dSYuan Kang DMA_BIDIRECTIONAL); 1652acdca31dSYuan Kang } else { 1653acdca31dSYuan Kang sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1654acdca31dSYuan Kang DMA_TO_DEVICE); 1655acdca31dSYuan Kang sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1656acdca31dSYuan Kang DMA_FROM_DEVICE); 1657acdca31dSYuan Kang } 1658acdca31dSYuan Kang 1659acdca31dSYuan Kang /* 1660acdca31dSYuan Kang * Check if iv can be contiguous with source and destination. 1661acdca31dSYuan Kang * If so, include it. If not, create scatterlist. 1662acdca31dSYuan Kang */ 1663acdca31dSYuan Kang iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); 1664acdca31dSYuan Kang if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) 1665acdca31dSYuan Kang iv_contig = true; 1666acdca31dSYuan Kang else 1667acdca31dSYuan Kang src_nents = src_nents ? : 1; 1668acdca31dSYuan Kang link_tbl_bytes = ((iv_contig ? 
0 : 1) + src_nents + dst_nents) * 1669acdca31dSYuan Kang sizeof(struct link_tbl_entry); 1670acdca31dSYuan Kang 1671acdca31dSYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 1672acdca31dSYuan Kang edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + 1673acdca31dSYuan Kang link_tbl_bytes, GFP_DMA | flags); 1674acdca31dSYuan Kang if (!edesc) { 1675acdca31dSYuan Kang dev_err(jrdev, "could not allocate extended descriptor\n"); 1676acdca31dSYuan Kang return ERR_PTR(-ENOMEM); 1677acdca31dSYuan Kang } 1678acdca31dSYuan Kang 1679acdca31dSYuan Kang edesc->src_nents = src_nents; 1680acdca31dSYuan Kang edesc->dst_nents = dst_nents; 1681acdca31dSYuan Kang edesc->link_tbl_bytes = link_tbl_bytes; 1682acdca31dSYuan Kang edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1683acdca31dSYuan Kang desc_bytes; 1684acdca31dSYuan Kang 1685acdca31dSYuan Kang link_tbl_index = 0; 1686acdca31dSYuan Kang if (!iv_contig) { 1687acdca31dSYuan Kang sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0); 1688acdca31dSYuan Kang sg_to_link_tbl_last(req->src, src_nents, 1689acdca31dSYuan Kang edesc->link_tbl + 1, 0); 1690acdca31dSYuan Kang link_tbl_index += 1 + src_nents; 1691acdca31dSYuan Kang } 1692acdca31dSYuan Kang 1693acdca31dSYuan Kang if (unlikely(dst_nents)) { 1694acdca31dSYuan Kang sg_to_link_tbl_last(req->dst, dst_nents, 1695acdca31dSYuan Kang edesc->link_tbl + link_tbl_index, 0); 1696acdca31dSYuan Kang } 1697acdca31dSYuan Kang 1698acdca31dSYuan Kang edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1699acdca31dSYuan Kang link_tbl_bytes, DMA_TO_DEVICE); 1700acdca31dSYuan Kang edesc->iv_dma = iv_dma; 1701acdca31dSYuan Kang 1702acdca31dSYuan Kang #ifdef DEBUG 1703acdca31dSYuan Kang print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ", 1704acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, 1705acdca31dSYuan Kang link_tbl_bytes, 1); 1706acdca31dSYuan Kang #endif 1707acdca31dSYuan Kang 1708acdca31dSYuan 
Kang *iv_contig_out = iv_contig; 1709acdca31dSYuan Kang return edesc; 1710acdca31dSYuan Kang } 1711acdca31dSYuan Kang 1712acdca31dSYuan Kang static int ablkcipher_encrypt(struct ablkcipher_request *req) 1713acdca31dSYuan Kang { 1714acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 1715acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1716acdca31dSYuan Kang struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1717acdca31dSYuan Kang struct device *jrdev = ctx->jrdev; 1718acdca31dSYuan Kang bool iv_contig; 1719acdca31dSYuan Kang u32 *desc; 1720acdca31dSYuan Kang int ret = 0; 1721acdca31dSYuan Kang 1722acdca31dSYuan Kang /* allocate extended descriptor */ 1723acdca31dSYuan Kang edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1724acdca31dSYuan Kang CAAM_CMD_SZ, &iv_contig); 1725acdca31dSYuan Kang if (IS_ERR(edesc)) 1726acdca31dSYuan Kang return PTR_ERR(edesc); 1727acdca31dSYuan Kang 1728acdca31dSYuan Kang /* Create and submit job descriptor*/ 1729acdca31dSYuan Kang init_ablkcipher_job(ctx->sh_desc_enc, 1730acdca31dSYuan Kang ctx->sh_desc_enc_dma, edesc, req, iv_contig); 1731acdca31dSYuan Kang #ifdef DEBUG 1732acdca31dSYuan Kang print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", 1733acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1734acdca31dSYuan Kang desc_bytes(edesc->hw_desc), 1); 1735acdca31dSYuan Kang #endif 1736acdca31dSYuan Kang desc = edesc->hw_desc; 1737acdca31dSYuan Kang ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); 1738acdca31dSYuan Kang 1739acdca31dSYuan Kang if (!ret) { 1740acdca31dSYuan Kang ret = -EINPROGRESS; 1741acdca31dSYuan Kang } else { 1742acdca31dSYuan Kang ablkcipher_unmap(jrdev, edesc, req); 1743acdca31dSYuan Kang kfree(edesc); 1744acdca31dSYuan Kang } 1745acdca31dSYuan Kang 1746acdca31dSYuan Kang return ret; 1747acdca31dSYuan Kang } 1748acdca31dSYuan Kang 1749acdca31dSYuan Kang static int ablkcipher_decrypt(struct ablkcipher_request *req) 
1750acdca31dSYuan Kang { 1751acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 1752acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1753acdca31dSYuan Kang struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1754acdca31dSYuan Kang struct device *jrdev = ctx->jrdev; 1755acdca31dSYuan Kang bool iv_contig; 1756acdca31dSYuan Kang u32 *desc; 1757acdca31dSYuan Kang int ret = 0; 1758acdca31dSYuan Kang 1759acdca31dSYuan Kang /* allocate extended descriptor */ 1760acdca31dSYuan Kang edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1761acdca31dSYuan Kang CAAM_CMD_SZ, &iv_contig); 1762acdca31dSYuan Kang if (IS_ERR(edesc)) 1763acdca31dSYuan Kang return PTR_ERR(edesc); 1764acdca31dSYuan Kang 1765acdca31dSYuan Kang /* Create and submit job descriptor*/ 1766acdca31dSYuan Kang init_ablkcipher_job(ctx->sh_desc_dec, 1767acdca31dSYuan Kang ctx->sh_desc_dec_dma, edesc, req, iv_contig); 1768acdca31dSYuan Kang desc = edesc->hw_desc; 1769acdca31dSYuan Kang #ifdef DEBUG 1770acdca31dSYuan Kang print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", 1771acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1772acdca31dSYuan Kang desc_bytes(edesc->hw_desc), 1); 1773acdca31dSYuan Kang #endif 1774acdca31dSYuan Kang 1775acdca31dSYuan Kang ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); 1776acdca31dSYuan Kang if (!ret) { 1777acdca31dSYuan Kang ret = -EINPROGRESS; 1778acdca31dSYuan Kang } else { 1779acdca31dSYuan Kang ablkcipher_unmap(jrdev, edesc, req); 1780acdca31dSYuan Kang kfree(edesc); 1781acdca31dSYuan Kang } 1782acdca31dSYuan Kang 1783acdca31dSYuan Kang return ret; 1784acdca31dSYuan Kang } 1785acdca31dSYuan Kang 1786885e9e2fSYuan Kang #define template_aead template_u.aead 1787acdca31dSYuan Kang #define template_ablkcipher template_u.ablkcipher 17888e8ec596SKim Phillips struct caam_alg_template { 17898e8ec596SKim Phillips char name[CRYPTO_MAX_ALG_NAME]; 17908e8ec596SKim Phillips char 
driver_name[CRYPTO_MAX_ALG_NAME]; 17918e8ec596SKim Phillips unsigned int blocksize; 1792885e9e2fSYuan Kang u32 type; 1793885e9e2fSYuan Kang union { 1794885e9e2fSYuan Kang struct ablkcipher_alg ablkcipher; 17958e8ec596SKim Phillips struct aead_alg aead; 1796885e9e2fSYuan Kang struct blkcipher_alg blkcipher; 1797885e9e2fSYuan Kang struct cipher_alg cipher; 1798885e9e2fSYuan Kang struct compress_alg compress; 1799885e9e2fSYuan Kang struct rng_alg rng; 1800885e9e2fSYuan Kang } template_u; 18018e8ec596SKim Phillips u32 class1_alg_type; 18028e8ec596SKim Phillips u32 class2_alg_type; 18038e8ec596SKim Phillips u32 alg_op; 18048e8ec596SKim Phillips }; 18058e8ec596SKim Phillips 18068e8ec596SKim Phillips static struct caam_alg_template driver_algs[] = { 18078e8ec596SKim Phillips /* single-pass ipsec_esp descriptor */ 18088e8ec596SKim Phillips { 18098b4d43a4SKim Phillips .name = "authenc(hmac(md5),cbc(aes))", 18108b4d43a4SKim Phillips .driver_name = "authenc-hmac-md5-cbc-aes-caam", 18118b4d43a4SKim Phillips .blocksize = AES_BLOCK_SIZE, 18128b4d43a4SKim Phillips .type = CRYPTO_ALG_TYPE_AEAD, 18138b4d43a4SKim Phillips .template_aead = { 18148b4d43a4SKim Phillips .setkey = aead_setkey, 18158b4d43a4SKim Phillips .setauthsize = aead_setauthsize, 18168b4d43a4SKim Phillips .encrypt = aead_encrypt, 18178b4d43a4SKim Phillips .decrypt = aead_decrypt, 18188b4d43a4SKim Phillips .givencrypt = aead_givencrypt, 18198b4d43a4SKim Phillips .geniv = "<built-in>", 18208b4d43a4SKim Phillips .ivsize = AES_BLOCK_SIZE, 18218b4d43a4SKim Phillips .maxauthsize = MD5_DIGEST_SIZE, 18228b4d43a4SKim Phillips }, 18238b4d43a4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 18248b4d43a4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, 18258b4d43a4SKim Phillips .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 18268b4d43a4SKim Phillips }, 18278b4d43a4SKim Phillips { 18288e8ec596SKim Phillips .name = "authenc(hmac(sha1),cbc(aes))", 18298e8ec596SKim Phillips 
.driver_name = "authenc-hmac-sha1-cbc-aes-caam", 18308e8ec596SKim Phillips .blocksize = AES_BLOCK_SIZE, 1831885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1832885e9e2fSYuan Kang .template_aead = { 18330e479300SYuan Kang .setkey = aead_setkey, 18340e479300SYuan Kang .setauthsize = aead_setauthsize, 18350e479300SYuan Kang .encrypt = aead_encrypt, 18360e479300SYuan Kang .decrypt = aead_decrypt, 18370e479300SYuan Kang .givencrypt = aead_givencrypt, 18388e8ec596SKim Phillips .geniv = "<built-in>", 18398e8ec596SKim Phillips .ivsize = AES_BLOCK_SIZE, 18408e8ec596SKim Phillips .maxauthsize = SHA1_DIGEST_SIZE, 18418e8ec596SKim Phillips }, 18428e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 18438e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, 18448e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 18458e8ec596SKim Phillips }, 18468e8ec596SKim Phillips { 1847e863f9ccSHemant Agrawal .name = "authenc(hmac(sha224),cbc(aes))", 1848e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha224-cbc-aes-caam", 1849e863f9ccSHemant Agrawal .blocksize = AES_BLOCK_SIZE, 1850e863f9ccSHemant Agrawal .template_aead = { 1851e863f9ccSHemant Agrawal .setkey = aead_setkey, 1852e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 1853e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 1854e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 1855e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 1856e863f9ccSHemant Agrawal .geniv = "<built-in>", 1857e863f9ccSHemant Agrawal .ivsize = AES_BLOCK_SIZE, 1858e863f9ccSHemant Agrawal .maxauthsize = SHA224_DIGEST_SIZE, 1859e863f9ccSHemant Agrawal }, 1860e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1861e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1862e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 1863e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 1864e863f9ccSHemant Agrawal }, 
1865e863f9ccSHemant Agrawal { 18668e8ec596SKim Phillips .name = "authenc(hmac(sha256),cbc(aes))", 18678e8ec596SKim Phillips .driver_name = "authenc-hmac-sha256-cbc-aes-caam", 18688e8ec596SKim Phillips .blocksize = AES_BLOCK_SIZE, 1869885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1870885e9e2fSYuan Kang .template_aead = { 18710e479300SYuan Kang .setkey = aead_setkey, 18720e479300SYuan Kang .setauthsize = aead_setauthsize, 18730e479300SYuan Kang .encrypt = aead_encrypt, 18740e479300SYuan Kang .decrypt = aead_decrypt, 18750e479300SYuan Kang .givencrypt = aead_givencrypt, 18768e8ec596SKim Phillips .geniv = "<built-in>", 18778e8ec596SKim Phillips .ivsize = AES_BLOCK_SIZE, 18788e8ec596SKim Phillips .maxauthsize = SHA256_DIGEST_SIZE, 18798e8ec596SKim Phillips }, 18808e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 18818e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 18828e8ec596SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 18838e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 18848e8ec596SKim Phillips }, 18858e8ec596SKim Phillips { 1886e863f9ccSHemant Agrawal .name = "authenc(hmac(sha384),cbc(aes))", 1887e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha384-cbc-aes-caam", 1888e863f9ccSHemant Agrawal .blocksize = AES_BLOCK_SIZE, 1889e863f9ccSHemant Agrawal .template_aead = { 1890e863f9ccSHemant Agrawal .setkey = aead_setkey, 1891e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 1892e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 1893e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 1894e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 1895e863f9ccSHemant Agrawal .geniv = "<built-in>", 1896e863f9ccSHemant Agrawal .ivsize = AES_BLOCK_SIZE, 1897e863f9ccSHemant Agrawal .maxauthsize = SHA384_DIGEST_SIZE, 1898e863f9ccSHemant Agrawal }, 1899e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1900e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 
1901e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 1902e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 1903e863f9ccSHemant Agrawal }, 1904e863f9ccSHemant Agrawal 1905e863f9ccSHemant Agrawal { 19064427b1b4SKim Phillips .name = "authenc(hmac(sha512),cbc(aes))", 19074427b1b4SKim Phillips .driver_name = "authenc-hmac-sha512-cbc-aes-caam", 19084427b1b4SKim Phillips .blocksize = AES_BLOCK_SIZE, 1909885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1910885e9e2fSYuan Kang .template_aead = { 19110e479300SYuan Kang .setkey = aead_setkey, 19120e479300SYuan Kang .setauthsize = aead_setauthsize, 19130e479300SYuan Kang .encrypt = aead_encrypt, 19140e479300SYuan Kang .decrypt = aead_decrypt, 19150e479300SYuan Kang .givencrypt = aead_givencrypt, 19164427b1b4SKim Phillips .geniv = "<built-in>", 19174427b1b4SKim Phillips .ivsize = AES_BLOCK_SIZE, 19184427b1b4SKim Phillips .maxauthsize = SHA512_DIGEST_SIZE, 19194427b1b4SKim Phillips }, 19204427b1b4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 19214427b1b4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 19224427b1b4SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 19234427b1b4SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 19244427b1b4SKim Phillips }, 19254427b1b4SKim Phillips { 19268b4d43a4SKim Phillips .name = "authenc(hmac(md5),cbc(des3_ede))", 19278b4d43a4SKim Phillips .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam", 19288b4d43a4SKim Phillips .blocksize = DES3_EDE_BLOCK_SIZE, 19298b4d43a4SKim Phillips .type = CRYPTO_ALG_TYPE_AEAD, 19308b4d43a4SKim Phillips .template_aead = { 19318b4d43a4SKim Phillips .setkey = aead_setkey, 19328b4d43a4SKim Phillips .setauthsize = aead_setauthsize, 19338b4d43a4SKim Phillips .encrypt = aead_encrypt, 19348b4d43a4SKim Phillips .decrypt = aead_decrypt, 19358b4d43a4SKim Phillips .givencrypt = aead_givencrypt, 19368b4d43a4SKim Phillips .geniv = "<built-in>", 19378b4d43a4SKim Phillips .ivsize = DES3_EDE_BLOCK_SIZE, 19388b4d43a4SKim Phillips 
.maxauthsize = MD5_DIGEST_SIZE, 19398b4d43a4SKim Phillips }, 19408b4d43a4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 19418b4d43a4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, 19428b4d43a4SKim Phillips .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 19438b4d43a4SKim Phillips }, 19448b4d43a4SKim Phillips { 19458e8ec596SKim Phillips .name = "authenc(hmac(sha1),cbc(des3_ede))", 19468e8ec596SKim Phillips .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", 19478e8ec596SKim Phillips .blocksize = DES3_EDE_BLOCK_SIZE, 1948885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1949885e9e2fSYuan Kang .template_aead = { 19500e479300SYuan Kang .setkey = aead_setkey, 19510e479300SYuan Kang .setauthsize = aead_setauthsize, 19520e479300SYuan Kang .encrypt = aead_encrypt, 19530e479300SYuan Kang .decrypt = aead_decrypt, 19540e479300SYuan Kang .givencrypt = aead_givencrypt, 19558e8ec596SKim Phillips .geniv = "<built-in>", 19568e8ec596SKim Phillips .ivsize = DES3_EDE_BLOCK_SIZE, 19578e8ec596SKim Phillips .maxauthsize = SHA1_DIGEST_SIZE, 19588e8ec596SKim Phillips }, 19598e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 19608e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, 19618e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 19628e8ec596SKim Phillips }, 19638e8ec596SKim Phillips { 1964e863f9ccSHemant Agrawal .name = "authenc(hmac(sha224),cbc(des3_ede))", 1965e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam", 1966e863f9ccSHemant Agrawal .blocksize = DES3_EDE_BLOCK_SIZE, 1967e863f9ccSHemant Agrawal .template_aead = { 1968e863f9ccSHemant Agrawal .setkey = aead_setkey, 1969e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 1970e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 1971e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 1972e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 1973e863f9ccSHemant Agrawal .geniv 
= "<built-in>", 1974e863f9ccSHemant Agrawal .ivsize = DES3_EDE_BLOCK_SIZE, 1975e863f9ccSHemant Agrawal .maxauthsize = SHA224_DIGEST_SIZE, 1976e863f9ccSHemant Agrawal }, 1977e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1978e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1979e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 1980e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 1981e863f9ccSHemant Agrawal }, 1982e863f9ccSHemant Agrawal { 19838e8ec596SKim Phillips .name = "authenc(hmac(sha256),cbc(des3_ede))", 19848e8ec596SKim Phillips .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", 19858e8ec596SKim Phillips .blocksize = DES3_EDE_BLOCK_SIZE, 1986885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1987885e9e2fSYuan Kang .template_aead = { 19880e479300SYuan Kang .setkey = aead_setkey, 19890e479300SYuan Kang .setauthsize = aead_setauthsize, 19900e479300SYuan Kang .encrypt = aead_encrypt, 19910e479300SYuan Kang .decrypt = aead_decrypt, 19920e479300SYuan Kang .givencrypt = aead_givencrypt, 19938e8ec596SKim Phillips .geniv = "<built-in>", 19948e8ec596SKim Phillips .ivsize = DES3_EDE_BLOCK_SIZE, 19958e8ec596SKim Phillips .maxauthsize = SHA256_DIGEST_SIZE, 19968e8ec596SKim Phillips }, 19978e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 19988e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 19998e8ec596SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 20008e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 20018e8ec596SKim Phillips }, 20028e8ec596SKim Phillips { 2003e863f9ccSHemant Agrawal .name = "authenc(hmac(sha384),cbc(des3_ede))", 2004e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam", 2005e863f9ccSHemant Agrawal .blocksize = DES3_EDE_BLOCK_SIZE, 2006e863f9ccSHemant Agrawal .template_aead = { 2007e863f9ccSHemant Agrawal .setkey = aead_setkey, 2008e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 
2009e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 2010e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 2011e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 2012e863f9ccSHemant Agrawal .geniv = "<built-in>", 2013e863f9ccSHemant Agrawal .ivsize = DES3_EDE_BLOCK_SIZE, 2014e863f9ccSHemant Agrawal .maxauthsize = SHA384_DIGEST_SIZE, 2015e863f9ccSHemant Agrawal }, 2016e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2017e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2018e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 2019e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 2020e863f9ccSHemant Agrawal }, 2021e863f9ccSHemant Agrawal { 20224427b1b4SKim Phillips .name = "authenc(hmac(sha512),cbc(des3_ede))", 20234427b1b4SKim Phillips .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", 20244427b1b4SKim Phillips .blocksize = DES3_EDE_BLOCK_SIZE, 2025885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 2026885e9e2fSYuan Kang .template_aead = { 20270e479300SYuan Kang .setkey = aead_setkey, 20280e479300SYuan Kang .setauthsize = aead_setauthsize, 20290e479300SYuan Kang .encrypt = aead_encrypt, 20300e479300SYuan Kang .decrypt = aead_decrypt, 20310e479300SYuan Kang .givencrypt = aead_givencrypt, 20324427b1b4SKim Phillips .geniv = "<built-in>", 20334427b1b4SKim Phillips .ivsize = DES3_EDE_BLOCK_SIZE, 20344427b1b4SKim Phillips .maxauthsize = SHA512_DIGEST_SIZE, 20354427b1b4SKim Phillips }, 20364427b1b4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 20374427b1b4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 20384427b1b4SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 20394427b1b4SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 20404427b1b4SKim Phillips }, 20414427b1b4SKim Phillips { 20428b4d43a4SKim Phillips .name = "authenc(hmac(md5),cbc(des))", 20438b4d43a4SKim Phillips .driver_name = "authenc-hmac-md5-cbc-des-caam", 20448b4d43a4SKim Phillips .blocksize = 
DES_BLOCK_SIZE, 20458b4d43a4SKim Phillips .type = CRYPTO_ALG_TYPE_AEAD, 20468b4d43a4SKim Phillips .template_aead = { 20478b4d43a4SKim Phillips .setkey = aead_setkey, 20488b4d43a4SKim Phillips .setauthsize = aead_setauthsize, 20498b4d43a4SKim Phillips .encrypt = aead_encrypt, 20508b4d43a4SKim Phillips .decrypt = aead_decrypt, 20518b4d43a4SKim Phillips .givencrypt = aead_givencrypt, 20528b4d43a4SKim Phillips .geniv = "<built-in>", 20538b4d43a4SKim Phillips .ivsize = DES_BLOCK_SIZE, 20548b4d43a4SKim Phillips .maxauthsize = MD5_DIGEST_SIZE, 20558b4d43a4SKim Phillips }, 20568b4d43a4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 20578b4d43a4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, 20588b4d43a4SKim Phillips .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 20598b4d43a4SKim Phillips }, 20608b4d43a4SKim Phillips { 20618e8ec596SKim Phillips .name = "authenc(hmac(sha1),cbc(des))", 20628e8ec596SKim Phillips .driver_name = "authenc-hmac-sha1-cbc-des-caam", 20638e8ec596SKim Phillips .blocksize = DES_BLOCK_SIZE, 2064885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 2065885e9e2fSYuan Kang .template_aead = { 20660e479300SYuan Kang .setkey = aead_setkey, 20670e479300SYuan Kang .setauthsize = aead_setauthsize, 20680e479300SYuan Kang .encrypt = aead_encrypt, 20690e479300SYuan Kang .decrypt = aead_decrypt, 20700e479300SYuan Kang .givencrypt = aead_givencrypt, 20718e8ec596SKim Phillips .geniv = "<built-in>", 20728e8ec596SKim Phillips .ivsize = DES_BLOCK_SIZE, 20738e8ec596SKim Phillips .maxauthsize = SHA1_DIGEST_SIZE, 20748e8ec596SKim Phillips }, 20758e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 20768e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, 20778e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 20788e8ec596SKim Phillips }, 20798e8ec596SKim Phillips { 2080e863f9ccSHemant Agrawal .name = "authenc(hmac(sha224),cbc(des))", 2081e863f9ccSHemant 
Agrawal .driver_name = "authenc-hmac-sha224-cbc-des-caam", 2082e863f9ccSHemant Agrawal .blocksize = DES_BLOCK_SIZE, 2083e863f9ccSHemant Agrawal .template_aead = { 2084e863f9ccSHemant Agrawal .setkey = aead_setkey, 2085e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 2086e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 2087e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 2088e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 2089e863f9ccSHemant Agrawal .geniv = "<built-in>", 2090e863f9ccSHemant Agrawal .ivsize = DES_BLOCK_SIZE, 2091e863f9ccSHemant Agrawal .maxauthsize = SHA224_DIGEST_SIZE, 2092e863f9ccSHemant Agrawal }, 2093e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2094e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2095e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 2096e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 2097e863f9ccSHemant Agrawal }, 2098e863f9ccSHemant Agrawal { 20998e8ec596SKim Phillips .name = "authenc(hmac(sha256),cbc(des))", 21008e8ec596SKim Phillips .driver_name = "authenc-hmac-sha256-cbc-des-caam", 21018e8ec596SKim Phillips .blocksize = DES_BLOCK_SIZE, 2102885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 2103885e9e2fSYuan Kang .template_aead = { 21040e479300SYuan Kang .setkey = aead_setkey, 21050e479300SYuan Kang .setauthsize = aead_setauthsize, 21060e479300SYuan Kang .encrypt = aead_encrypt, 21070e479300SYuan Kang .decrypt = aead_decrypt, 21080e479300SYuan Kang .givencrypt = aead_givencrypt, 21098e8ec596SKim Phillips .geniv = "<built-in>", 21108e8ec596SKim Phillips .ivsize = DES_BLOCK_SIZE, 21118e8ec596SKim Phillips .maxauthsize = SHA256_DIGEST_SIZE, 21128e8ec596SKim Phillips }, 21138e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 21148e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 21158e8ec596SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 21168e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 
21178e8ec596SKim Phillips }, 21184427b1b4SKim Phillips { 2119e863f9ccSHemant Agrawal .name = "authenc(hmac(sha384),cbc(des))", 2120e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha384-cbc-des-caam", 2121e863f9ccSHemant Agrawal .blocksize = DES_BLOCK_SIZE, 2122e863f9ccSHemant Agrawal .template_aead = { 2123e863f9ccSHemant Agrawal .setkey = aead_setkey, 2124e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 2125e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 2126e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 2127e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 2128e863f9ccSHemant Agrawal .geniv = "<built-in>", 2129e863f9ccSHemant Agrawal .ivsize = DES_BLOCK_SIZE, 2130e863f9ccSHemant Agrawal .maxauthsize = SHA384_DIGEST_SIZE, 2131e863f9ccSHemant Agrawal }, 2132e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2133e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2134e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 2135e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 2136e863f9ccSHemant Agrawal }, 2137e863f9ccSHemant Agrawal { 21384427b1b4SKim Phillips .name = "authenc(hmac(sha512),cbc(des))", 21394427b1b4SKim Phillips .driver_name = "authenc-hmac-sha512-cbc-des-caam", 21404427b1b4SKim Phillips .blocksize = DES_BLOCK_SIZE, 2141885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 2142885e9e2fSYuan Kang .template_aead = { 21430e479300SYuan Kang .setkey = aead_setkey, 21440e479300SYuan Kang .setauthsize = aead_setauthsize, 21450e479300SYuan Kang .encrypt = aead_encrypt, 21460e479300SYuan Kang .decrypt = aead_decrypt, 21470e479300SYuan Kang .givencrypt = aead_givencrypt, 21484427b1b4SKim Phillips .geniv = "<built-in>", 21494427b1b4SKim Phillips .ivsize = DES_BLOCK_SIZE, 21504427b1b4SKim Phillips .maxauthsize = SHA512_DIGEST_SIZE, 21514427b1b4SKim Phillips }, 21524427b1b4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 21534427b1b4SKim Phillips .class2_alg_type = 
OP_ALG_ALGSEL_SHA512 | 21544427b1b4SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 21554427b1b4SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 21564427b1b4SKim Phillips }, 2157acdca31dSYuan Kang /* ablkcipher descriptor */ 2158acdca31dSYuan Kang { 2159acdca31dSYuan Kang .name = "cbc(aes)", 2160acdca31dSYuan Kang .driver_name = "cbc-aes-caam", 2161acdca31dSYuan Kang .blocksize = AES_BLOCK_SIZE, 2162acdca31dSYuan Kang .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2163acdca31dSYuan Kang .template_ablkcipher = { 2164acdca31dSYuan Kang .setkey = ablkcipher_setkey, 2165acdca31dSYuan Kang .encrypt = ablkcipher_encrypt, 2166acdca31dSYuan Kang .decrypt = ablkcipher_decrypt, 2167acdca31dSYuan Kang .geniv = "eseqiv", 2168acdca31dSYuan Kang .min_keysize = AES_MIN_KEY_SIZE, 2169acdca31dSYuan Kang .max_keysize = AES_MAX_KEY_SIZE, 2170acdca31dSYuan Kang .ivsize = AES_BLOCK_SIZE, 2171acdca31dSYuan Kang }, 2172acdca31dSYuan Kang .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2173acdca31dSYuan Kang }, 2174acdca31dSYuan Kang { 2175acdca31dSYuan Kang .name = "cbc(des3_ede)", 2176acdca31dSYuan Kang .driver_name = "cbc-3des-caam", 2177acdca31dSYuan Kang .blocksize = DES3_EDE_BLOCK_SIZE, 2178acdca31dSYuan Kang .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2179acdca31dSYuan Kang .template_ablkcipher = { 2180acdca31dSYuan Kang .setkey = ablkcipher_setkey, 2181acdca31dSYuan Kang .encrypt = ablkcipher_encrypt, 2182acdca31dSYuan Kang .decrypt = ablkcipher_decrypt, 2183acdca31dSYuan Kang .geniv = "eseqiv", 2184acdca31dSYuan Kang .min_keysize = DES3_EDE_KEY_SIZE, 2185acdca31dSYuan Kang .max_keysize = DES3_EDE_KEY_SIZE, 2186acdca31dSYuan Kang .ivsize = DES3_EDE_BLOCK_SIZE, 2187acdca31dSYuan Kang }, 2188acdca31dSYuan Kang .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2189acdca31dSYuan Kang }, 2190acdca31dSYuan Kang { 2191acdca31dSYuan Kang .name = "cbc(des)", 2192acdca31dSYuan Kang .driver_name = "cbc-des-caam", 2193acdca31dSYuan Kang .blocksize = DES_BLOCK_SIZE, 2194acdca31dSYuan Kang .type 
= CRYPTO_ALG_TYPE_ABLKCIPHER, 2195acdca31dSYuan Kang .template_ablkcipher = { 2196acdca31dSYuan Kang .setkey = ablkcipher_setkey, 2197acdca31dSYuan Kang .encrypt = ablkcipher_encrypt, 2198acdca31dSYuan Kang .decrypt = ablkcipher_decrypt, 2199acdca31dSYuan Kang .geniv = "eseqiv", 2200acdca31dSYuan Kang .min_keysize = DES_KEY_SIZE, 2201acdca31dSYuan Kang .max_keysize = DES_KEY_SIZE, 2202acdca31dSYuan Kang .ivsize = DES_BLOCK_SIZE, 2203acdca31dSYuan Kang }, 2204acdca31dSYuan Kang .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2205acdca31dSYuan Kang } 22068e8ec596SKim Phillips }; 22078e8ec596SKim Phillips 22088e8ec596SKim Phillips struct caam_crypto_alg { 22098e8ec596SKim Phillips struct list_head entry; 22108e8ec596SKim Phillips struct device *ctrldev; 22118e8ec596SKim Phillips int class1_alg_type; 22128e8ec596SKim Phillips int class2_alg_type; 22138e8ec596SKim Phillips int alg_op; 22148e8ec596SKim Phillips struct crypto_alg crypto_alg; 22158e8ec596SKim Phillips }; 22168e8ec596SKim Phillips 22178e8ec596SKim Phillips static int caam_cra_init(struct crypto_tfm *tfm) 22188e8ec596SKim Phillips { 22198e8ec596SKim Phillips struct crypto_alg *alg = tfm->__crt_alg; 22208e8ec596SKim Phillips struct caam_crypto_alg *caam_alg = 22218e8ec596SKim Phillips container_of(alg, struct caam_crypto_alg, crypto_alg); 22228e8ec596SKim Phillips struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 22238e8ec596SKim Phillips struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev); 22248e8ec596SKim Phillips int tgt_jr = atomic_inc_return(&priv->tfm_count); 22258e8ec596SKim Phillips 22268e8ec596SKim Phillips /* 22278e8ec596SKim Phillips * distribute tfms across job rings to ensure in-order 22288e8ec596SKim Phillips * crypto request processing per tfm 22298e8ec596SKim Phillips */ 22308e8ec596SKim Phillips ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi]; 22318e8ec596SKim Phillips 22328e8ec596SKim Phillips /* copy descriptor header template value */ 22338e8ec596SKim 
Phillips ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; 22348e8ec596SKim Phillips ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; 22358e8ec596SKim Phillips ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; 22368e8ec596SKim Phillips 22378e8ec596SKim Phillips return 0; 22388e8ec596SKim Phillips } 22398e8ec596SKim Phillips 22408e8ec596SKim Phillips static void caam_cra_exit(struct crypto_tfm *tfm) 22418e8ec596SKim Phillips { 22428e8ec596SKim Phillips struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 22438e8ec596SKim Phillips 22441acebad3SYuan Kang if (ctx->sh_desc_enc_dma && 22451acebad3SYuan Kang !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) 22461acebad3SYuan Kang dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, 22471acebad3SYuan Kang desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); 22481acebad3SYuan Kang if (ctx->sh_desc_dec_dma && 22491acebad3SYuan Kang !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) 22501acebad3SYuan Kang dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, 22511acebad3SYuan Kang desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); 22521acebad3SYuan Kang if (ctx->sh_desc_givenc_dma && 22531acebad3SYuan Kang !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) 22541acebad3SYuan Kang dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, 22551acebad3SYuan Kang desc_bytes(ctx->sh_desc_givenc), 22564427b1b4SKim Phillips DMA_TO_DEVICE); 22578e8ec596SKim Phillips } 22588e8ec596SKim Phillips 22598e8ec596SKim Phillips static void __exit caam_algapi_exit(void) 22608e8ec596SKim Phillips { 22618e8ec596SKim Phillips 22628e8ec596SKim Phillips struct device_node *dev_node; 22638e8ec596SKim Phillips struct platform_device *pdev; 22648e8ec596SKim Phillips struct device *ctrldev; 22658e8ec596SKim Phillips struct caam_drv_private *priv; 22668e8ec596SKim Phillips struct caam_crypto_alg *t_alg, *n; 22678e8ec596SKim Phillips int i, err; 22688e8ec596SKim Phillips 226954e198d4SKim Phillips dev_node = 
of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2270a0ea0f6dSShengzhou Liu if (!dev_node) { 2271a0ea0f6dSShengzhou Liu dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 22728e8ec596SKim Phillips if (!dev_node) 22738e8ec596SKim Phillips return; 2274a0ea0f6dSShengzhou Liu } 22758e8ec596SKim Phillips 22768e8ec596SKim Phillips pdev = of_find_device_by_node(dev_node); 22778e8ec596SKim Phillips if (!pdev) 22788e8ec596SKim Phillips return; 22798e8ec596SKim Phillips 22808e8ec596SKim Phillips ctrldev = &pdev->dev; 22818e8ec596SKim Phillips of_node_put(dev_node); 22828e8ec596SKim Phillips priv = dev_get_drvdata(ctrldev); 22838e8ec596SKim Phillips 22848e8ec596SKim Phillips if (!priv->alg_list.next) 22858e8ec596SKim Phillips return; 22868e8ec596SKim Phillips 22878e8ec596SKim Phillips list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { 22888e8ec596SKim Phillips crypto_unregister_alg(&t_alg->crypto_alg); 22898e8ec596SKim Phillips list_del(&t_alg->entry); 22908e8ec596SKim Phillips kfree(t_alg); 22918e8ec596SKim Phillips } 22928e8ec596SKim Phillips 22938e8ec596SKim Phillips for (i = 0; i < priv->total_jobrs; i++) { 22948e8ec596SKim Phillips err = caam_jr_deregister(priv->algapi_jr[i]); 22958e8ec596SKim Phillips if (err < 0) 22968e8ec596SKim Phillips break; 22978e8ec596SKim Phillips } 22988e8ec596SKim Phillips kfree(priv->algapi_jr); 22998e8ec596SKim Phillips } 23008e8ec596SKim Phillips 23018e8ec596SKim Phillips static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, 23028e8ec596SKim Phillips struct caam_alg_template 23038e8ec596SKim Phillips *template) 23048e8ec596SKim Phillips { 23058e8ec596SKim Phillips struct caam_crypto_alg *t_alg; 23068e8ec596SKim Phillips struct crypto_alg *alg; 23078e8ec596SKim Phillips 23088e8ec596SKim Phillips t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); 23098e8ec596SKim Phillips if (!t_alg) { 23108e8ec596SKim Phillips dev_err(ctrldev, "failed to allocate t_alg\n"); 23118e8ec596SKim Phillips return 
ERR_PTR(-ENOMEM); 23128e8ec596SKim Phillips } 23138e8ec596SKim Phillips 23148e8ec596SKim Phillips alg = &t_alg->crypto_alg; 23158e8ec596SKim Phillips 23168e8ec596SKim Phillips snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); 23178e8ec596SKim Phillips snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 23188e8ec596SKim Phillips template->driver_name); 23198e8ec596SKim Phillips alg->cra_module = THIS_MODULE; 23208e8ec596SKim Phillips alg->cra_init = caam_cra_init; 23218e8ec596SKim Phillips alg->cra_exit = caam_cra_exit; 23228e8ec596SKim Phillips alg->cra_priority = CAAM_CRA_PRIORITY; 23238e8ec596SKim Phillips alg->cra_blocksize = template->blocksize; 23248e8ec596SKim Phillips alg->cra_alignmask = 0; 23258e8ec596SKim Phillips alg->cra_ctxsize = sizeof(struct caam_ctx); 2326d912bb76SNikos Mavrogiannopoulos alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | 2327d912bb76SNikos Mavrogiannopoulos template->type; 2328885e9e2fSYuan Kang switch (template->type) { 2329acdca31dSYuan Kang case CRYPTO_ALG_TYPE_ABLKCIPHER: 2330acdca31dSYuan Kang alg->cra_type = &crypto_ablkcipher_type; 2331acdca31dSYuan Kang alg->cra_ablkcipher = template->template_ablkcipher; 2332acdca31dSYuan Kang break; 2333885e9e2fSYuan Kang case CRYPTO_ALG_TYPE_AEAD: 2334885e9e2fSYuan Kang alg->cra_type = &crypto_aead_type; 2335885e9e2fSYuan Kang alg->cra_aead = template->template_aead; 2336885e9e2fSYuan Kang break; 2337885e9e2fSYuan Kang } 23388e8ec596SKim Phillips 23398e8ec596SKim Phillips t_alg->class1_alg_type = template->class1_alg_type; 23408e8ec596SKim Phillips t_alg->class2_alg_type = template->class2_alg_type; 23418e8ec596SKim Phillips t_alg->alg_op = template->alg_op; 23428e8ec596SKim Phillips t_alg->ctrldev = ctrldev; 23438e8ec596SKim Phillips 23448e8ec596SKim Phillips return t_alg; 23458e8ec596SKim Phillips } 23468e8ec596SKim Phillips 23478e8ec596SKim Phillips static int __init caam_algapi_init(void) 23488e8ec596SKim Phillips { 23498e8ec596SKim Phillips 
struct device_node *dev_node; 23508e8ec596SKim Phillips struct platform_device *pdev; 23518e8ec596SKim Phillips struct device *ctrldev, **jrdev; 23528e8ec596SKim Phillips struct caam_drv_private *priv; 23538e8ec596SKim Phillips int i = 0, err = 0; 23548e8ec596SKim Phillips 235554e198d4SKim Phillips dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2356a0ea0f6dSShengzhou Liu if (!dev_node) { 2357a0ea0f6dSShengzhou Liu dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 23588e8ec596SKim Phillips if (!dev_node) 23598e8ec596SKim Phillips return -ENODEV; 2360a0ea0f6dSShengzhou Liu } 23618e8ec596SKim Phillips 23628e8ec596SKim Phillips pdev = of_find_device_by_node(dev_node); 23638e8ec596SKim Phillips if (!pdev) 23648e8ec596SKim Phillips return -ENODEV; 23658e8ec596SKim Phillips 23668e8ec596SKim Phillips ctrldev = &pdev->dev; 23678e8ec596SKim Phillips priv = dev_get_drvdata(ctrldev); 23688e8ec596SKim Phillips of_node_put(dev_node); 23698e8ec596SKim Phillips 23708e8ec596SKim Phillips INIT_LIST_HEAD(&priv->alg_list); 23718e8ec596SKim Phillips 23728e8ec596SKim Phillips jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL); 23738e8ec596SKim Phillips if (!jrdev) 23748e8ec596SKim Phillips return -ENOMEM; 23758e8ec596SKim Phillips 23768e8ec596SKim Phillips for (i = 0; i < priv->total_jobrs; i++) { 23778e8ec596SKim Phillips err = caam_jr_register(ctrldev, &jrdev[i]); 23788e8ec596SKim Phillips if (err < 0) 23798e8ec596SKim Phillips break; 23808e8ec596SKim Phillips } 23818e8ec596SKim Phillips if (err < 0 && i == 0) { 23828e8ec596SKim Phillips dev_err(ctrldev, "algapi error in job ring registration: %d\n", 23838e8ec596SKim Phillips err); 2384b3b7f055SJulia Lawall kfree(jrdev); 23858e8ec596SKim Phillips return err; 23868e8ec596SKim Phillips } 23878e8ec596SKim Phillips 23888e8ec596SKim Phillips priv->num_jrs_for_algapi = i; 23898e8ec596SKim Phillips priv->algapi_jr = jrdev; 23908e8ec596SKim Phillips atomic_set(&priv->tfm_count, -1); 23918e8ec596SKim 
Phillips 23928e8ec596SKim Phillips /* register crypto algorithms the device supports */ 23938e8ec596SKim Phillips for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 23948e8ec596SKim Phillips /* TODO: check if h/w supports alg */ 23958e8ec596SKim Phillips struct caam_crypto_alg *t_alg; 23968e8ec596SKim Phillips 23978e8ec596SKim Phillips t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); 23988e8ec596SKim Phillips if (IS_ERR(t_alg)) { 23998e8ec596SKim Phillips err = PTR_ERR(t_alg); 24008e8ec596SKim Phillips dev_warn(ctrldev, "%s alg allocation failed\n", 2401cdc712d8SDan Carpenter driver_algs[i].driver_name); 24028e8ec596SKim Phillips continue; 24038e8ec596SKim Phillips } 24048e8ec596SKim Phillips 24058e8ec596SKim Phillips err = crypto_register_alg(&t_alg->crypto_alg); 24068e8ec596SKim Phillips if (err) { 24078e8ec596SKim Phillips dev_warn(ctrldev, "%s alg registration failed\n", 24088e8ec596SKim Phillips t_alg->crypto_alg.cra_driver_name); 24098e8ec596SKim Phillips kfree(t_alg); 24100113529fSKim Phillips } else 24118e8ec596SKim Phillips list_add_tail(&t_alg->entry, &priv->alg_list); 24128e8ec596SKim Phillips } 24130113529fSKim Phillips if (!list_empty(&priv->alg_list)) 24140113529fSKim Phillips dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", 24150113529fSKim Phillips (char *)of_get_property(dev_node, "compatible", NULL)); 24168e8ec596SKim Phillips 24178e8ec596SKim Phillips return err; 24188e8ec596SKim Phillips } 24198e8ec596SKim Phillips 24208e8ec596SKim Phillips module_init(caam_algapi_init); 24218e8ec596SKim Phillips module_exit(caam_algapi_exit); 24228e8ec596SKim Phillips 24238e8ec596SKim Phillips MODULE_LICENSE("GPL"); 24248e8ec596SKim Phillips MODULE_DESCRIPTION("FSL CAAM support for crypto API"); 24258e8ec596SKim Phillips MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); 2426