18e8ec596SKim Phillips /* 28e8ec596SKim Phillips * caam - Freescale FSL CAAM support for crypto API 38e8ec596SKim Phillips * 48e8ec596SKim Phillips * Copyright 2008-2011 Freescale Semiconductor, Inc. 58e8ec596SKim Phillips * 68e8ec596SKim Phillips * Based on talitos crypto API driver. 78e8ec596SKim Phillips * 88e8ec596SKim Phillips * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008): 98e8ec596SKim Phillips * 108e8ec596SKim Phillips * --------------- --------------- 118e8ec596SKim Phillips * | JobDesc #1 |-------------------->| ShareDesc | 128e8ec596SKim Phillips * | *(packet 1) | | (PDB) | 138e8ec596SKim Phillips * --------------- |------------->| (hashKey) | 148e8ec596SKim Phillips * . | | (cipherKey) | 158e8ec596SKim Phillips * . | |-------->| (operation) | 168e8ec596SKim Phillips * --------------- | | --------------- 178e8ec596SKim Phillips * | JobDesc #2 |------| | 188e8ec596SKim Phillips * | *(packet 2) | | 198e8ec596SKim Phillips * --------------- | 208e8ec596SKim Phillips * . | 218e8ec596SKim Phillips * . | 228e8ec596SKim Phillips * --------------- | 238e8ec596SKim Phillips * | JobDesc #3 |------------ 248e8ec596SKim Phillips * | *(packet 3) | 258e8ec596SKim Phillips * --------------- 268e8ec596SKim Phillips * 278e8ec596SKim Phillips * The SharedDesc never changes for a connection unless rekeyed, but 288e8ec596SKim Phillips * each packet will likely be in a different place. So all we need 298e8ec596SKim Phillips * to know to process the packet is where the input is, where the 308e8ec596SKim Phillips * output goes, and what context we want to process with. Context is 318e8ec596SKim Phillips * in the SharedDesc, packet references in the JobDesc. 
328e8ec596SKim Phillips * 338e8ec596SKim Phillips * So, a job desc looks like: 348e8ec596SKim Phillips * 358e8ec596SKim Phillips * --------------------- 368e8ec596SKim Phillips * | Header | 378e8ec596SKim Phillips * | ShareDesc Pointer | 388e8ec596SKim Phillips * | SEQ_OUT_PTR | 398e8ec596SKim Phillips * | (output buffer) | 406ec47334SYuan Kang * | (output length) | 418e8ec596SKim Phillips * | SEQ_IN_PTR | 428e8ec596SKim Phillips * | (input buffer) | 436ec47334SYuan Kang * | (input length) | 448e8ec596SKim Phillips * --------------------- 458e8ec596SKim Phillips */ 468e8ec596SKim Phillips 478e8ec596SKim Phillips #include "compat.h" 488e8ec596SKim Phillips 498e8ec596SKim Phillips #include "regs.h" 508e8ec596SKim Phillips #include "intern.h" 518e8ec596SKim Phillips #include "desc_constr.h" 528e8ec596SKim Phillips #include "jr.h" 538e8ec596SKim Phillips #include "error.h" 54a299c837SYuan Kang #include "sg_sw_sec4.h" 554c1ec1f9SYuan Kang #include "key_gen.h" 568e8ec596SKim Phillips 578e8ec596SKim Phillips /* 588e8ec596SKim Phillips * crypto alg 598e8ec596SKim Phillips */ 608e8ec596SKim Phillips #define CAAM_CRA_PRIORITY 3000 618e8ec596SKim Phillips /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ 628e8ec596SKim Phillips #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ 638e8ec596SKim Phillips SHA512_DIGEST_SIZE * 2) 648e8ec596SKim Phillips /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 658e8ec596SKim Phillips #define CAAM_MAX_IV_LENGTH 16 668e8ec596SKim Phillips 674427b1b4SKim Phillips /* length of descriptors text */ 681acebad3SYuan Kang #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) 691acebad3SYuan Kang #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) 701acebad3SYuan Kang #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ) 711acebad3SYuan Kang #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) 721acebad3SYuan Kang 73acdca31dSYuan Kang #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) 74acdca31dSYuan Kang #define 
DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ 75acdca31dSYuan Kang 20 * CAAM_CMD_SZ) 76acdca31dSYuan Kang #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ 77acdca31dSYuan Kang 15 * CAAM_CMD_SZ) 78acdca31dSYuan Kang 791acebad3SYuan Kang #define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \ 801acebad3SYuan Kang CAAM_MAX_KEY_SIZE) 811acebad3SYuan Kang #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 824427b1b4SKim Phillips 838e8ec596SKim Phillips #ifdef DEBUG 848e8ec596SKim Phillips /* for print_hex_dumps with line references */ 858e8ec596SKim Phillips #define debug(format, arg...) printk(format, arg) 868e8ec596SKim Phillips #else 878e8ec596SKim Phillips #define debug(format, arg...) 888e8ec596SKim Phillips #endif 89cfc6f11bSRuchika Gupta static struct list_head alg_list; 908e8ec596SKim Phillips 911acebad3SYuan Kang /* Set DK bit in class 1 operation if shared */ 921acebad3SYuan Kang static inline void append_dec_op1(u32 *desc, u32 type) 931acebad3SYuan Kang { 941acebad3SYuan Kang u32 *jump_cmd, *uncond_jump_cmd; 951acebad3SYuan Kang 961acebad3SYuan Kang jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); 971acebad3SYuan Kang append_operation(desc, type | OP_ALG_AS_INITFINAL | 981acebad3SYuan Kang OP_ALG_DECRYPT); 991acebad3SYuan Kang uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); 1001acebad3SYuan Kang set_jump_tgt_here(desc, jump_cmd); 1011acebad3SYuan Kang append_operation(desc, type | OP_ALG_AS_INITFINAL | 1021acebad3SYuan Kang OP_ALG_DECRYPT | OP_ALG_AAI_DK); 1031acebad3SYuan Kang set_jump_tgt_here(desc, uncond_jump_cmd); 1041acebad3SYuan Kang } 1051acebad3SYuan Kang 1061acebad3SYuan Kang /* 1071acebad3SYuan Kang * Wait for completion of class 1 key loading before allowing 1081acebad3SYuan Kang * error propagation 1091acebad3SYuan Kang */ 1101acebad3SYuan Kang static inline void append_dec_shr_done(u32 *desc) 1111acebad3SYuan Kang { 1121acebad3SYuan Kang u32 *jump_cmd; 1131acebad3SYuan Kang 1141acebad3SYuan Kang jump_cmd = 
append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL); 1151acebad3SYuan Kang set_jump_tgt_here(desc, jump_cmd); 116a2ecb155SKim Phillips append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 1171acebad3SYuan Kang } 1181acebad3SYuan Kang 1191acebad3SYuan Kang /* 1201acebad3SYuan Kang * For aead functions, read payload and write payload, 1211acebad3SYuan Kang * both of which are specified in req->src and req->dst 1221acebad3SYuan Kang */ 1231acebad3SYuan Kang static inline void aead_append_src_dst(u32 *desc, u32 msg_type) 1241acebad3SYuan Kang { 1251acebad3SYuan Kang append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | 1261acebad3SYuan Kang KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); 1271acebad3SYuan Kang append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); 1281acebad3SYuan Kang } 1291acebad3SYuan Kang 1301acebad3SYuan Kang /* 1311acebad3SYuan Kang * For aead encrypt and decrypt, read iv for both classes 1321acebad3SYuan Kang */ 1331acebad3SYuan Kang static inline void aead_append_ld_iv(u32 *desc, int ivsize) 1341acebad3SYuan Kang { 1351acebad3SYuan Kang append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 1361acebad3SYuan Kang LDST_CLASS_1_CCB | ivsize); 1371acebad3SYuan Kang append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize); 1381acebad3SYuan Kang } 1391acebad3SYuan Kang 1401acebad3SYuan Kang /* 141acdca31dSYuan Kang * For ablkcipher encrypt and decrypt, read from req->src and 142acdca31dSYuan Kang * write to req->dst 143acdca31dSYuan Kang */ 144acdca31dSYuan Kang static inline void ablkcipher_append_src_dst(u32 *desc) 145acdca31dSYuan Kang { 14670d793ccSKim Phillips append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 14770d793ccSKim Phillips append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 14870d793ccSKim Phillips append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | 14970d793ccSKim Phillips KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); 15070d793ccSKim Phillips append_seq_fifo_store(desc, 0, 
FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); 151acdca31dSYuan Kang } 152acdca31dSYuan Kang 153acdca31dSYuan Kang /* 1541acebad3SYuan Kang * If all data, including src (with assoc and iv) or dst (with iv only) are 1551acebad3SYuan Kang * contiguous 1561acebad3SYuan Kang */ 1571acebad3SYuan Kang #define GIV_SRC_CONTIG 1 1581acebad3SYuan Kang #define GIV_DST_CONTIG (1 << 1) 1591acebad3SYuan Kang 1608e8ec596SKim Phillips /* 1618e8ec596SKim Phillips * per-session context 1628e8ec596SKim Phillips */ 1638e8ec596SKim Phillips struct caam_ctx { 1648e8ec596SKim Phillips struct device *jrdev; 1651acebad3SYuan Kang u32 sh_desc_enc[DESC_MAX_USED_LEN]; 1661acebad3SYuan Kang u32 sh_desc_dec[DESC_MAX_USED_LEN]; 1671acebad3SYuan Kang u32 sh_desc_givenc[DESC_MAX_USED_LEN]; 1681acebad3SYuan Kang dma_addr_t sh_desc_enc_dma; 1691acebad3SYuan Kang dma_addr_t sh_desc_dec_dma; 1701acebad3SYuan Kang dma_addr_t sh_desc_givenc_dma; 1718e8ec596SKim Phillips u32 class1_alg_type; 1728e8ec596SKim Phillips u32 class2_alg_type; 1738e8ec596SKim Phillips u32 alg_op; 1741acebad3SYuan Kang u8 key[CAAM_MAX_KEY_SIZE]; 175885e9e2fSYuan Kang dma_addr_t key_dma; 1768e8ec596SKim Phillips unsigned int enckeylen; 1778e8ec596SKim Phillips unsigned int split_key_len; 1788e8ec596SKim Phillips unsigned int split_key_pad_len; 1798e8ec596SKim Phillips unsigned int authsize; 1808e8ec596SKim Phillips }; 1818e8ec596SKim Phillips 1821acebad3SYuan Kang static void append_key_aead(u32 *desc, struct caam_ctx *ctx, 1831acebad3SYuan Kang int keys_fit_inline) 1841acebad3SYuan Kang { 1851acebad3SYuan Kang if (keys_fit_inline) { 1861acebad3SYuan Kang append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, 1871acebad3SYuan Kang ctx->split_key_len, CLASS_2 | 1881acebad3SYuan Kang KEY_DEST_MDHA_SPLIT | KEY_ENC); 1891acebad3SYuan Kang append_key_as_imm(desc, (void *)ctx->key + 1901acebad3SYuan Kang ctx->split_key_pad_len, ctx->enckeylen, 1911acebad3SYuan Kang ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); 1921acebad3SYuan Kang } else { 
1931acebad3SYuan Kang append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | 1941acebad3SYuan Kang KEY_DEST_MDHA_SPLIT | KEY_ENC); 1951acebad3SYuan Kang append_key(desc, ctx->key_dma + ctx->split_key_pad_len, 1961acebad3SYuan Kang ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); 1971acebad3SYuan Kang } 1981acebad3SYuan Kang } 1991acebad3SYuan Kang 2001acebad3SYuan Kang static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, 2011acebad3SYuan Kang int keys_fit_inline) 2021acebad3SYuan Kang { 2031acebad3SYuan Kang u32 *key_jump_cmd; 2041acebad3SYuan Kang 20561bb86bbSKim Phillips init_sh_desc(desc, HDR_SHARE_SERIAL); 2061acebad3SYuan Kang 2071acebad3SYuan Kang /* Skip if already shared */ 2081acebad3SYuan Kang key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 2091acebad3SYuan Kang JUMP_COND_SHRD); 2101acebad3SYuan Kang 2111acebad3SYuan Kang append_key_aead(desc, ctx, keys_fit_inline); 2121acebad3SYuan Kang 2131acebad3SYuan Kang set_jump_tgt_here(desc, key_jump_cmd); 2141acebad3SYuan Kang 2151acebad3SYuan Kang /* Propagate errors from shared to job descriptor */ 216a2ecb155SKim Phillips append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 2171acebad3SYuan Kang } 2181acebad3SYuan Kang 2191acebad3SYuan Kang static int aead_set_sh_desc(struct crypto_aead *aead) 2201acebad3SYuan Kang { 2211acebad3SYuan Kang struct aead_tfm *tfm = &aead->base.crt_aead; 2221acebad3SYuan Kang struct caam_ctx *ctx = crypto_aead_ctx(aead); 2231acebad3SYuan Kang struct device *jrdev = ctx->jrdev; 2242af8f4a2SKim Phillips bool keys_fit_inline = false; 2251acebad3SYuan Kang u32 *key_jump_cmd, *jump_cmd; 2261acebad3SYuan Kang u32 geniv, moveiv; 2271acebad3SYuan Kang u32 *desc; 2281acebad3SYuan Kang 2291acebad3SYuan Kang if (!ctx->enckeylen || !ctx->authsize) 2301acebad3SYuan Kang return 0; 2311acebad3SYuan Kang 2321acebad3SYuan Kang /* 2331acebad3SYuan Kang * Job Descriptor and Shared Descriptors 2341acebad3SYuan Kang * must all fit into the 64-word Descriptor h/w Buffer 
2351acebad3SYuan Kang */ 2361acebad3SYuan Kang if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + 2371acebad3SYuan Kang ctx->split_key_pad_len + ctx->enckeylen <= 2381acebad3SYuan Kang CAAM_DESC_BYTES_MAX) 2392af8f4a2SKim Phillips keys_fit_inline = true; 2401acebad3SYuan Kang 2411acebad3SYuan Kang /* aead_encrypt shared descriptor */ 2421acebad3SYuan Kang desc = ctx->sh_desc_enc; 2431acebad3SYuan Kang 2441acebad3SYuan Kang init_sh_desc_key_aead(desc, ctx, keys_fit_inline); 2451acebad3SYuan Kang 2461acebad3SYuan Kang /* Class 2 operation */ 2471acebad3SYuan Kang append_operation(desc, ctx->class2_alg_type | 2481acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 2491acebad3SYuan Kang 2501acebad3SYuan Kang /* cryptlen = seqoutlen - authsize */ 2511acebad3SYuan Kang append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); 2521acebad3SYuan Kang 2531acebad3SYuan Kang /* assoclen + cryptlen = seqinlen - ivsize */ 2541acebad3SYuan Kang append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); 2551acebad3SYuan Kang 2561acebad3SYuan Kang /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */ 2571acebad3SYuan Kang append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); 2581acebad3SYuan Kang 2591acebad3SYuan Kang /* read assoc before reading payload */ 2601acebad3SYuan Kang append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 2611acebad3SYuan Kang KEY_VLF); 2621acebad3SYuan Kang aead_append_ld_iv(desc, tfm->ivsize); 2631acebad3SYuan Kang 2641acebad3SYuan Kang /* Class 1 operation */ 2651acebad3SYuan Kang append_operation(desc, ctx->class1_alg_type | 2661acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 2671acebad3SYuan Kang 2681acebad3SYuan Kang /* Read and write cryptlen bytes */ 2691acebad3SYuan Kang append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 2701acebad3SYuan Kang append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 2711acebad3SYuan Kang aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); 
2721acebad3SYuan Kang 2731acebad3SYuan Kang /* Write ICV */ 2741acebad3SYuan Kang append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | 2751acebad3SYuan Kang LDST_SRCDST_BYTE_CONTEXT); 2761acebad3SYuan Kang 2771acebad3SYuan Kang ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 2781acebad3SYuan Kang desc_bytes(desc), 2791acebad3SYuan Kang DMA_TO_DEVICE); 2801acebad3SYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 2811acebad3SYuan Kang dev_err(jrdev, "unable to map shared descriptor\n"); 2821acebad3SYuan Kang return -ENOMEM; 2831acebad3SYuan Kang } 2841acebad3SYuan Kang #ifdef DEBUG 285514df281SAlex Porosanu print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ", 2861acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 2871acebad3SYuan Kang desc_bytes(desc), 1); 2881acebad3SYuan Kang #endif 2891acebad3SYuan Kang 2901acebad3SYuan Kang /* 2911acebad3SYuan Kang * Job Descriptor and Shared Descriptors 2921acebad3SYuan Kang * must all fit into the 64-word Descriptor h/w Buffer 2931acebad3SYuan Kang */ 2941acebad3SYuan Kang if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + 2951acebad3SYuan Kang ctx->split_key_pad_len + ctx->enckeylen <= 2961acebad3SYuan Kang CAAM_DESC_BYTES_MAX) 2972af8f4a2SKim Phillips keys_fit_inline = true; 2981acebad3SYuan Kang 2991acebad3SYuan Kang desc = ctx->sh_desc_dec; 3001acebad3SYuan Kang 3011acebad3SYuan Kang /* aead_decrypt shared descriptor */ 30261bb86bbSKim Phillips init_sh_desc(desc, HDR_SHARE_SERIAL); 3031acebad3SYuan Kang 3041acebad3SYuan Kang /* Skip if already shared */ 3051acebad3SYuan Kang key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 3061acebad3SYuan Kang JUMP_COND_SHRD); 3071acebad3SYuan Kang 3081acebad3SYuan Kang append_key_aead(desc, ctx, keys_fit_inline); 3091acebad3SYuan Kang 3101acebad3SYuan Kang /* Only propagate error immediately if shared */ 3111acebad3SYuan Kang jump_cmd = append_jump(desc, JUMP_TEST_ALL); 3121acebad3SYuan Kang set_jump_tgt_here(desc, key_jump_cmd); 313a2ecb155SKim 
Phillips append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 3141acebad3SYuan Kang set_jump_tgt_here(desc, jump_cmd); 3151acebad3SYuan Kang 3161acebad3SYuan Kang /* Class 2 operation */ 3171acebad3SYuan Kang append_operation(desc, ctx->class2_alg_type | 3181acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); 3191acebad3SYuan Kang 3201acebad3SYuan Kang /* assoclen + cryptlen = seqinlen - ivsize */ 3211acebad3SYuan Kang append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, 3221acebad3SYuan Kang ctx->authsize + tfm->ivsize) 3231acebad3SYuan Kang /* assoclen = (assoclen + cryptlen) - cryptlen */ 3241acebad3SYuan Kang append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); 3251acebad3SYuan Kang append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); 3261acebad3SYuan Kang 3271acebad3SYuan Kang /* read assoc before reading payload */ 3281acebad3SYuan Kang append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 3291acebad3SYuan Kang KEY_VLF); 3301acebad3SYuan Kang 3311acebad3SYuan Kang aead_append_ld_iv(desc, tfm->ivsize); 3321acebad3SYuan Kang 3331acebad3SYuan Kang append_dec_op1(desc, ctx->class1_alg_type); 3341acebad3SYuan Kang 3351acebad3SYuan Kang /* Read and write cryptlen bytes */ 3361acebad3SYuan Kang append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); 3371acebad3SYuan Kang append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); 3381acebad3SYuan Kang aead_append_src_dst(desc, FIFOLD_TYPE_MSG); 3391acebad3SYuan Kang 3401acebad3SYuan Kang /* Load ICV */ 3411acebad3SYuan Kang append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | 3421acebad3SYuan Kang FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); 3431acebad3SYuan Kang append_dec_shr_done(desc); 3441acebad3SYuan Kang 3451acebad3SYuan Kang ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 3461acebad3SYuan Kang desc_bytes(desc), 3471acebad3SYuan Kang DMA_TO_DEVICE); 3481acebad3SYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 3491acebad3SYuan Kang 
dev_err(jrdev, "unable to map shared descriptor\n"); 3501acebad3SYuan Kang return -ENOMEM; 3511acebad3SYuan Kang } 3521acebad3SYuan Kang #ifdef DEBUG 353514df281SAlex Porosanu print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ", 3541acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 3551acebad3SYuan Kang desc_bytes(desc), 1); 3561acebad3SYuan Kang #endif 3571acebad3SYuan Kang 3581acebad3SYuan Kang /* 3591acebad3SYuan Kang * Job Descriptor and Shared Descriptors 3601acebad3SYuan Kang * must all fit into the 64-word Descriptor h/w Buffer 3611acebad3SYuan Kang */ 3621acebad3SYuan Kang if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + 3631acebad3SYuan Kang ctx->split_key_pad_len + ctx->enckeylen <= 3641acebad3SYuan Kang CAAM_DESC_BYTES_MAX) 3652af8f4a2SKim Phillips keys_fit_inline = true; 3661acebad3SYuan Kang 3671acebad3SYuan Kang /* aead_givencrypt shared descriptor */ 3681acebad3SYuan Kang desc = ctx->sh_desc_givenc; 3691acebad3SYuan Kang 3701acebad3SYuan Kang init_sh_desc_key_aead(desc, ctx, keys_fit_inline); 3711acebad3SYuan Kang 3721acebad3SYuan Kang /* Generate IV */ 3731acebad3SYuan Kang geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | 3741acebad3SYuan Kang NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | 3751acebad3SYuan Kang NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); 3761acebad3SYuan Kang append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | 3771acebad3SYuan Kang LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); 3781acebad3SYuan Kang append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); 3791acebad3SYuan Kang append_move(desc, MOVE_SRC_INFIFO | 3801acebad3SYuan Kang MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT)); 3811acebad3SYuan Kang append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); 3821acebad3SYuan Kang 3831acebad3SYuan Kang /* Copy IV to class 1 context */ 3841acebad3SYuan Kang append_move(desc, MOVE_SRC_CLASS1CTX | 3851acebad3SYuan Kang MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT)); 3861acebad3SYuan Kang 3871acebad3SYuan 
Kang /* Return to encryption */ 3881acebad3SYuan Kang append_operation(desc, ctx->class2_alg_type | 3891acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 3901acebad3SYuan Kang 3911acebad3SYuan Kang /* ivsize + cryptlen = seqoutlen - authsize */ 3921acebad3SYuan Kang append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); 3931acebad3SYuan Kang 3941acebad3SYuan Kang /* assoclen = seqinlen - (ivsize + cryptlen) */ 3951acebad3SYuan Kang append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); 3961acebad3SYuan Kang 3971acebad3SYuan Kang /* read assoc before reading payload */ 3981acebad3SYuan Kang append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | 3991acebad3SYuan Kang KEY_VLF); 4001acebad3SYuan Kang 4011acebad3SYuan Kang /* Copy iv from class 1 ctx to class 2 fifo*/ 4021acebad3SYuan Kang moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | 4031acebad3SYuan Kang NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); 4041acebad3SYuan Kang append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | 4051acebad3SYuan Kang LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); 4061acebad3SYuan Kang append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | 4071acebad3SYuan Kang LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); 4081acebad3SYuan Kang 4091acebad3SYuan Kang /* Class 1 operation */ 4101acebad3SYuan Kang append_operation(desc, ctx->class1_alg_type | 4111acebad3SYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 4121acebad3SYuan Kang 4131acebad3SYuan Kang /* Will write ivsize + cryptlen */ 4141acebad3SYuan Kang append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 4151acebad3SYuan Kang 4161acebad3SYuan Kang /* Not need to reload iv */ 4171acebad3SYuan Kang append_seq_fifo_load(desc, tfm->ivsize, 4181acebad3SYuan Kang FIFOLD_CLASS_SKIP); 4191acebad3SYuan Kang 4201acebad3SYuan Kang /* Will read cryptlen */ 4211acebad3SYuan Kang append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 4221acebad3SYuan Kang 
aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); 4231acebad3SYuan Kang 4241acebad3SYuan Kang /* Write ICV */ 4251acebad3SYuan Kang append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | 4261acebad3SYuan Kang LDST_SRCDST_BYTE_CONTEXT); 4271acebad3SYuan Kang 4281acebad3SYuan Kang ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, 4291acebad3SYuan Kang desc_bytes(desc), 4301acebad3SYuan Kang DMA_TO_DEVICE); 4311acebad3SYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { 4321acebad3SYuan Kang dev_err(jrdev, "unable to map shared descriptor\n"); 4331acebad3SYuan Kang return -ENOMEM; 4341acebad3SYuan Kang } 4351acebad3SYuan Kang #ifdef DEBUG 436514df281SAlex Porosanu print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ", 4371acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 4381acebad3SYuan Kang desc_bytes(desc), 1); 4391acebad3SYuan Kang #endif 4401acebad3SYuan Kang 4411acebad3SYuan Kang return 0; 4421acebad3SYuan Kang } 4431acebad3SYuan Kang 4440e479300SYuan Kang static int aead_setauthsize(struct crypto_aead *authenc, 4458e8ec596SKim Phillips unsigned int authsize) 4468e8ec596SKim Phillips { 4478e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(authenc); 4488e8ec596SKim Phillips 4498e8ec596SKim Phillips ctx->authsize = authsize; 4501acebad3SYuan Kang aead_set_sh_desc(authenc); 4518e8ec596SKim Phillips 4528e8ec596SKim Phillips return 0; 4538e8ec596SKim Phillips } 4548e8ec596SKim Phillips 4554c1ec1f9SYuan Kang static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, 4564c1ec1f9SYuan Kang u32 authkeylen) 4578e8ec596SKim Phillips { 4584c1ec1f9SYuan Kang return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, 4594c1ec1f9SYuan Kang ctx->split_key_pad_len, key_in, authkeylen, 4604c1ec1f9SYuan Kang ctx->alg_op); 4618e8ec596SKim Phillips } 4628e8ec596SKim Phillips 4630e479300SYuan Kang static int aead_setkey(struct crypto_aead *aead, 4648e8ec596SKim Phillips const u8 *key, unsigned int keylen) 
4658e8ec596SKim Phillips { 4668e8ec596SKim Phillips /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ 4678e8ec596SKim Phillips static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; 4688e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 4698e8ec596SKim Phillips struct device *jrdev = ctx->jrdev; 4708e8ec596SKim Phillips struct rtattr *rta = (void *)key; 4718e8ec596SKim Phillips struct crypto_authenc_key_param *param; 4728e8ec596SKim Phillips unsigned int authkeylen; 4738e8ec596SKim Phillips unsigned int enckeylen; 4748e8ec596SKim Phillips int ret = 0; 4758e8ec596SKim Phillips 4768e8ec596SKim Phillips param = RTA_DATA(rta); 4778e8ec596SKim Phillips enckeylen = be32_to_cpu(param->enckeylen); 4788e8ec596SKim Phillips 4798e8ec596SKim Phillips key += RTA_ALIGN(rta->rta_len); 4808e8ec596SKim Phillips keylen -= RTA_ALIGN(rta->rta_len); 4818e8ec596SKim Phillips 4828e8ec596SKim Phillips if (keylen < enckeylen) 4838e8ec596SKim Phillips goto badkey; 4848e8ec596SKim Phillips 4858e8ec596SKim Phillips authkeylen = keylen - enckeylen; 4868e8ec596SKim Phillips 4878e8ec596SKim Phillips if (keylen > CAAM_MAX_KEY_SIZE) 4888e8ec596SKim Phillips goto badkey; 4898e8ec596SKim Phillips 4908e8ec596SKim Phillips /* Pick class 2 key length from algorithm submask */ 4918e8ec596SKim Phillips ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> 4928e8ec596SKim Phillips OP_ALG_ALGSEL_SHIFT] * 2; 4938e8ec596SKim Phillips ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); 4948e8ec596SKim Phillips 4958e8ec596SKim Phillips #ifdef DEBUG 4968e8ec596SKim Phillips printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", 4978e8ec596SKim Phillips keylen, enckeylen, authkeylen); 4988e8ec596SKim Phillips printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", 4998e8ec596SKim Phillips ctx->split_key_len, ctx->split_key_pad_len); 500514df281SAlex Porosanu print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 5018e8ec596SKim Phillips 
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 5028e8ec596SKim Phillips #endif 5038e8ec596SKim Phillips 5044c1ec1f9SYuan Kang ret = gen_split_aead_key(ctx, key, authkeylen); 5058e8ec596SKim Phillips if (ret) { 5068e8ec596SKim Phillips goto badkey; 5078e8ec596SKim Phillips } 5088e8ec596SKim Phillips 5098e8ec596SKim Phillips /* postpend encryption key to auth split key */ 5108e8ec596SKim Phillips memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen); 5118e8ec596SKim Phillips 512885e9e2fSYuan Kang ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + 5138e8ec596SKim Phillips enckeylen, DMA_TO_DEVICE); 514885e9e2fSYuan Kang if (dma_mapping_error(jrdev, ctx->key_dma)) { 5158e8ec596SKim Phillips dev_err(jrdev, "unable to map key i/o memory\n"); 5168e8ec596SKim Phillips return -ENOMEM; 5178e8ec596SKim Phillips } 5188e8ec596SKim Phillips #ifdef DEBUG 519514df281SAlex Porosanu print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 5208e8ec596SKim Phillips DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 5218e8ec596SKim Phillips ctx->split_key_pad_len + enckeylen, 1); 5228e8ec596SKim Phillips #endif 5238e8ec596SKim Phillips 5248e8ec596SKim Phillips ctx->enckeylen = enckeylen; 5258e8ec596SKim Phillips 5261acebad3SYuan Kang ret = aead_set_sh_desc(aead); 5278e8ec596SKim Phillips if (ret) { 528885e9e2fSYuan Kang dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + 5298e8ec596SKim Phillips enckeylen, DMA_TO_DEVICE); 5308e8ec596SKim Phillips } 5318e8ec596SKim Phillips 5328e8ec596SKim Phillips return ret; 5338e8ec596SKim Phillips badkey: 5348e8ec596SKim Phillips crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 5358e8ec596SKim Phillips return -EINVAL; 5368e8ec596SKim Phillips } 5378e8ec596SKim Phillips 538acdca31dSYuan Kang static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 539acdca31dSYuan Kang const u8 *key, unsigned int keylen) 540acdca31dSYuan Kang { 541acdca31dSYuan Kang struct caam_ctx *ctx = 
crypto_ablkcipher_ctx(ablkcipher); 542acdca31dSYuan Kang struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher; 543acdca31dSYuan Kang struct device *jrdev = ctx->jrdev; 544acdca31dSYuan Kang int ret = 0; 545acdca31dSYuan Kang u32 *key_jump_cmd, *jump_cmd; 546acdca31dSYuan Kang u32 *desc; 547acdca31dSYuan Kang 548acdca31dSYuan Kang #ifdef DEBUG 549514df281SAlex Porosanu print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 550acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 551acdca31dSYuan Kang #endif 552acdca31dSYuan Kang 553acdca31dSYuan Kang memcpy(ctx->key, key, keylen); 554acdca31dSYuan Kang ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, 555acdca31dSYuan Kang DMA_TO_DEVICE); 556acdca31dSYuan Kang if (dma_mapping_error(jrdev, ctx->key_dma)) { 557acdca31dSYuan Kang dev_err(jrdev, "unable to map key i/o memory\n"); 558acdca31dSYuan Kang return -ENOMEM; 559acdca31dSYuan Kang } 560acdca31dSYuan Kang ctx->enckeylen = keylen; 561acdca31dSYuan Kang 562acdca31dSYuan Kang /* ablkcipher_encrypt shared descriptor */ 563acdca31dSYuan Kang desc = ctx->sh_desc_enc; 56461bb86bbSKim Phillips init_sh_desc(desc, HDR_SHARE_SERIAL); 565acdca31dSYuan Kang /* Skip if already shared */ 566acdca31dSYuan Kang key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 567acdca31dSYuan Kang JUMP_COND_SHRD); 568acdca31dSYuan Kang 569acdca31dSYuan Kang /* Load class1 key only */ 570acdca31dSYuan Kang append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, 571acdca31dSYuan Kang ctx->enckeylen, CLASS_1 | 572acdca31dSYuan Kang KEY_DEST_CLASS_REG); 573acdca31dSYuan Kang 574acdca31dSYuan Kang set_jump_tgt_here(desc, key_jump_cmd); 575acdca31dSYuan Kang 576acdca31dSYuan Kang /* Propagate errors from shared to job descriptor */ 577a2ecb155SKim Phillips append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 578acdca31dSYuan Kang 579acdca31dSYuan Kang /* Load iv */ 580acdca31dSYuan Kang append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 
581acdca31dSYuan Kang LDST_CLASS_1_CCB | tfm->ivsize); 582acdca31dSYuan Kang 583acdca31dSYuan Kang /* Load operation */ 584acdca31dSYuan Kang append_operation(desc, ctx->class1_alg_type | 585acdca31dSYuan Kang OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); 586acdca31dSYuan Kang 587acdca31dSYuan Kang /* Perform operation */ 588acdca31dSYuan Kang ablkcipher_append_src_dst(desc); 589acdca31dSYuan Kang 590acdca31dSYuan Kang ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 591acdca31dSYuan Kang desc_bytes(desc), 592acdca31dSYuan Kang DMA_TO_DEVICE); 593acdca31dSYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 594acdca31dSYuan Kang dev_err(jrdev, "unable to map shared descriptor\n"); 595acdca31dSYuan Kang return -ENOMEM; 596acdca31dSYuan Kang } 597acdca31dSYuan Kang #ifdef DEBUG 598514df281SAlex Porosanu print_hex_dump(KERN_ERR, 599514df281SAlex Porosanu "ablkcipher enc shdesc@"__stringify(__LINE__)": ", 600acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 601acdca31dSYuan Kang desc_bytes(desc), 1); 602acdca31dSYuan Kang #endif 603acdca31dSYuan Kang /* ablkcipher_decrypt shared descriptor */ 604acdca31dSYuan Kang desc = ctx->sh_desc_dec; 605acdca31dSYuan Kang 60661bb86bbSKim Phillips init_sh_desc(desc, HDR_SHARE_SERIAL); 607acdca31dSYuan Kang /* Skip if already shared */ 608acdca31dSYuan Kang key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 609acdca31dSYuan Kang JUMP_COND_SHRD); 610acdca31dSYuan Kang 611acdca31dSYuan Kang /* Load class1 key only */ 612acdca31dSYuan Kang append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, 613acdca31dSYuan Kang ctx->enckeylen, CLASS_1 | 614acdca31dSYuan Kang KEY_DEST_CLASS_REG); 615acdca31dSYuan Kang 616acdca31dSYuan Kang /* For aead, only propagate error immediately if shared */ 617acdca31dSYuan Kang jump_cmd = append_jump(desc, JUMP_TEST_ALL); 618acdca31dSYuan Kang set_jump_tgt_here(desc, key_jump_cmd); 619a2ecb155SKim Phillips append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 620acdca31dSYuan Kang 
set_jump_tgt_here(desc, jump_cmd); 621acdca31dSYuan Kang 622acdca31dSYuan Kang /* load IV */ 623acdca31dSYuan Kang append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 624acdca31dSYuan Kang LDST_CLASS_1_CCB | tfm->ivsize); 625acdca31dSYuan Kang 626acdca31dSYuan Kang /* Choose operation */ 627acdca31dSYuan Kang append_dec_op1(desc, ctx->class1_alg_type); 628acdca31dSYuan Kang 629acdca31dSYuan Kang /* Perform operation */ 630acdca31dSYuan Kang ablkcipher_append_src_dst(desc); 631acdca31dSYuan Kang 632acdca31dSYuan Kang /* Wait for key to load before allowing propagating error */ 633acdca31dSYuan Kang append_dec_shr_done(desc); 634acdca31dSYuan Kang 635acdca31dSYuan Kang ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 636acdca31dSYuan Kang desc_bytes(desc), 637acdca31dSYuan Kang DMA_TO_DEVICE); 638acdca31dSYuan Kang if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 639acdca31dSYuan Kang dev_err(jrdev, "unable to map shared descriptor\n"); 640acdca31dSYuan Kang return -ENOMEM; 641acdca31dSYuan Kang } 642acdca31dSYuan Kang 643acdca31dSYuan Kang #ifdef DEBUG 644514df281SAlex Porosanu print_hex_dump(KERN_ERR, 645514df281SAlex Porosanu "ablkcipher dec shdesc@"__stringify(__LINE__)": ", 646acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 647acdca31dSYuan Kang desc_bytes(desc), 1); 648acdca31dSYuan Kang #endif 649acdca31dSYuan Kang 650acdca31dSYuan Kang return ret; 651acdca31dSYuan Kang } 652acdca31dSYuan Kang 6538e8ec596SKim Phillips /* 6541acebad3SYuan Kang * aead_edesc - s/w-extended aead descriptor 6551acebad3SYuan Kang * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist 656643b39b0SYuan Kang * @assoc_chained: if source is chained 6578e8ec596SKim Phillips * @src_nents: number of segments in input scatterlist 658643b39b0SYuan Kang * @src_chained: if source is chained 6598e8ec596SKim Phillips * @dst_nents: number of segments in output scatterlist 660643b39b0SYuan Kang * @dst_chained: if destination is chained 6611acebad3SYuan Kang * 
@iv_dma: dma address of iv for checking continuity and link table 6628e8ec596SKim Phillips * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 663a299c837SYuan Kang * @sec4_sg_bytes: length of dma mapped sec4_sg space 664a299c837SYuan Kang * @sec4_sg_dma: bus physical mapped address of h/w link table 6658e8ec596SKim Phillips * @hw_desc: the h/w job descriptor followed by any referenced link tables 6668e8ec596SKim Phillips */ 6670e479300SYuan Kang struct aead_edesc { 6688e8ec596SKim Phillips int assoc_nents; 669643b39b0SYuan Kang bool assoc_chained; 6708e8ec596SKim Phillips int src_nents; 671643b39b0SYuan Kang bool src_chained; 6728e8ec596SKim Phillips int dst_nents; 673643b39b0SYuan Kang bool dst_chained; 6741acebad3SYuan Kang dma_addr_t iv_dma; 675a299c837SYuan Kang int sec4_sg_bytes; 676a299c837SYuan Kang dma_addr_t sec4_sg_dma; 677a299c837SYuan Kang struct sec4_sg_entry *sec4_sg; 6788e8ec596SKim Phillips u32 hw_desc[0]; 6798e8ec596SKim Phillips }; 6808e8ec596SKim Phillips 681acdca31dSYuan Kang /* 682acdca31dSYuan Kang * ablkcipher_edesc - s/w-extended ablkcipher descriptor 683acdca31dSYuan Kang * @src_nents: number of segments in input scatterlist 684643b39b0SYuan Kang * @src_chained: if source is chained 685acdca31dSYuan Kang * @dst_nents: number of segments in output scatterlist 686643b39b0SYuan Kang * @dst_chained: if destination is chained 687acdca31dSYuan Kang * @iv_dma: dma address of iv for checking continuity and link table 688acdca31dSYuan Kang * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 689a299c837SYuan Kang * @sec4_sg_bytes: length of dma mapped sec4_sg space 690a299c837SYuan Kang * @sec4_sg_dma: bus physical mapped address of h/w link table 691acdca31dSYuan Kang * @hw_desc: the h/w job descriptor followed by any referenced link tables 692acdca31dSYuan Kang */ 693acdca31dSYuan Kang struct ablkcipher_edesc { 694acdca31dSYuan Kang int src_nents; 695643b39b0SYuan Kang bool src_chained;
696acdca31dSYuan Kang int dst_nents; 697643b39b0SYuan Kang bool dst_chained; 698acdca31dSYuan Kang dma_addr_t iv_dma; 699a299c837SYuan Kang int sec4_sg_bytes; 700a299c837SYuan Kang dma_addr_t sec4_sg_dma; 701a299c837SYuan Kang struct sec4_sg_entry *sec4_sg; 702acdca31dSYuan Kang u32 hw_desc[0]; 703acdca31dSYuan Kang }; 704acdca31dSYuan Kang
/*
 * Common DMA teardown for a request: unmap the src/dst scatterlists,
 * the optional IV buffer and the sec4 h/w link table. When src == dst
 * the single list was mapped DMA_BIDIRECTIONAL, so it is unmapped the
 * same way; otherwise src was TO_DEVICE and dst FROM_DEVICE.
 */
7051acebad3SYuan Kang static void caam_unmap(struct device *dev, struct scatterlist *src, 706643b39b0SYuan Kang struct scatterlist *dst, int src_nents, 707643b39b0SYuan Kang bool src_chained, int dst_nents, bool dst_chained, 708a299c837SYuan Kang dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, 709a299c837SYuan Kang int sec4_sg_bytes) 7101acebad3SYuan Kang { 711643b39b0SYuan Kang if (dst != src) { 712643b39b0SYuan Kang dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE, 713643b39b0SYuan Kang src_chained); 714643b39b0SYuan Kang dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE, 715643b39b0SYuan Kang dst_chained); 7161acebad3SYuan Kang } else { 717643b39b0SYuan Kang dma_unmap_sg_chained(dev, src, src_nents ?
: 1, 718643b39b0SYuan Kang DMA_BIDIRECTIONAL, src_chained); 7191acebad3SYuan Kang } 7201acebad3SYuan Kang 7211acebad3SYuan Kang if (iv_dma) 7221acebad3SYuan Kang dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 723a299c837SYuan Kang if (sec4_sg_bytes) 724a299c837SYuan Kang dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, 7251acebad3SYuan Kang DMA_TO_DEVICE); 7261acebad3SYuan Kang }
7271acebad3SYuan Kang
/* Undo every DMA mapping made for one AEAD request (assoc list + common). */
7280e479300SYuan Kang static void aead_unmap(struct device *dev, 7290e479300SYuan Kang struct aead_edesc *edesc, 7300e479300SYuan Kang struct aead_request *req) 7318e8ec596SKim Phillips { 7321acebad3SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 7331acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 7341acebad3SYuan Kang 735643b39b0SYuan Kang dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents, 736643b39b0SYuan Kang DMA_TO_DEVICE, edesc->assoc_chained); 7378e8ec596SKim Phillips 7381acebad3SYuan Kang caam_unmap(dev, req->src, req->dst, 739643b39b0SYuan Kang edesc->src_nents, edesc->src_chained, edesc->dst_nents, 740643b39b0SYuan Kang edesc->dst_chained, edesc->iv_dma, ivsize, 741643b39b0SYuan Kang edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 7428e8ec596SKim Phillips }
7438e8ec596SKim Phillips
/* Undo every DMA mapping made for one ablkcipher request. */
744acdca31dSYuan Kang static void ablkcipher_unmap(struct device *dev, 745acdca31dSYuan Kang struct ablkcipher_edesc *edesc, 746acdca31dSYuan Kang struct ablkcipher_request *req) 747acdca31dSYuan Kang { 748acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 749acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 750acdca31dSYuan Kang 751acdca31dSYuan Kang caam_unmap(dev, req->src, req->dst, 752643b39b0SYuan Kang edesc->src_nents, edesc->src_chained, edesc->dst_nents, 753643b39b0SYuan Kang edesc->dst_chained, edesc->iv_dma, ivsize, 754643b39b0SYuan Kang edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 755acdca31dSYuan Kang } 756acdca31dSYuan Kang 7570e479300SYuan Kang static void
aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 7588e8ec596SKim Phillips void *context) 7598e8ec596SKim Phillips { 7600e479300SYuan Kang struct aead_request *req = context; 7610e479300SYuan Kang struct aead_edesc *edesc; 7628e8ec596SKim Phillips #ifdef DEBUG 7630e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 7648e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 7651acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 7668e8ec596SKim Phillips 7678e8ec596SKim Phillips dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 7688e8ec596SKim Phillips #endif 7691acebad3SYuan Kang
/*
 * Recover the containing edesc: "desc" is the hw_desc[] member that was
 * handed to the job ring, so step back by its offset in the struct.
 */
7700e479300SYuan Kang edesc = (struct aead_edesc *)((char *)desc - 7710e479300SYuan Kang offsetof(struct aead_edesc, hw_desc)); 7728e8ec596SKim Phillips 7738e8ec596SKim Phillips if (err) { 774de2954d6SKim Phillips char tmp[CAAM_ERROR_STR_MAX]; 7758e8ec596SKim Phillips 7768e8ec596SKim Phillips dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 7778e8ec596SKim Phillips } 7788e8ec596SKim Phillips 7790e479300SYuan Kang aead_unmap(jrdev, edesc, req); 7808e8ec596SKim Phillips 7818e8ec596SKim Phillips #ifdef DEBUG 782514df281SAlex Porosanu print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", 7830e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 7840e479300SYuan Kang req->assoclen , 1); 785514df281SAlex Porosanu print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 7860e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, 7878e8ec596SKim Phillips edesc->src_nents ? 100 : ivsize, 1); 788514df281SAlex Porosanu print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 7890e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 7900e479300SYuan Kang edesc->src_nents ?
100 : req->cryptlen + 7918e8ec596SKim Phillips ctx->authsize + 4, 1); 7928e8ec596SKim Phillips #endif 7938e8ec596SKim Phillips 7948e8ec596SKim Phillips kfree(edesc); 7958e8ec596SKim Phillips 7960e479300SYuan Kang aead_request_complete(req, err); 7978e8ec596SKim Phillips }
7988e8ec596SKim Phillips
/*
 * Job-ring completion callback for AEAD decrypt: log hw status, unmap,
 * translate an ICV-check failure into -EBADMSG, then complete the request.
 */
7990e479300SYuan Kang static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 8008e8ec596SKim Phillips void *context) 8018e8ec596SKim Phillips { 8020e479300SYuan Kang struct aead_request *req = context; 8030e479300SYuan Kang struct aead_edesc *edesc; 8048e8ec596SKim Phillips #ifdef DEBUG 8050e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 8068e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 8071acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 8088e8ec596SKim Phillips 8098e8ec596SKim Phillips dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 8108e8ec596SKim Phillips #endif 8111acebad3SYuan Kang 8120e479300SYuan Kang edesc = (struct aead_edesc *)((char *)desc - 8130e479300SYuan Kang offsetof(struct aead_edesc, hw_desc)); 8148e8ec596SKim Phillips 8151acebad3SYuan Kang #ifdef DEBUG 816514df281SAlex Porosanu print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 8171acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 8181acebad3SYuan Kang ivsize, 1); 819514df281SAlex Porosanu print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 8201acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), 821bbf9c893SHoria Geanta req->cryptlen - ctx->authsize, 1); 8221acebad3SYuan Kang #endif 8231acebad3SYuan Kang 8248e8ec596SKim Phillips if (err) { 825de2954d6SKim Phillips char tmp[CAAM_ERROR_STR_MAX]; 8268e8ec596SKim Phillips 8278e8ec596SKim Phillips dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 8288e8ec596SKim Phillips } 8298e8ec596SKim Phillips 8300e479300SYuan Kang aead_unmap(jrdev, edesc, req); 8318e8ec596SKim Phillips 8328e8ec596SKim Phillips /* 8338e8ec596SKim Phillips *
verify hw auth check passed else return -EBADMSG 8348e8ec596SKim Phillips */ 8358e8ec596SKim Phillips if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) 8368e8ec596SKim Phillips err = -EBADMSG; 8378e8ec596SKim Phillips 8388e8ec596SKim Phillips #ifdef DEBUG 839514df281SAlex Porosanu print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ", 8408e8ec596SKim Phillips DUMP_PREFIX_ADDRESS, 16, 4, 8410e479300SYuan Kang ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), 8420e479300SYuan Kang sizeof(struct iphdr) + req->assoclen + 8430e479300SYuan Kang ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + 8448e8ec596SKim Phillips ctx->authsize + 36, 1); 845a299c837SYuan Kang if (!err && edesc->sec4_sg_bytes) { 8460e479300SYuan Kang struct scatterlist *sg = sg_last(req->src, edesc->src_nents); 847514df281SAlex Porosanu print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ", 8488e8ec596SKim Phillips DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), 8498e8ec596SKim Phillips sg->length + ctx->authsize + 16, 1); 8508e8ec596SKim Phillips } 8518e8ec596SKim Phillips #endif 8521acebad3SYuan Kang 8538e8ec596SKim Phillips kfree(edesc); 8548e8ec596SKim Phillips 8550e479300SYuan Kang aead_request_complete(req, err); 8568e8ec596SKim Phillips }
8578e8ec596SKim Phillips
/*
 * Job-ring completion callback for ablkcipher encrypt: recover the edesc
 * from the descriptor pointer, log hw status, unmap DMA and complete.
 */
858acdca31dSYuan Kang static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 859acdca31dSYuan Kang void *context) 860acdca31dSYuan Kang { 861acdca31dSYuan Kang struct ablkcipher_request *req = context; 862acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 863acdca31dSYuan Kang #ifdef DEBUG 864acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 865acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 866acdca31dSYuan Kang 867acdca31dSYuan Kang dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 868acdca31dSYuan Kang #endif 869acdca31dSYuan Kang 870acdca31dSYuan Kang edesc = (struct ablkcipher_edesc *)((char *)desc -
871acdca31dSYuan Kang offsetof(struct ablkcipher_edesc, hw_desc)); 872acdca31dSYuan Kang 873acdca31dSYuan Kang if (err) { 874acdca31dSYuan Kang char tmp[CAAM_ERROR_STR_MAX]; 875acdca31dSYuan Kang 876acdca31dSYuan Kang dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); 877acdca31dSYuan Kang } 878acdca31dSYuan Kang 879acdca31dSYuan Kang #ifdef DEBUG 880514df281SAlex Porosanu print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 881acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->info, 882acdca31dSYuan Kang edesc->src_nents > 1 ? 100 : ivsize, 1); 883514df281SAlex Porosanu print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 884acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 885acdca31dSYuan Kang edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 886acdca31dSYuan Kang #endif 887acdca31dSYuan Kang 888acdca31dSYuan Kang ablkcipher_unmap(jrdev, edesc, req); 889acdca31dSYuan Kang kfree(edesc); 890acdca31dSYuan Kang 891acdca31dSYuan Kang ablkcipher_request_complete(req, err); 892acdca31dSYuan Kang }
893acdca31dSYuan Kang
/* Job-ring completion callback for ablkcipher decrypt (mirror of encrypt). */
894acdca31dSYuan Kang static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 895acdca31dSYuan Kang void *context) 896acdca31dSYuan Kang { 897acdca31dSYuan Kang struct ablkcipher_request *req = context; 898acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 899acdca31dSYuan Kang #ifdef DEBUG 900acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 901acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 902acdca31dSYuan Kang 903acdca31dSYuan Kang dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 904acdca31dSYuan Kang #endif 905acdca31dSYuan Kang 906acdca31dSYuan Kang edesc = (struct ablkcipher_edesc *)((char *)desc - 907acdca31dSYuan Kang offsetof(struct ablkcipher_edesc, hw_desc)); 908acdca31dSYuan Kang if (err) { 909acdca31dSYuan Kang char tmp[CAAM_ERROR_STR_MAX]; 910acdca31dSYuan Kang 911acdca31dSYuan Kang dev_err(jrdev, "%08x:
%s\n", err, caam_jr_strstatus(tmp, err)); 912acdca31dSYuan Kang } 913acdca31dSYuan Kang 914acdca31dSYuan Kang #ifdef DEBUG 915514df281SAlex Porosanu print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 916acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->info, 917acdca31dSYuan Kang ivsize, 1); 918514df281SAlex Porosanu print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", 919acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 920acdca31dSYuan Kang edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 921acdca31dSYuan Kang #endif 922acdca31dSYuan Kang 923acdca31dSYuan Kang ablkcipher_unmap(jrdev, edesc, req); 924acdca31dSYuan Kang kfree(edesc); 925acdca31dSYuan Kang 926acdca31dSYuan Kang ablkcipher_request_complete(req, err); 927acdca31dSYuan Kang } 928acdca31dSYuan Kang 9298e8ec596SKim Phillips /* 9301acebad3SYuan Kang * Fill in aead job descriptor 9311acebad3SYuan Kang */
/*
 * Builds the per-request job descriptor: header pointing at the shared
 * descriptor, SEQ IN covering assoc + IV + payload, SEQ OUT covering the
 * result (payload +/- authsize). When the data is not fully contiguous the
 * pointers reference the sec4 h/w link table instead (LDST_SGF set).
 */
9321acebad3SYuan Kang static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, 9331acebad3SYuan Kang struct aead_edesc *edesc, 9341acebad3SYuan Kang struct aead_request *req, 9351acebad3SYuan Kang bool all_contig, bool encrypt) 9368e8ec596SKim Phillips { 9370e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 9388e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 9398e8ec596SKim Phillips int ivsize = crypto_aead_ivsize(aead); 9408e8ec596SKim Phillips int authsize = ctx->authsize; 9411acebad3SYuan Kang u32 *desc = edesc->hw_desc; 9421acebad3SYuan Kang u32 out_options = 0, in_options; 9431acebad3SYuan Kang dma_addr_t dst_dma, src_dma; 944a299c837SYuan Kang int len, sec4_sg_index = 0; 9458e8ec596SKim Phillips 9461acebad3SYuan Kang #ifdef DEBUG 9478e8ec596SKim Phillips debug("assoclen %d cryptlen %d authsize %d\n", 9480e479300SYuan Kang req->assoclen, req->cryptlen, authsize); 949514df281SAlex Porosanu print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", 9500e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
9510e479300SYuan Kang req->assoclen , 1); 952514df281SAlex Porosanu print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 9531acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 9548e8ec596SKim Phillips edesc->src_nents ? 100 : ivsize, 1); 955514df281SAlex Porosanu print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", 9560e479300SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 9571acebad3SYuan Kang edesc->src_nents ? 100 : req->cryptlen, 1); 958514df281SAlex Porosanu print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", 9598e8ec596SKim Phillips DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 9608e8ec596SKim Phillips desc_bytes(sh_desc), 1); 9618e8ec596SKim Phillips #endif 9621acebad3SYuan Kang 9631acebad3SYuan Kang len = desc_len(sh_desc); 9641acebad3SYuan Kang init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 9651acebad3SYuan Kang
/*
 * Input side: contiguous -> point straight at assoc (IV and payload
 * follow it in DMA space); otherwise point at the link table and skip
 * past the assoc + IV + src entries for the output-side index below.
 */
9661acebad3SYuan Kang if (all_contig) { 9671acebad3SYuan Kang src_dma = sg_dma_address(req->assoc); 9681acebad3SYuan Kang in_options = 0; 9691acebad3SYuan Kang } else { 970a299c837SYuan Kang src_dma = edesc->sec4_sg_dma; 971a299c837SYuan Kang sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 + 9721acebad3SYuan Kang (edesc->src_nents ? : 1); 9731acebad3SYuan Kang in_options = LDST_SGF; 9741acebad3SYuan Kang } 975bbf9c893SHoria Geanta 976bbf9c893SHoria Geanta append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, 977bbf9c893SHoria Geanta in_options); 9788e8ec596SKim Phillips 9791acebad3SYuan Kang if (likely(req->src == req->dst)) { 9801acebad3SYuan Kang if (all_contig) { 9811acebad3SYuan Kang dst_dma = sg_dma_address(req->src); 9828e8ec596SKim Phillips } else { 983a299c837SYuan Kang dst_dma = src_dma + sizeof(struct sec4_sg_entry) * 9841acebad3SYuan Kang ((edesc->assoc_nents ?
: 1) + 1); 9851acebad3SYuan Kang out_options = LDST_SGF; 9868e8ec596SKim Phillips } 9878e8ec596SKim Phillips } else { 9888e8ec596SKim Phillips if (!edesc->dst_nents) { 9890e479300SYuan Kang dst_dma = sg_dma_address(req->dst); 9908e8ec596SKim Phillips } else { 991a299c837SYuan Kang dst_dma = edesc->sec4_sg_dma + 992a299c837SYuan Kang sec4_sg_index * 993a299c837SYuan Kang sizeof(struct sec4_sg_entry); 9941acebad3SYuan Kang out_options = LDST_SGF; 9958e8ec596SKim Phillips } 9968e8ec596SKim Phillips }
/* Output length: encrypt appends the ICV, decrypt strips it. */
9978e8ec596SKim Phillips if (encrypt) 998bbf9c893SHoria Geanta append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, 999bbf9c893SHoria Geanta out_options); 10008e8ec596SKim Phillips else 10011acebad3SYuan Kang append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, 10021acebad3SYuan Kang out_options); 10031acebad3SYuan Kang }
10041acebad3SYuan Kang 10051acebad3SYuan Kang /* 10061acebad3SYuan Kang * Fill in aead givencrypt job descriptor 10071acebad3SYuan Kang */
/*
 * Same as init_aead_job() but for givencrypt: the generated IV (edesc->
 * iv_dma) is also the start of the output, and "contig" carries separate
 * GIV_SRC_CONTIG / GIV_DST_CONTIG flags instead of a single bool.
 */
10081acebad3SYuan Kang static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, 10091acebad3SYuan Kang struct aead_edesc *edesc, 10101acebad3SYuan Kang struct aead_request *req, 10111acebad3SYuan Kang int contig) 10121acebad3SYuan Kang { 10131acebad3SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 10141acebad3SYuan Kang struct caam_ctx *ctx = crypto_aead_ctx(aead); 10151acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 10161acebad3SYuan Kang int authsize = ctx->authsize; 10171acebad3SYuan Kang u32 *desc = edesc->hw_desc; 10181acebad3SYuan Kang u32 out_options = 0, in_options; 10191acebad3SYuan Kang dma_addr_t dst_dma, src_dma; 1020a299c837SYuan Kang int len, sec4_sg_index = 0; 10218e8ec596SKim Phillips 10228e8ec596SKim Phillips #ifdef DEBUG 10231acebad3SYuan Kang debug("assoclen %d cryptlen %d authsize %d\n", 10241acebad3SYuan Kang req->assoclen, req->cryptlen, authsize); 1025514df281SAlex Porosanu print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", 10261acebad3SYuan Kang
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 10271acebad3SYuan Kang req->assoclen , 1); 1028514df281SAlex Porosanu print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 10291acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 1030514df281SAlex Porosanu print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", 10311acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 10321acebad3SYuan Kang edesc->src_nents > 1 ? 100 : req->cryptlen, 1); 1033514df281SAlex Porosanu print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", 10341acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 10351acebad3SYuan Kang desc_bytes(sh_desc), 1); 10368e8ec596SKim Phillips #endif 10378e8ec596SKim Phillips 10381acebad3SYuan Kang len = desc_len(sh_desc); 10391acebad3SYuan Kang init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 10401acebad3SYuan Kang 10411acebad3SYuan Kang if (contig & GIV_SRC_CONTIG) { 10421acebad3SYuan Kang src_dma = sg_dma_address(req->assoc); 10431acebad3SYuan Kang in_options = 0; 10441acebad3SYuan Kang } else { 1045a299c837SYuan Kang src_dma = edesc->sec4_sg_dma; 1046a299c837SYuan Kang sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; 10471acebad3SYuan Kang in_options = LDST_SGF; 10481acebad3SYuan Kang } 1049bbf9c893SHoria Geanta append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, 1050bbf9c893SHoria Geanta in_options); 10511acebad3SYuan Kang
/* Output starts at the generated IV when destination is contiguous. */
10521acebad3SYuan Kang if (contig & GIV_DST_CONTIG) { 10531acebad3SYuan Kang dst_dma = edesc->iv_dma; 10541acebad3SYuan Kang } else { 10551acebad3SYuan Kang if (likely(req->src == req->dst)) { 1056a299c837SYuan Kang dst_dma = src_dma + sizeof(struct sec4_sg_entry) * 10571acebad3SYuan Kang edesc->assoc_nents; 10581acebad3SYuan Kang out_options = LDST_SGF; 10591acebad3SYuan Kang } else { 1060a299c837SYuan Kang dst_dma = edesc->sec4_sg_dma + 1061a299c837SYuan Kang sec4_sg_index * 1062a299c837SYuan Kang sizeof(struct sec4_sg_entry);
10631acebad3SYuan Kang out_options = LDST_SGF; 10641acebad3SYuan Kang } 10658e8ec596SKim Phillips } 10668e8ec596SKim Phillips 1067bbf9c893SHoria Geanta append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize, 1068bbf9c893SHoria Geanta out_options); 10698e8ec596SKim Phillips }
10708e8ec596SKim Phillips 10718e8ec596SKim Phillips /* 1072acdca31dSYuan Kang * Fill in ablkcipher job descriptor 1073acdca31dSYuan Kang */
/*
 * Build the ablkcipher job descriptor: SEQ IN covers IV + payload
 * (straight pointer when the IV is contiguous with src, else link
 * table), SEQ OUT covers the same number of payload bytes.
 */
1074acdca31dSYuan Kang static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, 1075acdca31dSYuan Kang struct ablkcipher_edesc *edesc, 1076acdca31dSYuan Kang struct ablkcipher_request *req, 1077acdca31dSYuan Kang bool iv_contig) 1078acdca31dSYuan Kang { 1079acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1080acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1081acdca31dSYuan Kang u32 *desc = edesc->hw_desc; 1082acdca31dSYuan Kang u32 out_options = 0, in_options; 1083acdca31dSYuan Kang dma_addr_t dst_dma, src_dma; 1084a299c837SYuan Kang int len, sec4_sg_index = 0; 1085acdca31dSYuan Kang 1086acdca31dSYuan Kang #ifdef DEBUG 1087514df281SAlex Porosanu print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 1088acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1089acdca31dSYuan Kang ivsize, 1); 1090514df281SAlex Porosanu print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", 1091acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1092acdca31dSYuan Kang edesc->src_nents ?
100 : req->nbytes, 1); 1093acdca31dSYuan Kang #endif 1094acdca31dSYuan Kang 1095acdca31dSYuan Kang len = desc_len(sh_desc); 1096acdca31dSYuan Kang init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1097acdca31dSYuan Kang 1098acdca31dSYuan Kang if (iv_contig) { 1099acdca31dSYuan Kang src_dma = edesc->iv_dma; 1100acdca31dSYuan Kang in_options = 0; 1101acdca31dSYuan Kang } else { 1102a299c837SYuan Kang src_dma = edesc->sec4_sg_dma; 1103a299c837SYuan Kang sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents; 1104acdca31dSYuan Kang in_options = LDST_SGF; 1105acdca31dSYuan Kang } 1106acdca31dSYuan Kang append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); 1107acdca31dSYuan Kang 1108acdca31dSYuan Kang if (likely(req->src == req->dst)) { 1109acdca31dSYuan Kang if (!edesc->src_nents && iv_contig) { 1110acdca31dSYuan Kang dst_dma = sg_dma_address(req->src); 1111acdca31dSYuan Kang } else { 1112a299c837SYuan Kang dst_dma = edesc->sec4_sg_dma + 1113a299c837SYuan Kang sizeof(struct sec4_sg_entry); 1114acdca31dSYuan Kang out_options = LDST_SGF; 1115acdca31dSYuan Kang } 1116acdca31dSYuan Kang } else { 1117acdca31dSYuan Kang if (!edesc->dst_nents) { 1118acdca31dSYuan Kang dst_dma = sg_dma_address(req->dst); 1119acdca31dSYuan Kang } else { 1120a299c837SYuan Kang dst_dma = edesc->sec4_sg_dma + 1121a299c837SYuan Kang sec4_sg_index * sizeof(struct sec4_sg_entry); 1122acdca31dSYuan Kang out_options = LDST_SGF; 1123acdca31dSYuan Kang } 1124acdca31dSYuan Kang } 1125acdca31dSYuan Kang append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); 1126acdca31dSYuan Kang }
1127acdca31dSYuan Kang 1128acdca31dSYuan Kang /* 11291acebad3SYuan Kang * allocate and map the aead extended descriptor 11308e8ec596SKim Phillips */
/*
 * Counts scatterlist segments, DMA-maps assoc/src/dst (+ the request IV),
 * decides whether everything is contiguous, and allocates the edesc with
 * room for the job descriptor and sec4 link table. Caller owns the edesc
 * and must aead_unmap() + kfree() it.
 */
11310e479300SYuan Kang static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 1132bbf9c893SHoria Geanta int desc_bytes, bool *all_contig_ptr, 1133bbf9c893SHoria Geanta bool encrypt) 11348e8ec596SKim Phillips { 11350e479300SYuan
Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 11368e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 11378e8ec596SKim Phillips struct device *jrdev = ctx->jrdev; 11381acebad3SYuan Kang gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 11391acebad3SYuan Kang CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 11401acebad3SYuan Kang int assoc_nents, src_nents, dst_nents = 0; 11410e479300SYuan Kang struct aead_edesc *edesc; 11421acebad3SYuan Kang dma_addr_t iv_dma = 0; 11431acebad3SYuan Kang int sgc; 11441acebad3SYuan Kang bool all_contig = true; 1145643b39b0SYuan Kang bool assoc_chained = false, src_chained = false, dst_chained = false; 11461acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 1147a299c837SYuan Kang int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 1148bbf9c893SHoria Geanta unsigned int authsize = ctx->authsize; 11498e8ec596SKim Phillips 1150643b39b0SYuan Kang assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); 11518e8ec596SKim Phillips
/* dst length differs from src by the ICV: added on encrypt, consumed on decrypt */
1152bbf9c893SHoria Geanta if (unlikely(req->dst != req->src)) { 1153bbf9c893SHoria Geanta src_nents = sg_count(req->src, req->cryptlen, &src_chained); 1154bbf9c893SHoria Geanta dst_nents = sg_count(req->dst, 1155bbf9c893SHoria Geanta req->cryptlen + 1156bbf9c893SHoria Geanta (encrypt ? authsize : (-authsize)), 1157bbf9c893SHoria Geanta &dst_chained); 1158bbf9c893SHoria Geanta } else { 1159bbf9c893SHoria Geanta src_nents = sg_count(req->src, 1160bbf9c893SHoria Geanta req->cryptlen + 1161bbf9c893SHoria Geanta (encrypt ? authsize : 0), 1162bbf9c893SHoria Geanta &src_chained); 1163bbf9c893SHoria Geanta } 11648e8ec596SKim Phillips
/*
 * NOTE(review): the mapping results below (sgc, iv_dma and, further
 * down, sec4_sg_dma) are never checked with dma_mapping_error(); a
 * failed mapping would be used silently. Later kernels added these
 * checks - worth confirming against the upstream driver.
 */
1165643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, 1166286233e6SHoria Geanta DMA_TO_DEVICE, assoc_chained); 11671acebad3SYuan Kang if (likely(req->src == req->dst)) { 1168643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->src, src_nents ?
: 1, 1169643b39b0SYuan Kang DMA_BIDIRECTIONAL, src_chained); 11701acebad3SYuan Kang } else { 1171643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1172643b39b0SYuan Kang DMA_TO_DEVICE, src_chained); 1173643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 1174643b39b0SYuan Kang DMA_FROM_DEVICE, dst_chained); 11758e8ec596SKim Phillips } 11768e8ec596SKim Phillips
11771acebad3SYuan Kang /* Check if data are contiguous */ 11781acebad3SYuan Kang iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); 11791acebad3SYuan Kang if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 11801acebad3SYuan Kang iv_dma || src_nents || iv_dma + ivsize != 11811acebad3SYuan Kang sg_dma_address(req->src)) { 11821acebad3SYuan Kang all_contig = false; 11831acebad3SYuan Kang assoc_nents = assoc_nents ? : 1; 11841acebad3SYuan Kang src_nents = src_nents ? : 1; 1185a299c837SYuan Kang sec4_sg_len = assoc_nents + 1 + src_nents; 11861acebad3SYuan Kang } 1187a299c837SYuan Kang sec4_sg_len += dst_nents; 11881acebad3SYuan Kang 1189a299c837SYuan Kang sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 11908e8ec596SKim Phillips 11918e8ec596SKim Phillips /* allocate space for base edesc and hw desc commands, link tables */ 11920e479300SYuan Kang edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1193a299c837SYuan Kang sec4_sg_bytes, GFP_DMA | flags); 11948e8ec596SKim Phillips if (!edesc) { 11958e8ec596SKim Phillips dev_err(jrdev, "could not allocate extended descriptor\n"); 11968e8ec596SKim Phillips return ERR_PTR(-ENOMEM); 11978e8ec596SKim Phillips } 11988e8ec596SKim Phillips 11998e8ec596SKim Phillips edesc->assoc_nents = assoc_nents; 1200643b39b0SYuan Kang edesc->assoc_chained = assoc_chained; 12018e8ec596SKim Phillips edesc->src_nents = src_nents; 1202643b39b0SYuan Kang edesc->src_chained = src_chained; 12038e8ec596SKim Phillips edesc->dst_nents = dst_nents; 1204643b39b0SYuan Kang edesc->dst_chained = dst_chained;
12051acebad3SYuan Kang edesc->iv_dma = iv_dma; 1206a299c837SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 1207a299c837SYuan Kang edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + 12088e8ec596SKim Phillips desc_bytes; 1209a299c837SYuan Kang edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1210a299c837SYuan Kang sec4_sg_bytes, DMA_TO_DEVICE); 12111acebad3SYuan Kang *all_contig_ptr = all_contig; 12121acebad3SYuan Kang
/* Link-table layout: [assoc entries][IV entry][src entries][dst entries] */
1213a299c837SYuan Kang sec4_sg_index = 0; 12141acebad3SYuan Kang if (!all_contig) { 1215a299c837SYuan Kang sg_to_sec4_sg(req->assoc, 12161acebad3SYuan Kang (assoc_nents ? : 1), 1217a299c837SYuan Kang edesc->sec4_sg + 1218a299c837SYuan Kang sec4_sg_index, 0); 1219a299c837SYuan Kang sec4_sg_index += assoc_nents ? : 1; 1220a299c837SYuan Kang dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 12211acebad3SYuan Kang iv_dma, ivsize, 0); 1222a299c837SYuan Kang sec4_sg_index += 1; 1223a299c837SYuan Kang sg_to_sec4_sg_last(req->src, 12241acebad3SYuan Kang (src_nents ? : 1), 1225a299c837SYuan Kang edesc->sec4_sg + 1226a299c837SYuan Kang sec4_sg_index, 0); 1227a299c837SYuan Kang sec4_sg_index += src_nents ?
: 1; 12281acebad3SYuan Kang } 12291acebad3SYuan Kang if (dst_nents) { 1230a299c837SYuan Kang sg_to_sec4_sg_last(req->dst, dst_nents, 1231a299c837SYuan Kang edesc->sec4_sg + sec4_sg_index, 0); 12321acebad3SYuan Kang } 12338e8ec596SKim Phillips 12348e8ec596SKim Phillips return edesc; 12358e8ec596SKim Phillips }
12368e8ec596SKim Phillips
/*
 * crypto_aead .encrypt entry point: allocate/map the extended descriptor,
 * fill the job descriptor against the encrypt shared descriptor and queue
 * it on the job ring. Returns -EINPROGRESS on successful submission; on
 * enqueue failure the mappings are torn down and the error returned.
 */
12370e479300SYuan Kang static int aead_encrypt(struct aead_request *req) 12388e8ec596SKim Phillips { 12390e479300SYuan Kang struct aead_edesc *edesc; 12408e8ec596SKim Phillips struct crypto_aead *aead = crypto_aead_reqtfm(req); 12418e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 12428e8ec596SKim Phillips struct device *jrdev = ctx->jrdev; 12431acebad3SYuan Kang bool all_contig; 12448e8ec596SKim Phillips u32 *desc; 12451acebad3SYuan Kang int ret = 0; 12461acebad3SYuan Kang 12478e8ec596SKim Phillips /* allocate extended descriptor */ 12481acebad3SYuan Kang edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1249bbf9c893SHoria Geanta CAAM_CMD_SZ, &all_contig, true); 12508e8ec596SKim Phillips if (IS_ERR(edesc)) 12518e8ec596SKim Phillips return PTR_ERR(edesc); 12528e8ec596SKim Phillips 12531acebad3SYuan Kang /* Create and submit job descriptor */ 12541acebad3SYuan Kang init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, 12551acebad3SYuan Kang all_contig, true); 12561acebad3SYuan Kang #ifdef DEBUG 1257514df281SAlex Porosanu print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 12581acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 12591acebad3SYuan Kang desc_bytes(edesc->hw_desc), 1); 12601acebad3SYuan Kang #endif 12611acebad3SYuan Kang 12628e8ec596SKim Phillips desc = edesc->hw_desc; 12631acebad3SYuan Kang ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 12641acebad3SYuan Kang if (!ret) { 12651acebad3SYuan Kang ret = -EINPROGRESS; 12661acebad3SYuan Kang } else { 12671acebad3SYuan Kang aead_unmap(jrdev, edesc, req); 12681acebad3SYuan Kang kfree(edesc); 12691acebad3SYuan Kang
} 12708e8ec596SKim Phillips 12711acebad3SYuan Kang return ret; 12728e8ec596SKim Phillips }
12738e8ec596SKim Phillips
/* crypto_aead .decrypt entry point - mirror of aead_encrypt() using the decrypt shared descriptor. */
12740e479300SYuan Kang static int aead_decrypt(struct aead_request *req) 12758e8ec596SKim Phillips { 12761acebad3SYuan Kang struct aead_edesc *edesc; 12770e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 12780e479300SYuan Kang struct caam_ctx *ctx = crypto_aead_ctx(aead); 12790e479300SYuan Kang struct device *jrdev = ctx->jrdev; 12801acebad3SYuan Kang bool all_contig; 12810e479300SYuan Kang u32 *desc; 12821acebad3SYuan Kang int ret = 0; 12830e479300SYuan Kang 12840e479300SYuan Kang /* allocate extended descriptor */ 12851acebad3SYuan Kang edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * 1286bbf9c893SHoria Geanta CAAM_CMD_SZ, &all_contig, false); 12870e479300SYuan Kang if (IS_ERR(edesc)) 12880e479300SYuan Kang return PTR_ERR(edesc); 12890e479300SYuan Kang 12901acebad3SYuan Kang #ifdef DEBUG 1291514df281SAlex Porosanu print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", 12921acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 12931acebad3SYuan Kang req->cryptlen, 1); 12941acebad3SYuan Kang #endif 12951acebad3SYuan Kang 12961acebad3SYuan Kang /* Create and submit job descriptor*/ 12971acebad3SYuan Kang init_aead_job(ctx->sh_desc_dec, 12981acebad3SYuan Kang ctx->sh_desc_dec_dma, edesc, req, all_contig, false); 12991acebad3SYuan Kang #ifdef DEBUG 1300514df281SAlex Porosanu print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 13011acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 13021acebad3SYuan Kang desc_bytes(edesc->hw_desc), 1); 13031acebad3SYuan Kang #endif 13041acebad3SYuan Kang 13050e479300SYuan Kang desc = edesc->hw_desc; 13061acebad3SYuan Kang ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 13071acebad3SYuan Kang if (!ret) { 13081acebad3SYuan Kang ret = -EINPROGRESS; 13091acebad3SYuan Kang } else { 13101acebad3SYuan Kang aead_unmap(jrdev, edesc, req);
13111acebad3SYuan Kang kfree(edesc); 13121acebad3SYuan Kang } 13130e479300SYuan Kang 13141acebad3SYuan Kang return ret; 13151acebad3SYuan Kang } 13160e479300SYuan Kang 13171acebad3SYuan Kang /* 13181acebad3SYuan Kang * allocate and map the aead extended descriptor for aead givencrypt 13191acebad3SYuan Kang */ 13201acebad3SYuan Kang static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request 13211acebad3SYuan Kang *greq, int desc_bytes, 13221acebad3SYuan Kang u32 *contig_ptr) 13231acebad3SYuan Kang { 13241acebad3SYuan Kang struct aead_request *req = &greq->areq; 13251acebad3SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 13261acebad3SYuan Kang struct caam_ctx *ctx = crypto_aead_ctx(aead); 13271acebad3SYuan Kang struct device *jrdev = ctx->jrdev; 13281acebad3SYuan Kang gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 13291acebad3SYuan Kang CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 13301acebad3SYuan Kang int assoc_nents, src_nents, dst_nents = 0; 13311acebad3SYuan Kang struct aead_edesc *edesc; 13321acebad3SYuan Kang dma_addr_t iv_dma = 0; 13331acebad3SYuan Kang int sgc; 13341acebad3SYuan Kang u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; 13351acebad3SYuan Kang int ivsize = crypto_aead_ivsize(aead); 1336643b39b0SYuan Kang bool assoc_chained = false, src_chained = false, dst_chained = false; 1337a299c837SYuan Kang int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 13380e479300SYuan Kang 1339643b39b0SYuan Kang assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); 1340643b39b0SYuan Kang src_nents = sg_count(req->src, req->cryptlen, &src_chained); 13410e479300SYuan Kang 13421acebad3SYuan Kang if (unlikely(req->dst != req->src)) 1343bbf9c893SHoria Geanta dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize, 1344bbf9c893SHoria Geanta &dst_chained); 13451acebad3SYuan Kang 1346643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? 
: 1, 1347286233e6SHoria Geanta DMA_TO_DEVICE, assoc_chained); 13481acebad3SYuan Kang if (likely(req->src == req->dst)) { 1349643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1350643b39b0SYuan Kang DMA_BIDIRECTIONAL, src_chained); 13511acebad3SYuan Kang } else { 1352643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1353643b39b0SYuan Kang DMA_TO_DEVICE, src_chained); 1354643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 1355643b39b0SYuan Kang DMA_FROM_DEVICE, dst_chained); 13561acebad3SYuan Kang } 13571acebad3SYuan Kang 13581acebad3SYuan Kang /* Check if data are contiguous */ 13591acebad3SYuan Kang iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); 13601acebad3SYuan Kang if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != 13611acebad3SYuan Kang iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) 13621acebad3SYuan Kang contig &= ~GIV_SRC_CONTIG; 13631acebad3SYuan Kang if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) 13641acebad3SYuan Kang contig &= ~GIV_DST_CONTIG; 13651acebad3SYuan Kang if (unlikely(req->src != req->dst)) { 13661acebad3SYuan Kang dst_nents = dst_nents ? : 1; 1367a299c837SYuan Kang sec4_sg_len += 1; 13681acebad3SYuan Kang } 13691acebad3SYuan Kang if (!(contig & GIV_SRC_CONTIG)) { 13701acebad3SYuan Kang assoc_nents = assoc_nents ? : 1; 13711acebad3SYuan Kang src_nents = src_nents ? 
: 1; 1372a299c837SYuan Kang sec4_sg_len += assoc_nents + 1 + src_nents; 13731acebad3SYuan Kang if (likely(req->src == req->dst)) 13741acebad3SYuan Kang contig &= ~GIV_DST_CONTIG; 13751acebad3SYuan Kang } 1376a299c837SYuan Kang sec4_sg_len += dst_nents; 13771acebad3SYuan Kang 1378a299c837SYuan Kang sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 13791acebad3SYuan Kang 13801acebad3SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 13811acebad3SYuan Kang edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1382a299c837SYuan Kang sec4_sg_bytes, GFP_DMA | flags); 13831acebad3SYuan Kang if (!edesc) { 13841acebad3SYuan Kang dev_err(jrdev, "could not allocate extended descriptor\n"); 13851acebad3SYuan Kang return ERR_PTR(-ENOMEM); 13861acebad3SYuan Kang } 13871acebad3SYuan Kang 13881acebad3SYuan Kang edesc->assoc_nents = assoc_nents; 1389643b39b0SYuan Kang edesc->assoc_chained = assoc_chained; 13901acebad3SYuan Kang edesc->src_nents = src_nents; 1391643b39b0SYuan Kang edesc->src_chained = src_chained; 13921acebad3SYuan Kang edesc->dst_nents = dst_nents; 1393643b39b0SYuan Kang edesc->dst_chained = dst_chained; 13941acebad3SYuan Kang edesc->iv_dma = iv_dma; 1395a299c837SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 1396a299c837SYuan Kang edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + 13971acebad3SYuan Kang desc_bytes; 1398a299c837SYuan Kang edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1399a299c837SYuan Kang sec4_sg_bytes, DMA_TO_DEVICE); 14001acebad3SYuan Kang *contig_ptr = contig; 14011acebad3SYuan Kang 1402a299c837SYuan Kang sec4_sg_index = 0; 14031acebad3SYuan Kang if (!(contig & GIV_SRC_CONTIG)) { 1404a299c837SYuan Kang sg_to_sec4_sg(req->assoc, assoc_nents, 1405a299c837SYuan Kang edesc->sec4_sg + 1406a299c837SYuan Kang sec4_sg_index, 0); 1407a299c837SYuan Kang sec4_sg_index += assoc_nents; 1408a299c837SYuan Kang dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 14091acebad3SYuan Kang iv_dma, 
ivsize, 0); 1410a299c837SYuan Kang sec4_sg_index += 1; 1411a299c837SYuan Kang sg_to_sec4_sg_last(req->src, src_nents, 1412a299c837SYuan Kang edesc->sec4_sg + 1413a299c837SYuan Kang sec4_sg_index, 0); 1414a299c837SYuan Kang sec4_sg_index += src_nents; 14151acebad3SYuan Kang } 14161acebad3SYuan Kang if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { 1417a299c837SYuan Kang dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 14181acebad3SYuan Kang iv_dma, ivsize, 0); 1419a299c837SYuan Kang sec4_sg_index += 1; 1420a299c837SYuan Kang sg_to_sec4_sg_last(req->dst, dst_nents, 1421a299c837SYuan Kang edesc->sec4_sg + sec4_sg_index, 0); 14221acebad3SYuan Kang } 14231acebad3SYuan Kang 14241acebad3SYuan Kang return edesc; 14250e479300SYuan Kang } 14260e479300SYuan Kang 14270e479300SYuan Kang static int aead_givencrypt(struct aead_givcrypt_request *areq) 14280e479300SYuan Kang { 14290e479300SYuan Kang struct aead_request *req = &areq->areq; 14300e479300SYuan Kang struct aead_edesc *edesc; 14310e479300SYuan Kang struct crypto_aead *aead = crypto_aead_reqtfm(req); 14328e8ec596SKim Phillips struct caam_ctx *ctx = crypto_aead_ctx(aead); 14338e8ec596SKim Phillips struct device *jrdev = ctx->jrdev; 14341acebad3SYuan Kang u32 contig; 14358e8ec596SKim Phillips u32 *desc; 14361acebad3SYuan Kang int ret = 0; 14378e8ec596SKim Phillips 14388e8ec596SKim Phillips /* allocate extended descriptor */ 14391acebad3SYuan Kang edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * 14401acebad3SYuan Kang CAAM_CMD_SZ, &contig); 14411acebad3SYuan Kang 14428e8ec596SKim Phillips if (IS_ERR(edesc)) 14438e8ec596SKim Phillips return PTR_ERR(edesc); 14448e8ec596SKim Phillips 14451acebad3SYuan Kang #ifdef DEBUG 1446514df281SAlex Porosanu print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ", 14471acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 14481acebad3SYuan Kang req->cryptlen, 1); 14491acebad3SYuan Kang #endif 14501acebad3SYuan Kang 14511acebad3SYuan Kang /* Create and 
submit job descriptor*/ 14521acebad3SYuan Kang init_aead_giv_job(ctx->sh_desc_givenc, 14531acebad3SYuan Kang ctx->sh_desc_givenc_dma, edesc, req, contig); 14541acebad3SYuan Kang #ifdef DEBUG 1455514df281SAlex Porosanu print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 14561acebad3SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 14571acebad3SYuan Kang desc_bytes(edesc->hw_desc), 1); 14581acebad3SYuan Kang #endif 14591acebad3SYuan Kang 14608e8ec596SKim Phillips desc = edesc->hw_desc; 14611acebad3SYuan Kang ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 14621acebad3SYuan Kang if (!ret) { 14631acebad3SYuan Kang ret = -EINPROGRESS; 14641acebad3SYuan Kang } else { 14651acebad3SYuan Kang aead_unmap(jrdev, edesc, req); 14661acebad3SYuan Kang kfree(edesc); 14671acebad3SYuan Kang } 14688e8ec596SKim Phillips 14691acebad3SYuan Kang return ret; 14708e8ec596SKim Phillips } 14718e8ec596SKim Phillips 1472acdca31dSYuan Kang /* 1473acdca31dSYuan Kang * allocate and map the ablkcipher extended descriptor for ablkcipher 1474acdca31dSYuan Kang */ 1475acdca31dSYuan Kang static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request 1476acdca31dSYuan Kang *req, int desc_bytes, 1477acdca31dSYuan Kang bool *iv_contig_out) 1478acdca31dSYuan Kang { 1479acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1480acdca31dSYuan Kang struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1481acdca31dSYuan Kang struct device *jrdev = ctx->jrdev; 1482acdca31dSYuan Kang gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1483acdca31dSYuan Kang CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
1484acdca31dSYuan Kang GFP_KERNEL : GFP_ATOMIC; 1485a299c837SYuan Kang int src_nents, dst_nents = 0, sec4_sg_bytes; 1486acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 1487acdca31dSYuan Kang dma_addr_t iv_dma = 0; 1488acdca31dSYuan Kang bool iv_contig = false; 1489acdca31dSYuan Kang int sgc; 1490acdca31dSYuan Kang int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1491643b39b0SYuan Kang bool src_chained = false, dst_chained = false; 1492a299c837SYuan Kang int sec4_sg_index; 1493acdca31dSYuan Kang 1494643b39b0SYuan Kang src_nents = sg_count(req->src, req->nbytes, &src_chained); 1495acdca31dSYuan Kang 1496643b39b0SYuan Kang if (req->dst != req->src) 1497643b39b0SYuan Kang dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); 1498acdca31dSYuan Kang 1499acdca31dSYuan Kang if (likely(req->src == req->dst)) { 1500643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1501643b39b0SYuan Kang DMA_BIDIRECTIONAL, src_chained); 1502acdca31dSYuan Kang } else { 1503643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1504643b39b0SYuan Kang DMA_TO_DEVICE, src_chained); 1505643b39b0SYuan Kang sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 1506643b39b0SYuan Kang DMA_FROM_DEVICE, dst_chained); 1507acdca31dSYuan Kang } 1508acdca31dSYuan Kang 1509acdca31dSYuan Kang /* 1510acdca31dSYuan Kang * Check if iv can be contiguous with source and destination. 1511acdca31dSYuan Kang * If so, include it. If not, create scatterlist. 1512acdca31dSYuan Kang */ 1513acdca31dSYuan Kang iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); 1514acdca31dSYuan Kang if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) 1515acdca31dSYuan Kang iv_contig = true; 1516acdca31dSYuan Kang else 1517acdca31dSYuan Kang src_nents = src_nents ? : 1; 1518a299c837SYuan Kang sec4_sg_bytes = ((iv_contig ? 
0 : 1) + src_nents + dst_nents) * 1519a299c837SYuan Kang sizeof(struct sec4_sg_entry); 1520acdca31dSYuan Kang 1521acdca31dSYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 1522acdca31dSYuan Kang edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + 1523a299c837SYuan Kang sec4_sg_bytes, GFP_DMA | flags); 1524acdca31dSYuan Kang if (!edesc) { 1525acdca31dSYuan Kang dev_err(jrdev, "could not allocate extended descriptor\n"); 1526acdca31dSYuan Kang return ERR_PTR(-ENOMEM); 1527acdca31dSYuan Kang } 1528acdca31dSYuan Kang 1529acdca31dSYuan Kang edesc->src_nents = src_nents; 1530643b39b0SYuan Kang edesc->src_chained = src_chained; 1531acdca31dSYuan Kang edesc->dst_nents = dst_nents; 1532643b39b0SYuan Kang edesc->dst_chained = dst_chained; 1533a299c837SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 1534a299c837SYuan Kang edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1535acdca31dSYuan Kang desc_bytes; 1536acdca31dSYuan Kang 1537a299c837SYuan Kang sec4_sg_index = 0; 1538acdca31dSYuan Kang if (!iv_contig) { 1539a299c837SYuan Kang dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); 1540a299c837SYuan Kang sg_to_sec4_sg_last(req->src, src_nents, 1541a299c837SYuan Kang edesc->sec4_sg + 1, 0); 1542a299c837SYuan Kang sec4_sg_index += 1 + src_nents; 1543acdca31dSYuan Kang } 1544acdca31dSYuan Kang 1545643b39b0SYuan Kang if (dst_nents) { 1546a299c837SYuan Kang sg_to_sec4_sg_last(req->dst, dst_nents, 1547a299c837SYuan Kang edesc->sec4_sg + sec4_sg_index, 0); 1548acdca31dSYuan Kang } 1549acdca31dSYuan Kang 1550a299c837SYuan Kang edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1551a299c837SYuan Kang sec4_sg_bytes, DMA_TO_DEVICE); 1552acdca31dSYuan Kang edesc->iv_dma = iv_dma; 1553acdca31dSYuan Kang 1554acdca31dSYuan Kang #ifdef DEBUG 1555514df281SAlex Porosanu print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", 1556a299c837SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 
1557a299c837SYuan Kang sec4_sg_bytes, 1); 1558acdca31dSYuan Kang #endif 1559acdca31dSYuan Kang 1560acdca31dSYuan Kang *iv_contig_out = iv_contig; 1561acdca31dSYuan Kang return edesc; 1562acdca31dSYuan Kang } 1563acdca31dSYuan Kang 1564acdca31dSYuan Kang static int ablkcipher_encrypt(struct ablkcipher_request *req) 1565acdca31dSYuan Kang { 1566acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 1567acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1568acdca31dSYuan Kang struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1569acdca31dSYuan Kang struct device *jrdev = ctx->jrdev; 1570acdca31dSYuan Kang bool iv_contig; 1571acdca31dSYuan Kang u32 *desc; 1572acdca31dSYuan Kang int ret = 0; 1573acdca31dSYuan Kang 1574acdca31dSYuan Kang /* allocate extended descriptor */ 1575acdca31dSYuan Kang edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1576acdca31dSYuan Kang CAAM_CMD_SZ, &iv_contig); 1577acdca31dSYuan Kang if (IS_ERR(edesc)) 1578acdca31dSYuan Kang return PTR_ERR(edesc); 1579acdca31dSYuan Kang 1580acdca31dSYuan Kang /* Create and submit job descriptor*/ 1581acdca31dSYuan Kang init_ablkcipher_job(ctx->sh_desc_enc, 1582acdca31dSYuan Kang ctx->sh_desc_enc_dma, edesc, req, iv_contig); 1583acdca31dSYuan Kang #ifdef DEBUG 1584514df281SAlex Porosanu print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1585acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1586acdca31dSYuan Kang desc_bytes(edesc->hw_desc), 1); 1587acdca31dSYuan Kang #endif 1588acdca31dSYuan Kang desc = edesc->hw_desc; 1589acdca31dSYuan Kang ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); 1590acdca31dSYuan Kang 1591acdca31dSYuan Kang if (!ret) { 1592acdca31dSYuan Kang ret = -EINPROGRESS; 1593acdca31dSYuan Kang } else { 1594acdca31dSYuan Kang ablkcipher_unmap(jrdev, edesc, req); 1595acdca31dSYuan Kang kfree(edesc); 1596acdca31dSYuan Kang } 1597acdca31dSYuan Kang 1598acdca31dSYuan Kang return ret; 1599acdca31dSYuan 
Kang } 1600acdca31dSYuan Kang 1601acdca31dSYuan Kang static int ablkcipher_decrypt(struct ablkcipher_request *req) 1602acdca31dSYuan Kang { 1603acdca31dSYuan Kang struct ablkcipher_edesc *edesc; 1604acdca31dSYuan Kang struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1605acdca31dSYuan Kang struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1606acdca31dSYuan Kang struct device *jrdev = ctx->jrdev; 1607acdca31dSYuan Kang bool iv_contig; 1608acdca31dSYuan Kang u32 *desc; 1609acdca31dSYuan Kang int ret = 0; 1610acdca31dSYuan Kang 1611acdca31dSYuan Kang /* allocate extended descriptor */ 1612acdca31dSYuan Kang edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1613acdca31dSYuan Kang CAAM_CMD_SZ, &iv_contig); 1614acdca31dSYuan Kang if (IS_ERR(edesc)) 1615acdca31dSYuan Kang return PTR_ERR(edesc); 1616acdca31dSYuan Kang 1617acdca31dSYuan Kang /* Create and submit job descriptor*/ 1618acdca31dSYuan Kang init_ablkcipher_job(ctx->sh_desc_dec, 1619acdca31dSYuan Kang ctx->sh_desc_dec_dma, edesc, req, iv_contig); 1620acdca31dSYuan Kang desc = edesc->hw_desc; 1621acdca31dSYuan Kang #ifdef DEBUG 1622514df281SAlex Porosanu print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1623acdca31dSYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1624acdca31dSYuan Kang desc_bytes(edesc->hw_desc), 1); 1625acdca31dSYuan Kang #endif 1626acdca31dSYuan Kang 1627acdca31dSYuan Kang ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); 1628acdca31dSYuan Kang if (!ret) { 1629acdca31dSYuan Kang ret = -EINPROGRESS; 1630acdca31dSYuan Kang } else { 1631acdca31dSYuan Kang ablkcipher_unmap(jrdev, edesc, req); 1632acdca31dSYuan Kang kfree(edesc); 1633acdca31dSYuan Kang } 1634acdca31dSYuan Kang 1635acdca31dSYuan Kang return ret; 1636acdca31dSYuan Kang } 1637acdca31dSYuan Kang 1638885e9e2fSYuan Kang #define template_aead template_u.aead 1639acdca31dSYuan Kang #define template_ablkcipher template_u.ablkcipher 16408e8ec596SKim Phillips struct 
caam_alg_template { 16418e8ec596SKim Phillips char name[CRYPTO_MAX_ALG_NAME]; 16428e8ec596SKim Phillips char driver_name[CRYPTO_MAX_ALG_NAME]; 16438e8ec596SKim Phillips unsigned int blocksize; 1644885e9e2fSYuan Kang u32 type; 1645885e9e2fSYuan Kang union { 1646885e9e2fSYuan Kang struct ablkcipher_alg ablkcipher; 16478e8ec596SKim Phillips struct aead_alg aead; 1648885e9e2fSYuan Kang struct blkcipher_alg blkcipher; 1649885e9e2fSYuan Kang struct cipher_alg cipher; 1650885e9e2fSYuan Kang struct compress_alg compress; 1651885e9e2fSYuan Kang struct rng_alg rng; 1652885e9e2fSYuan Kang } template_u; 16538e8ec596SKim Phillips u32 class1_alg_type; 16548e8ec596SKim Phillips u32 class2_alg_type; 16558e8ec596SKim Phillips u32 alg_op; 16568e8ec596SKim Phillips }; 16578e8ec596SKim Phillips 16588e8ec596SKim Phillips static struct caam_alg_template driver_algs[] = { 1659246bbedbSHoria Geanta /* single-pass ipsec_esp descriptor */ 16608e8ec596SKim Phillips { 16618b4d43a4SKim Phillips .name = "authenc(hmac(md5),cbc(aes))", 16628b4d43a4SKim Phillips .driver_name = "authenc-hmac-md5-cbc-aes-caam", 16638b4d43a4SKim Phillips .blocksize = AES_BLOCK_SIZE, 16648b4d43a4SKim Phillips .type = CRYPTO_ALG_TYPE_AEAD, 16658b4d43a4SKim Phillips .template_aead = { 16668b4d43a4SKim Phillips .setkey = aead_setkey, 16678b4d43a4SKim Phillips .setauthsize = aead_setauthsize, 16688b4d43a4SKim Phillips .encrypt = aead_encrypt, 16698b4d43a4SKim Phillips .decrypt = aead_decrypt, 16708b4d43a4SKim Phillips .givencrypt = aead_givencrypt, 16718b4d43a4SKim Phillips .geniv = "<built-in>", 16728b4d43a4SKim Phillips .ivsize = AES_BLOCK_SIZE, 16738b4d43a4SKim Phillips .maxauthsize = MD5_DIGEST_SIZE, 16748b4d43a4SKim Phillips }, 16758b4d43a4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 16768b4d43a4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, 16778b4d43a4SKim Phillips .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 16788b4d43a4SKim Phillips }, 16798b4d43a4SKim 
Phillips { 16808e8ec596SKim Phillips .name = "authenc(hmac(sha1),cbc(aes))", 16818e8ec596SKim Phillips .driver_name = "authenc-hmac-sha1-cbc-aes-caam", 16828e8ec596SKim Phillips .blocksize = AES_BLOCK_SIZE, 1683885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1684885e9e2fSYuan Kang .template_aead = { 16850e479300SYuan Kang .setkey = aead_setkey, 16860e479300SYuan Kang .setauthsize = aead_setauthsize, 16870e479300SYuan Kang .encrypt = aead_encrypt, 16880e479300SYuan Kang .decrypt = aead_decrypt, 16890e479300SYuan Kang .givencrypt = aead_givencrypt, 16908e8ec596SKim Phillips .geniv = "<built-in>", 16918e8ec596SKim Phillips .ivsize = AES_BLOCK_SIZE, 16928e8ec596SKim Phillips .maxauthsize = SHA1_DIGEST_SIZE, 16938e8ec596SKim Phillips }, 16948e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 16958e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, 16968e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 16978e8ec596SKim Phillips }, 16988e8ec596SKim Phillips { 1699e863f9ccSHemant Agrawal .name = "authenc(hmac(sha224),cbc(aes))", 1700e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha224-cbc-aes-caam", 1701e863f9ccSHemant Agrawal .blocksize = AES_BLOCK_SIZE, 1702cb7d5662SVakul Garg .type = CRYPTO_ALG_TYPE_AEAD, 1703e863f9ccSHemant Agrawal .template_aead = { 1704e863f9ccSHemant Agrawal .setkey = aead_setkey, 1705e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 1706e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 1707e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 1708e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 1709e863f9ccSHemant Agrawal .geniv = "<built-in>", 1710e863f9ccSHemant Agrawal .ivsize = AES_BLOCK_SIZE, 1711e863f9ccSHemant Agrawal .maxauthsize = SHA224_DIGEST_SIZE, 1712e863f9ccSHemant Agrawal }, 1713e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1714e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 
1715e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 1716e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 1717e863f9ccSHemant Agrawal }, 1718e863f9ccSHemant Agrawal { 17198e8ec596SKim Phillips .name = "authenc(hmac(sha256),cbc(aes))", 17208e8ec596SKim Phillips .driver_name = "authenc-hmac-sha256-cbc-aes-caam", 17218e8ec596SKim Phillips .blocksize = AES_BLOCK_SIZE, 1722885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1723885e9e2fSYuan Kang .template_aead = { 17240e479300SYuan Kang .setkey = aead_setkey, 17250e479300SYuan Kang .setauthsize = aead_setauthsize, 17260e479300SYuan Kang .encrypt = aead_encrypt, 17270e479300SYuan Kang .decrypt = aead_decrypt, 17280e479300SYuan Kang .givencrypt = aead_givencrypt, 17298e8ec596SKim Phillips .geniv = "<built-in>", 17308e8ec596SKim Phillips .ivsize = AES_BLOCK_SIZE, 17318e8ec596SKim Phillips .maxauthsize = SHA256_DIGEST_SIZE, 17328e8ec596SKim Phillips }, 17338e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 17348e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 17358e8ec596SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 17368e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 17378e8ec596SKim Phillips }, 17388e8ec596SKim Phillips { 1739e863f9ccSHemant Agrawal .name = "authenc(hmac(sha384),cbc(aes))", 1740e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha384-cbc-aes-caam", 1741e863f9ccSHemant Agrawal .blocksize = AES_BLOCK_SIZE, 1742cb7d5662SVakul Garg .type = CRYPTO_ALG_TYPE_AEAD, 1743e863f9ccSHemant Agrawal .template_aead = { 1744e863f9ccSHemant Agrawal .setkey = aead_setkey, 1745e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 1746e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 1747e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 1748e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 1749e863f9ccSHemant Agrawal .geniv = "<built-in>", 1750e863f9ccSHemant Agrawal .ivsize = AES_BLOCK_SIZE, 1751e863f9ccSHemant Agrawal .maxauthsize = 
SHA384_DIGEST_SIZE, 1752e863f9ccSHemant Agrawal }, 1753e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1754e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 1755e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 1756e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 1757e863f9ccSHemant Agrawal }, 1758e863f9ccSHemant Agrawal 1759e863f9ccSHemant Agrawal { 17604427b1b4SKim Phillips .name = "authenc(hmac(sha512),cbc(aes))", 17614427b1b4SKim Phillips .driver_name = "authenc-hmac-sha512-cbc-aes-caam", 17624427b1b4SKim Phillips .blocksize = AES_BLOCK_SIZE, 1763885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1764885e9e2fSYuan Kang .template_aead = { 17650e479300SYuan Kang .setkey = aead_setkey, 17660e479300SYuan Kang .setauthsize = aead_setauthsize, 17670e479300SYuan Kang .encrypt = aead_encrypt, 17680e479300SYuan Kang .decrypt = aead_decrypt, 17690e479300SYuan Kang .givencrypt = aead_givencrypt, 17704427b1b4SKim Phillips .geniv = "<built-in>", 17714427b1b4SKim Phillips .ivsize = AES_BLOCK_SIZE, 17724427b1b4SKim Phillips .maxauthsize = SHA512_DIGEST_SIZE, 17734427b1b4SKim Phillips }, 17744427b1b4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 17754427b1b4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 17764427b1b4SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 17774427b1b4SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 17784427b1b4SKim Phillips }, 17794427b1b4SKim Phillips { 17808b4d43a4SKim Phillips .name = "authenc(hmac(md5),cbc(des3_ede))", 17818b4d43a4SKim Phillips .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam", 17828b4d43a4SKim Phillips .blocksize = DES3_EDE_BLOCK_SIZE, 17838b4d43a4SKim Phillips .type = CRYPTO_ALG_TYPE_AEAD, 17848b4d43a4SKim Phillips .template_aead = { 17858b4d43a4SKim Phillips .setkey = aead_setkey, 17868b4d43a4SKim Phillips .setauthsize = aead_setauthsize, 17878b4d43a4SKim Phillips .encrypt = aead_encrypt, 17888b4d43a4SKim Phillips .decrypt 
= aead_decrypt, 17898b4d43a4SKim Phillips .givencrypt = aead_givencrypt, 17908b4d43a4SKim Phillips .geniv = "<built-in>", 17918b4d43a4SKim Phillips .ivsize = DES3_EDE_BLOCK_SIZE, 17928b4d43a4SKim Phillips .maxauthsize = MD5_DIGEST_SIZE, 17938b4d43a4SKim Phillips }, 17948b4d43a4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 17958b4d43a4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, 17968b4d43a4SKim Phillips .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 17978b4d43a4SKim Phillips }, 17988b4d43a4SKim Phillips { 17998e8ec596SKim Phillips .name = "authenc(hmac(sha1),cbc(des3_ede))", 18008e8ec596SKim Phillips .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", 18018e8ec596SKim Phillips .blocksize = DES3_EDE_BLOCK_SIZE, 1802885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1803885e9e2fSYuan Kang .template_aead = { 18040e479300SYuan Kang .setkey = aead_setkey, 18050e479300SYuan Kang .setauthsize = aead_setauthsize, 18060e479300SYuan Kang .encrypt = aead_encrypt, 18070e479300SYuan Kang .decrypt = aead_decrypt, 18080e479300SYuan Kang .givencrypt = aead_givencrypt, 18098e8ec596SKim Phillips .geniv = "<built-in>", 18108e8ec596SKim Phillips .ivsize = DES3_EDE_BLOCK_SIZE, 18118e8ec596SKim Phillips .maxauthsize = SHA1_DIGEST_SIZE, 18128e8ec596SKim Phillips }, 18138e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 18148e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, 18158e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 18168e8ec596SKim Phillips }, 18178e8ec596SKim Phillips { 1818e863f9ccSHemant Agrawal .name = "authenc(hmac(sha224),cbc(des3_ede))", 1819e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam", 1820e863f9ccSHemant Agrawal .blocksize = DES3_EDE_BLOCK_SIZE, 1821cb7d5662SVakul Garg .type = CRYPTO_ALG_TYPE_AEAD, 1822e863f9ccSHemant Agrawal .template_aead = { 1823e863f9ccSHemant Agrawal .setkey = aead_setkey, 
1824e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 1825e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 1826e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 1827e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 1828e863f9ccSHemant Agrawal .geniv = "<built-in>", 1829e863f9ccSHemant Agrawal .ivsize = DES3_EDE_BLOCK_SIZE, 1830e863f9ccSHemant Agrawal .maxauthsize = SHA224_DIGEST_SIZE, 1831e863f9ccSHemant Agrawal }, 1832e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1833e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1834e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 1835e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 1836e863f9ccSHemant Agrawal }, 1837e863f9ccSHemant Agrawal { 18388e8ec596SKim Phillips .name = "authenc(hmac(sha256),cbc(des3_ede))", 18398e8ec596SKim Phillips .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", 18408e8ec596SKim Phillips .blocksize = DES3_EDE_BLOCK_SIZE, 1841885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1842885e9e2fSYuan Kang .template_aead = { 18430e479300SYuan Kang .setkey = aead_setkey, 18440e479300SYuan Kang .setauthsize = aead_setauthsize, 18450e479300SYuan Kang .encrypt = aead_encrypt, 18460e479300SYuan Kang .decrypt = aead_decrypt, 18470e479300SYuan Kang .givencrypt = aead_givencrypt, 18488e8ec596SKim Phillips .geniv = "<built-in>", 18498e8ec596SKim Phillips .ivsize = DES3_EDE_BLOCK_SIZE, 18508e8ec596SKim Phillips .maxauthsize = SHA256_DIGEST_SIZE, 18518e8ec596SKim Phillips }, 18528e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 18538e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 18548e8ec596SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 18558e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 18568e8ec596SKim Phillips }, 18578e8ec596SKim Phillips { 1858e863f9ccSHemant Agrawal .name = "authenc(hmac(sha384),cbc(des3_ede))", 1859e863f9ccSHemant Agrawal .driver_name = 
"authenc-hmac-sha384-cbc-des3_ede-caam", 1860e863f9ccSHemant Agrawal .blocksize = DES3_EDE_BLOCK_SIZE, 1861cb7d5662SVakul Garg .type = CRYPTO_ALG_TYPE_AEAD, 1862e863f9ccSHemant Agrawal .template_aead = { 1863e863f9ccSHemant Agrawal .setkey = aead_setkey, 1864e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 1865e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 1866e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 1867e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 1868e863f9ccSHemant Agrawal .geniv = "<built-in>", 1869e863f9ccSHemant Agrawal .ivsize = DES3_EDE_BLOCK_SIZE, 1870e863f9ccSHemant Agrawal .maxauthsize = SHA384_DIGEST_SIZE, 1871e863f9ccSHemant Agrawal }, 1872e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1873e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 1874e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 1875e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 1876e863f9ccSHemant Agrawal }, 1877e863f9ccSHemant Agrawal { 18784427b1b4SKim Phillips .name = "authenc(hmac(sha512),cbc(des3_ede))", 18794427b1b4SKim Phillips .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", 18804427b1b4SKim Phillips .blocksize = DES3_EDE_BLOCK_SIZE, 1881885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1882885e9e2fSYuan Kang .template_aead = { 18830e479300SYuan Kang .setkey = aead_setkey, 18840e479300SYuan Kang .setauthsize = aead_setauthsize, 18850e479300SYuan Kang .encrypt = aead_encrypt, 18860e479300SYuan Kang .decrypt = aead_decrypt, 18870e479300SYuan Kang .givencrypt = aead_givencrypt, 18884427b1b4SKim Phillips .geniv = "<built-in>", 18894427b1b4SKim Phillips .ivsize = DES3_EDE_BLOCK_SIZE, 18904427b1b4SKim Phillips .maxauthsize = SHA512_DIGEST_SIZE, 18914427b1b4SKim Phillips }, 18924427b1b4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 18934427b1b4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 18944427b1b4SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 
18954427b1b4SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 18964427b1b4SKim Phillips }, 18974427b1b4SKim Phillips { 18988b4d43a4SKim Phillips .name = "authenc(hmac(md5),cbc(des))", 18998b4d43a4SKim Phillips .driver_name = "authenc-hmac-md5-cbc-des-caam", 19008b4d43a4SKim Phillips .blocksize = DES_BLOCK_SIZE, 19018b4d43a4SKim Phillips .type = CRYPTO_ALG_TYPE_AEAD, 19028b4d43a4SKim Phillips .template_aead = { 19038b4d43a4SKim Phillips .setkey = aead_setkey, 19048b4d43a4SKim Phillips .setauthsize = aead_setauthsize, 19058b4d43a4SKim Phillips .encrypt = aead_encrypt, 19068b4d43a4SKim Phillips .decrypt = aead_decrypt, 19078b4d43a4SKim Phillips .givencrypt = aead_givencrypt, 19088b4d43a4SKim Phillips .geniv = "<built-in>", 19098b4d43a4SKim Phillips .ivsize = DES_BLOCK_SIZE, 19108b4d43a4SKim Phillips .maxauthsize = MD5_DIGEST_SIZE, 19118b4d43a4SKim Phillips }, 19128b4d43a4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 19138b4d43a4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, 19148b4d43a4SKim Phillips .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 19158b4d43a4SKim Phillips }, 19168b4d43a4SKim Phillips { 19178e8ec596SKim Phillips .name = "authenc(hmac(sha1),cbc(des))", 19188e8ec596SKim Phillips .driver_name = "authenc-hmac-sha1-cbc-des-caam", 19198e8ec596SKim Phillips .blocksize = DES_BLOCK_SIZE, 1920885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1921885e9e2fSYuan Kang .template_aead = { 19220e479300SYuan Kang .setkey = aead_setkey, 19230e479300SYuan Kang .setauthsize = aead_setauthsize, 19240e479300SYuan Kang .encrypt = aead_encrypt, 19250e479300SYuan Kang .decrypt = aead_decrypt, 19260e479300SYuan Kang .givencrypt = aead_givencrypt, 19278e8ec596SKim Phillips .geniv = "<built-in>", 19288e8ec596SKim Phillips .ivsize = DES_BLOCK_SIZE, 19298e8ec596SKim Phillips .maxauthsize = SHA1_DIGEST_SIZE, 19308e8ec596SKim Phillips }, 19318e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 
19328e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, 19338e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 19348e8ec596SKim Phillips }, 19358e8ec596SKim Phillips { 1936e863f9ccSHemant Agrawal .name = "authenc(hmac(sha224),cbc(des))", 1937e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha224-cbc-des-caam", 1938e863f9ccSHemant Agrawal .blocksize = DES_BLOCK_SIZE, 1939cb7d5662SVakul Garg .type = CRYPTO_ALG_TYPE_AEAD, 1940e863f9ccSHemant Agrawal .template_aead = { 1941e863f9ccSHemant Agrawal .setkey = aead_setkey, 1942e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 1943e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 1944e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 1945e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 1946e863f9ccSHemant Agrawal .geniv = "<built-in>", 1947e863f9ccSHemant Agrawal .ivsize = DES_BLOCK_SIZE, 1948e863f9ccSHemant Agrawal .maxauthsize = SHA224_DIGEST_SIZE, 1949e863f9ccSHemant Agrawal }, 1950e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 1951e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1952e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 1953e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 1954e863f9ccSHemant Agrawal }, 1955e863f9ccSHemant Agrawal { 19568e8ec596SKim Phillips .name = "authenc(hmac(sha256),cbc(des))", 19578e8ec596SKim Phillips .driver_name = "authenc-hmac-sha256-cbc-des-caam", 19588e8ec596SKim Phillips .blocksize = DES_BLOCK_SIZE, 1959885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 1960885e9e2fSYuan Kang .template_aead = { 19610e479300SYuan Kang .setkey = aead_setkey, 19620e479300SYuan Kang .setauthsize = aead_setauthsize, 19630e479300SYuan Kang .encrypt = aead_encrypt, 19640e479300SYuan Kang .decrypt = aead_decrypt, 19650e479300SYuan Kang .givencrypt = aead_givencrypt, 19668e8ec596SKim Phillips .geniv = "<built-in>", 19678e8ec596SKim Phillips .ivsize = DES_BLOCK_SIZE, 
19688e8ec596SKim Phillips .maxauthsize = SHA256_DIGEST_SIZE, 19698e8ec596SKim Phillips }, 19708e8ec596SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 19718e8ec596SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 19728e8ec596SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 19738e8ec596SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 19748e8ec596SKim Phillips }, 19754427b1b4SKim Phillips { 1976e863f9ccSHemant Agrawal .name = "authenc(hmac(sha384),cbc(des))", 1977e863f9ccSHemant Agrawal .driver_name = "authenc-hmac-sha384-cbc-des-caam", 1978e863f9ccSHemant Agrawal .blocksize = DES_BLOCK_SIZE, 1979cb7d5662SVakul Garg .type = CRYPTO_ALG_TYPE_AEAD, 1980e863f9ccSHemant Agrawal .template_aead = { 1981e863f9ccSHemant Agrawal .setkey = aead_setkey, 1982e863f9ccSHemant Agrawal .setauthsize = aead_setauthsize, 1983e863f9ccSHemant Agrawal .encrypt = aead_encrypt, 1984e863f9ccSHemant Agrawal .decrypt = aead_decrypt, 1985e863f9ccSHemant Agrawal .givencrypt = aead_givencrypt, 1986e863f9ccSHemant Agrawal .geniv = "<built-in>", 1987e863f9ccSHemant Agrawal .ivsize = DES_BLOCK_SIZE, 1988e863f9ccSHemant Agrawal .maxauthsize = SHA384_DIGEST_SIZE, 1989e863f9ccSHemant Agrawal }, 1990e863f9ccSHemant Agrawal .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 1991e863f9ccSHemant Agrawal .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 1992e863f9ccSHemant Agrawal OP_ALG_AAI_HMAC_PRECOMP, 1993e863f9ccSHemant Agrawal .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 1994e863f9ccSHemant Agrawal }, 1995e863f9ccSHemant Agrawal { 19964427b1b4SKim Phillips .name = "authenc(hmac(sha512),cbc(des))", 19974427b1b4SKim Phillips .driver_name = "authenc-hmac-sha512-cbc-des-caam", 19984427b1b4SKim Phillips .blocksize = DES_BLOCK_SIZE, 1999885e9e2fSYuan Kang .type = CRYPTO_ALG_TYPE_AEAD, 2000885e9e2fSYuan Kang .template_aead = { 20010e479300SYuan Kang .setkey = aead_setkey, 20020e479300SYuan Kang .setauthsize = aead_setauthsize, 20030e479300SYuan Kang .encrypt = aead_encrypt, 
20040e479300SYuan Kang .decrypt = aead_decrypt, 20050e479300SYuan Kang .givencrypt = aead_givencrypt, 20064427b1b4SKim Phillips .geniv = "<built-in>", 20074427b1b4SKim Phillips .ivsize = DES_BLOCK_SIZE, 20084427b1b4SKim Phillips .maxauthsize = SHA512_DIGEST_SIZE, 20094427b1b4SKim Phillips }, 20104427b1b4SKim Phillips .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 20114427b1b4SKim Phillips .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 20124427b1b4SKim Phillips OP_ALG_AAI_HMAC_PRECOMP, 20134427b1b4SKim Phillips .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 20144427b1b4SKim Phillips }, 2015acdca31dSYuan Kang /* ablkcipher descriptor */ 2016acdca31dSYuan Kang { 2017acdca31dSYuan Kang .name = "cbc(aes)", 2018acdca31dSYuan Kang .driver_name = "cbc-aes-caam", 2019acdca31dSYuan Kang .blocksize = AES_BLOCK_SIZE, 2020acdca31dSYuan Kang .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2021acdca31dSYuan Kang .template_ablkcipher = { 2022acdca31dSYuan Kang .setkey = ablkcipher_setkey, 2023acdca31dSYuan Kang .encrypt = ablkcipher_encrypt, 2024acdca31dSYuan Kang .decrypt = ablkcipher_decrypt, 2025acdca31dSYuan Kang .geniv = "eseqiv", 2026acdca31dSYuan Kang .min_keysize = AES_MIN_KEY_SIZE, 2027acdca31dSYuan Kang .max_keysize = AES_MAX_KEY_SIZE, 2028acdca31dSYuan Kang .ivsize = AES_BLOCK_SIZE, 2029acdca31dSYuan Kang }, 2030acdca31dSYuan Kang .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2031acdca31dSYuan Kang }, 2032acdca31dSYuan Kang { 2033acdca31dSYuan Kang .name = "cbc(des3_ede)", 2034acdca31dSYuan Kang .driver_name = "cbc-3des-caam", 2035acdca31dSYuan Kang .blocksize = DES3_EDE_BLOCK_SIZE, 2036acdca31dSYuan Kang .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2037acdca31dSYuan Kang .template_ablkcipher = { 2038acdca31dSYuan Kang .setkey = ablkcipher_setkey, 2039acdca31dSYuan Kang .encrypt = ablkcipher_encrypt, 2040acdca31dSYuan Kang .decrypt = ablkcipher_decrypt, 2041acdca31dSYuan Kang .geniv = "eseqiv", 2042acdca31dSYuan Kang .min_keysize = DES3_EDE_KEY_SIZE, 2043acdca31dSYuan Kang 
.max_keysize = DES3_EDE_KEY_SIZE, 2044acdca31dSYuan Kang .ivsize = DES3_EDE_BLOCK_SIZE, 2045acdca31dSYuan Kang }, 2046acdca31dSYuan Kang .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2047acdca31dSYuan Kang }, 2048acdca31dSYuan Kang { 2049acdca31dSYuan Kang .name = "cbc(des)", 2050acdca31dSYuan Kang .driver_name = "cbc-des-caam", 2051acdca31dSYuan Kang .blocksize = DES_BLOCK_SIZE, 2052acdca31dSYuan Kang .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2053acdca31dSYuan Kang .template_ablkcipher = { 2054acdca31dSYuan Kang .setkey = ablkcipher_setkey, 2055acdca31dSYuan Kang .encrypt = ablkcipher_encrypt, 2056acdca31dSYuan Kang .decrypt = ablkcipher_decrypt, 2057acdca31dSYuan Kang .geniv = "eseqiv", 2058acdca31dSYuan Kang .min_keysize = DES_KEY_SIZE, 2059acdca31dSYuan Kang .max_keysize = DES_KEY_SIZE, 2060acdca31dSYuan Kang .ivsize = DES_BLOCK_SIZE, 2061acdca31dSYuan Kang }, 2062acdca31dSYuan Kang .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2063acdca31dSYuan Kang } 20648e8ec596SKim Phillips }; 20658e8ec596SKim Phillips 20668e8ec596SKim Phillips struct caam_crypto_alg { 20678e8ec596SKim Phillips struct list_head entry; 20688e8ec596SKim Phillips int class1_alg_type; 20698e8ec596SKim Phillips int class2_alg_type; 20708e8ec596SKim Phillips int alg_op; 20718e8ec596SKim Phillips struct crypto_alg crypto_alg; 20728e8ec596SKim Phillips }; 20738e8ec596SKim Phillips 20748e8ec596SKim Phillips static int caam_cra_init(struct crypto_tfm *tfm) 20758e8ec596SKim Phillips { 20768e8ec596SKim Phillips struct crypto_alg *alg = tfm->__crt_alg; 20778e8ec596SKim Phillips struct caam_crypto_alg *caam_alg = 20788e8ec596SKim Phillips container_of(alg, struct caam_crypto_alg, crypto_alg); 20798e8ec596SKim Phillips struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 20808e8ec596SKim Phillips 2081cfc6f11bSRuchika Gupta ctx->jrdev = caam_jr_alloc(); 2082cfc6f11bSRuchika Gupta if (IS_ERR(ctx->jrdev)) { 2083cfc6f11bSRuchika Gupta pr_err("Job Ring Device allocation for transform failed\n"); 
2084cfc6f11bSRuchika Gupta return PTR_ERR(ctx->jrdev); 2085cfc6f11bSRuchika Gupta } 20868e8ec596SKim Phillips 20878e8ec596SKim Phillips /* copy descriptor header template value */ 20888e8ec596SKim Phillips ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; 20898e8ec596SKim Phillips ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; 20908e8ec596SKim Phillips ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; 20918e8ec596SKim Phillips 20928e8ec596SKim Phillips return 0; 20938e8ec596SKim Phillips } 20948e8ec596SKim Phillips 20958e8ec596SKim Phillips static void caam_cra_exit(struct crypto_tfm *tfm) 20968e8ec596SKim Phillips { 20978e8ec596SKim Phillips struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 20988e8ec596SKim Phillips 20991acebad3SYuan Kang if (ctx->sh_desc_enc_dma && 21001acebad3SYuan Kang !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) 21011acebad3SYuan Kang dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, 21021acebad3SYuan Kang desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); 21031acebad3SYuan Kang if (ctx->sh_desc_dec_dma && 21041acebad3SYuan Kang !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) 21051acebad3SYuan Kang dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, 21061acebad3SYuan Kang desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); 21071acebad3SYuan Kang if (ctx->sh_desc_givenc_dma && 21081acebad3SYuan Kang !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) 21091acebad3SYuan Kang dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, 21101acebad3SYuan Kang desc_bytes(ctx->sh_desc_givenc), 21114427b1b4SKim Phillips DMA_TO_DEVICE); 2112cfc6f11bSRuchika Gupta 2113cfc6f11bSRuchika Gupta caam_jr_free(ctx->jrdev); 21148e8ec596SKim Phillips } 21158e8ec596SKim Phillips 21168e8ec596SKim Phillips static void __exit caam_algapi_exit(void) 21178e8ec596SKim Phillips { 21188e8ec596SKim Phillips 21198e8ec596SKim Phillips struct caam_crypto_alg *t_alg, *n; 21208e8ec596SKim Phillips 2121cfc6f11bSRuchika Gupta if 
(!alg_list.next) 21228e8ec596SKim Phillips return; 21238e8ec596SKim Phillips 2124cfc6f11bSRuchika Gupta list_for_each_entry_safe(t_alg, n, &alg_list, entry) { 21258e8ec596SKim Phillips crypto_unregister_alg(&t_alg->crypto_alg); 21268e8ec596SKim Phillips list_del(&t_alg->entry); 21278e8ec596SKim Phillips kfree(t_alg); 21288e8ec596SKim Phillips } 21298e8ec596SKim Phillips } 21308e8ec596SKim Phillips 2131cfc6f11bSRuchika Gupta static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template 21328e8ec596SKim Phillips *template) 21338e8ec596SKim Phillips { 21348e8ec596SKim Phillips struct caam_crypto_alg *t_alg; 21358e8ec596SKim Phillips struct crypto_alg *alg; 21368e8ec596SKim Phillips 21378e8ec596SKim Phillips t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); 21388e8ec596SKim Phillips if (!t_alg) { 2139cfc6f11bSRuchika Gupta pr_err("failed to allocate t_alg\n"); 21408e8ec596SKim Phillips return ERR_PTR(-ENOMEM); 21418e8ec596SKim Phillips } 21428e8ec596SKim Phillips 21438e8ec596SKim Phillips alg = &t_alg->crypto_alg; 21448e8ec596SKim Phillips 21458e8ec596SKim Phillips snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); 21468e8ec596SKim Phillips snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 21478e8ec596SKim Phillips template->driver_name); 21488e8ec596SKim Phillips alg->cra_module = THIS_MODULE; 21498e8ec596SKim Phillips alg->cra_init = caam_cra_init; 21508e8ec596SKim Phillips alg->cra_exit = caam_cra_exit; 21518e8ec596SKim Phillips alg->cra_priority = CAAM_CRA_PRIORITY; 21528e8ec596SKim Phillips alg->cra_blocksize = template->blocksize; 21538e8ec596SKim Phillips alg->cra_alignmask = 0; 21548e8ec596SKim Phillips alg->cra_ctxsize = sizeof(struct caam_ctx); 2155d912bb76SNikos Mavrogiannopoulos alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | 2156d912bb76SNikos Mavrogiannopoulos template->type; 2157885e9e2fSYuan Kang switch (template->type) { 2158acdca31dSYuan Kang case CRYPTO_ALG_TYPE_ABLKCIPHER: 
2159acdca31dSYuan Kang alg->cra_type = &crypto_ablkcipher_type; 2160acdca31dSYuan Kang alg->cra_ablkcipher = template->template_ablkcipher; 2161acdca31dSYuan Kang break; 2162885e9e2fSYuan Kang case CRYPTO_ALG_TYPE_AEAD: 2163885e9e2fSYuan Kang alg->cra_type = &crypto_aead_type; 2164885e9e2fSYuan Kang alg->cra_aead = template->template_aead; 2165885e9e2fSYuan Kang break; 2166885e9e2fSYuan Kang } 21678e8ec596SKim Phillips 21688e8ec596SKim Phillips t_alg->class1_alg_type = template->class1_alg_type; 21698e8ec596SKim Phillips t_alg->class2_alg_type = template->class2_alg_type; 21708e8ec596SKim Phillips t_alg->alg_op = template->alg_op; 21718e8ec596SKim Phillips 21728e8ec596SKim Phillips return t_alg; 21738e8ec596SKim Phillips } 21748e8ec596SKim Phillips 21758e8ec596SKim Phillips static int __init caam_algapi_init(void) 21768e8ec596SKim Phillips { 21778e8ec596SKim Phillips int i = 0, err = 0; 21788e8ec596SKim Phillips 2179cfc6f11bSRuchika Gupta INIT_LIST_HEAD(&alg_list); 21808e8ec596SKim Phillips 21818e8ec596SKim Phillips /* register crypto algorithms the device supports */ 21828e8ec596SKim Phillips for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 21838e8ec596SKim Phillips /* TODO: check if h/w supports alg */ 21848e8ec596SKim Phillips struct caam_crypto_alg *t_alg; 21858e8ec596SKim Phillips 2186cfc6f11bSRuchika Gupta t_alg = caam_alg_alloc(&driver_algs[i]); 21878e8ec596SKim Phillips if (IS_ERR(t_alg)) { 21888e8ec596SKim Phillips err = PTR_ERR(t_alg); 2189cfc6f11bSRuchika Gupta pr_warn("%s alg allocation failed\n", 2190cdc712d8SDan Carpenter driver_algs[i].driver_name); 21918e8ec596SKim Phillips continue; 21928e8ec596SKim Phillips } 21938e8ec596SKim Phillips 21948e8ec596SKim Phillips err = crypto_register_alg(&t_alg->crypto_alg); 21958e8ec596SKim Phillips if (err) { 2196cfc6f11bSRuchika Gupta pr_warn("%s alg registration failed\n", 21978e8ec596SKim Phillips t_alg->crypto_alg.cra_driver_name); 21988e8ec596SKim Phillips kfree(t_alg); 2199246bbedbSHoria Geanta } else 
2200cfc6f11bSRuchika Gupta list_add_tail(&t_alg->entry, &alg_list); 22018e8ec596SKim Phillips } 2202cfc6f11bSRuchika Gupta if (!list_empty(&alg_list)) 2203cfc6f11bSRuchika Gupta pr_info("caam algorithms registered in /proc/crypto\n"); 22048e8ec596SKim Phillips 22058e8ec596SKim Phillips return err; 22068e8ec596SKim Phillips } 22078e8ec596SKim Phillips 22088e8ec596SKim Phillips module_init(caam_algapi_init); 22098e8ec596SKim Phillips module_exit(caam_algapi_exit); 22108e8ec596SKim Phillips 22118e8ec596SKim Phillips MODULE_LICENSE("GPL"); 22128e8ec596SKim Phillips MODULE_DESCRIPTION("FSL CAAM support for crypto API"); 22138e8ec596SKim Phillips MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); 2214