xref: /openbmc/linux/drivers/crypto/caam/caamalg.c (revision 61bb86bb)
18e8ec596SKim Phillips /*
28e8ec596SKim Phillips  * caam - Freescale FSL CAAM support for crypto API
38e8ec596SKim Phillips  *
48e8ec596SKim Phillips  * Copyright 2008-2011 Freescale Semiconductor, Inc.
58e8ec596SKim Phillips  *
68e8ec596SKim Phillips  * Based on talitos crypto API driver.
78e8ec596SKim Phillips  *
88e8ec596SKim Phillips  * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
98e8ec596SKim Phillips  *
108e8ec596SKim Phillips  * ---------------                     ---------------
118e8ec596SKim Phillips  * | JobDesc #1  |-------------------->|  ShareDesc  |
128e8ec596SKim Phillips  * | *(packet 1) |                     |   (PDB)     |
138e8ec596SKim Phillips  * ---------------      |------------->|  (hashKey)  |
148e8ec596SKim Phillips  *       .              |              | (cipherKey) |
158e8ec596SKim Phillips  *       .              |    |-------->| (operation) |
168e8ec596SKim Phillips  * ---------------      |    |         ---------------
178e8ec596SKim Phillips  * | JobDesc #2  |------|    |
188e8ec596SKim Phillips  * | *(packet 2) |           |
198e8ec596SKim Phillips  * ---------------           |
208e8ec596SKim Phillips  *       .                   |
218e8ec596SKim Phillips  *       .                   |
228e8ec596SKim Phillips  * ---------------           |
238e8ec596SKim Phillips  * | JobDesc #3  |------------
248e8ec596SKim Phillips  * | *(packet 3) |
258e8ec596SKim Phillips  * ---------------
268e8ec596SKim Phillips  *
278e8ec596SKim Phillips  * The SharedDesc never changes for a connection unless rekeyed, but
288e8ec596SKim Phillips  * each packet will likely be in a different place. So all we need
298e8ec596SKim Phillips  * to know to process the packet is where the input is, where the
308e8ec596SKim Phillips  * output goes, and what context we want to process with. Context is
318e8ec596SKim Phillips  * in the SharedDesc, packet references in the JobDesc.
328e8ec596SKim Phillips  *
338e8ec596SKim Phillips  * So, a job desc looks like:
348e8ec596SKim Phillips  *
358e8ec596SKim Phillips  * ---------------------
368e8ec596SKim Phillips  * | Header            |
378e8ec596SKim Phillips  * | ShareDesc Pointer |
388e8ec596SKim Phillips  * | SEQ_OUT_PTR       |
398e8ec596SKim Phillips  * | (output buffer)   |
406ec47334SYuan Kang  * | (output length)   |
418e8ec596SKim Phillips  * | SEQ_IN_PTR        |
428e8ec596SKim Phillips  * | (input buffer)    |
436ec47334SYuan Kang  * | (input length)    |
448e8ec596SKim Phillips  * ---------------------
458e8ec596SKim Phillips  */
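/*
 * As an illustration of the layout above: a job descriptor that reuses an
 * already-DMA-mapped shared descriptor is assembled with the desc_constr.h
 * helpers roughly as follows (a minimal sketch mirroring init_aead_job()
 * below; the lengths, options and *_dma variables are placeholders):
 *
 *	len = desc_len(sh_desc);
 *	init_job_desc_shared(desc, sh_desc_dma, len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 */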
468e8ec596SKim Phillips 
478e8ec596SKim Phillips #include "compat.h"
488e8ec596SKim Phillips 
498e8ec596SKim Phillips #include "regs.h"
508e8ec596SKim Phillips #include "intern.h"
518e8ec596SKim Phillips #include "desc_constr.h"
528e8ec596SKim Phillips #include "jr.h"
538e8ec596SKim Phillips #include "error.h"
54a299c837SYuan Kang #include "sg_sw_sec4.h"
554c1ec1f9SYuan Kang #include "key_gen.h"
568e8ec596SKim Phillips 
578e8ec596SKim Phillips /*
588e8ec596SKim Phillips  * crypto alg
598e8ec596SKim Phillips  */
608e8ec596SKim Phillips #define CAAM_CRA_PRIORITY		3000
618e8ec596SKim Phillips /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
628e8ec596SKim Phillips #define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
638e8ec596SKim Phillips 					 SHA512_DIGEST_SIZE * 2)
648e8ec596SKim Phillips /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
658e8ec596SKim Phillips #define CAAM_MAX_IV_LENGTH		16
668e8ec596SKim Phillips 
674427b1b4SKim Phillips /* length of descriptors text */
686ec47334SYuan Kang #define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
691acebad3SYuan Kang 
701acebad3SYuan Kang #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
711acebad3SYuan Kang #define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
721acebad3SYuan Kang #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
731acebad3SYuan Kang #define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
741acebad3SYuan Kang 
75acdca31dSYuan Kang #define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
76acdca31dSYuan Kang #define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
77acdca31dSYuan Kang 					 20 * CAAM_CMD_SZ)
78acdca31dSYuan Kang #define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
79acdca31dSYuan Kang 					 15 * CAAM_CMD_SZ)
80acdca31dSYuan Kang 
811acebad3SYuan Kang #define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
821acebad3SYuan Kang 					 CAAM_MAX_KEY_SIZE)
831acebad3SYuan Kang #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
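/*
 * The DESC_*_LEN values above are worst-case command counts for the shared
 * descriptors built below.  Together with DESC_JOB_IO_LEN they are compared
 * against CAAM_DESC_BYTES_MAX to decide whether the keys can be inlined into
 * the shared descriptor, e.g. for the aead encrypt case:
 *
 *	DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
 *		split_key_pad_len + enckeylen <= CAAM_DESC_BYTES_MAX
 */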
844427b1b4SKim Phillips 
858e8ec596SKim Phillips #ifdef DEBUG
868e8ec596SKim Phillips /* for print_hex_dumps with line references */
878e8ec596SKim Phillips #define xstr(s) str(s)
888e8ec596SKim Phillips #define str(s) #s
898e8ec596SKim Phillips #define debug(format, arg...) printk(format, arg)
908e8ec596SKim Phillips #else
918e8ec596SKim Phillips #define debug(format, arg...)
928e8ec596SKim Phillips #endif
938e8ec596SKim Phillips 
941acebad3SYuan Kang /* Set DK bit in class 1 operation if shared */
951acebad3SYuan Kang static inline void append_dec_op1(u32 *desc, u32 type)
961acebad3SYuan Kang {
971acebad3SYuan Kang 	u32 *jump_cmd, *uncond_jump_cmd;
981acebad3SYuan Kang 
991acebad3SYuan Kang 	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
1001acebad3SYuan Kang 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
1011acebad3SYuan Kang 			 OP_ALG_DECRYPT);
1021acebad3SYuan Kang 	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
1031acebad3SYuan Kang 	set_jump_tgt_here(desc, jump_cmd);
1041acebad3SYuan Kang 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
1051acebad3SYuan Kang 			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
1061acebad3SYuan Kang 	set_jump_tgt_here(desc, uncond_jump_cmd);
1071acebad3SYuan Kang }
1081acebad3SYuan Kang 
1091acebad3SYuan Kang /*
1101acebad3SYuan Kang  * Wait for completion of class 1 key loading before allowing
1111acebad3SYuan Kang  * error propagation
1121acebad3SYuan Kang  */
1131acebad3SYuan Kang static inline void append_dec_shr_done(u32 *desc)
1141acebad3SYuan Kang {
1151acebad3SYuan Kang 	u32 *jump_cmd;
1161acebad3SYuan Kang 
1171acebad3SYuan Kang 	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
1181acebad3SYuan Kang 	set_jump_tgt_here(desc, jump_cmd);
119a2ecb155SKim Phillips 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
1201acebad3SYuan Kang }
1211acebad3SYuan Kang 
1221acebad3SYuan Kang /*
1231acebad3SYuan Kang  * For aead functions, read the payload from req->src and write the
1241acebad3SYuan Kang  * payload to req->dst
1251acebad3SYuan Kang  */
1261acebad3SYuan Kang static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
1271acebad3SYuan Kang {
1281acebad3SYuan Kang 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
1291acebad3SYuan Kang 			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
1301acebad3SYuan Kang 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
1311acebad3SYuan Kang }
1321acebad3SYuan Kang 
1331acebad3SYuan Kang /*
1341acebad3SYuan Kang  * For aead encrypt and decrypt, read iv for both classes
1351acebad3SYuan Kang  */
1361acebad3SYuan Kang static inline void aead_append_ld_iv(u32 *desc, int ivsize)
1371acebad3SYuan Kang {
1381acebad3SYuan Kang 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1391acebad3SYuan Kang 		   LDST_CLASS_1_CCB | ivsize);
1401acebad3SYuan Kang 	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
1411acebad3SYuan Kang }
1421acebad3SYuan Kang 
1431acebad3SYuan Kang /*
144acdca31dSYuan Kang  * For ablkcipher encrypt and decrypt, read from req->src and
145acdca31dSYuan Kang  * write to req->dst
146acdca31dSYuan Kang  */
147acdca31dSYuan Kang static inline void ablkcipher_append_src_dst(u32 *desc)
148acdca31dSYuan Kang {
14970d793ccSKim Phillips 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
15070d793ccSKim Phillips 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
15170d793ccSKim Phillips 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
15270d793ccSKim Phillips 			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
15370d793ccSKim Phillips 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
154acdca31dSYuan Kang }
155acdca31dSYuan Kang 
156acdca31dSYuan Kang /*
1571acebad3SYuan Kang  * Flags indicating whether all data, i.e. src (with assoc and iv) or
1581acebad3SYuan Kang  * dst (with iv only), is contiguous
1591acebad3SYuan Kang  */
1601acebad3SYuan Kang #define GIV_SRC_CONTIG		1
1611acebad3SYuan Kang #define GIV_DST_CONTIG		(1 << 1)
1621acebad3SYuan Kang 
1638e8ec596SKim Phillips /*
1648e8ec596SKim Phillips  * per-session context
1658e8ec596SKim Phillips  */
1668e8ec596SKim Phillips struct caam_ctx {
1678e8ec596SKim Phillips 	struct device *jrdev;
1681acebad3SYuan Kang 	u32 sh_desc_enc[DESC_MAX_USED_LEN];
1691acebad3SYuan Kang 	u32 sh_desc_dec[DESC_MAX_USED_LEN];
1701acebad3SYuan Kang 	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
1711acebad3SYuan Kang 	dma_addr_t sh_desc_enc_dma;
1721acebad3SYuan Kang 	dma_addr_t sh_desc_dec_dma;
1731acebad3SYuan Kang 	dma_addr_t sh_desc_givenc_dma;
1748e8ec596SKim Phillips 	u32 class1_alg_type;
1758e8ec596SKim Phillips 	u32 class2_alg_type;
1768e8ec596SKim Phillips 	u32 alg_op;
1771acebad3SYuan Kang 	u8 key[CAAM_MAX_KEY_SIZE];
178885e9e2fSYuan Kang 	dma_addr_t key_dma;
1798e8ec596SKim Phillips 	unsigned int enckeylen;
1808e8ec596SKim Phillips 	unsigned int split_key_len;
1818e8ec596SKim Phillips 	unsigned int split_key_pad_len;
1828e8ec596SKim Phillips 	unsigned int authsize;
1838e8ec596SKim Phillips };
1848e8ec596SKim Phillips 
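/*
 * For aead, ctx->key holds the MDHA split (authentication) key, padded out
 * to split_key_pad_len, immediately followed by the class 1 encryption key;
 * ctx->key_dma maps that whole region (see aead_setkey() below).
 */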
1851acebad3SYuan Kang static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
1861acebad3SYuan Kang 			    int keys_fit_inline)
1871acebad3SYuan Kang {
1881acebad3SYuan Kang 	if (keys_fit_inline) {
1891acebad3SYuan Kang 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
1901acebad3SYuan Kang 				  ctx->split_key_len, CLASS_2 |
1911acebad3SYuan Kang 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
1921acebad3SYuan Kang 		append_key_as_imm(desc, (void *)ctx->key +
1931acebad3SYuan Kang 				  ctx->split_key_pad_len, ctx->enckeylen,
1941acebad3SYuan Kang 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1951acebad3SYuan Kang 	} else {
1961acebad3SYuan Kang 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
1971acebad3SYuan Kang 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
1981acebad3SYuan Kang 		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
1991acebad3SYuan Kang 			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
2001acebad3SYuan Kang 	}
2011acebad3SYuan Kang }
2021acebad3SYuan Kang 
2031acebad3SYuan Kang static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
2041acebad3SYuan Kang 				  int keys_fit_inline)
2051acebad3SYuan Kang {
2061acebad3SYuan Kang 	u32 *key_jump_cmd;
2071acebad3SYuan Kang 
20861bb86bbSKim Phillips 	init_sh_desc(desc, HDR_SHARE_SERIAL);
2091acebad3SYuan Kang 
2101acebad3SYuan Kang 	/* Skip if already shared */
2111acebad3SYuan Kang 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2121acebad3SYuan Kang 				   JUMP_COND_SHRD);
2131acebad3SYuan Kang 
2141acebad3SYuan Kang 	append_key_aead(desc, ctx, keys_fit_inline);
2151acebad3SYuan Kang 
2161acebad3SYuan Kang 	set_jump_tgt_here(desc, key_jump_cmd);
2171acebad3SYuan Kang 
2181acebad3SYuan Kang 	/* Propagate errors from shared to job descriptor */
219a2ecb155SKim Phillips 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
2201acebad3SYuan Kang }
2211acebad3SYuan Kang 
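/*
 * Build the three shared descriptors (encrypt, decrypt, givencrypt) for an
 * authenc tfm and DMA-map each of them.  Called from aead_setkey() and
 * aead_setauthsize(), i.e. whenever the keys or the authsize change.
 */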
2221acebad3SYuan Kang static int aead_set_sh_desc(struct crypto_aead *aead)
2231acebad3SYuan Kang {
2241acebad3SYuan Kang 	struct aead_tfm *tfm = &aead->base.crt_aead;
2251acebad3SYuan Kang 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2261acebad3SYuan Kang 	struct device *jrdev = ctx->jrdev;
2271acebad3SYuan Kang 	bool keys_fit_inline = false;
2281acebad3SYuan Kang 	u32 *key_jump_cmd, *jump_cmd;
2291acebad3SYuan Kang 	u32 geniv, moveiv;
2301acebad3SYuan Kang 	u32 *desc;
2311acebad3SYuan Kang 
2321acebad3SYuan Kang 	if (!ctx->enckeylen || !ctx->authsize)
2331acebad3SYuan Kang 		return 0;
2341acebad3SYuan Kang 
2351acebad3SYuan Kang 	/*
2361acebad3SYuan Kang 	 * Job Descriptor and Shared Descriptors
2371acebad3SYuan Kang 	 * must all fit into the 64-word Descriptor h/w Buffer
2381acebad3SYuan Kang 	 */
2391acebad3SYuan Kang 	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
2401acebad3SYuan Kang 	    ctx->split_key_pad_len + ctx->enckeylen <=
2411acebad3SYuan Kang 	    CAAM_DESC_BYTES_MAX)
2421acebad3SYuan Kang 		keys_fit_inline = true;
2431acebad3SYuan Kang 
2441acebad3SYuan Kang 	/* aead_encrypt shared descriptor */
2451acebad3SYuan Kang 	desc = ctx->sh_desc_enc;
2461acebad3SYuan Kang 
2471acebad3SYuan Kang 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
2481acebad3SYuan Kang 
2491acebad3SYuan Kang 	/* Class 2 operation */
2501acebad3SYuan Kang 	append_operation(desc, ctx->class2_alg_type |
2511acebad3SYuan Kang 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
2521acebad3SYuan Kang 
2531acebad3SYuan Kang 	/* cryptlen = seqoutlen - authsize */
2541acebad3SYuan Kang 	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
2551acebad3SYuan Kang 
2561acebad3SYuan Kang 	/* assoclen + cryptlen = seqinlen - ivsize */
2571acebad3SYuan Kang 	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
2581acebad3SYuan Kang 
2591acebad3SYuan Kang 	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
2601acebad3SYuan Kang 	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
2611acebad3SYuan Kang 
2621acebad3SYuan Kang 	/* read assoc before reading payload */
2631acebad3SYuan Kang 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
2641acebad3SYuan Kang 			     KEY_VLF);
2651acebad3SYuan Kang 	aead_append_ld_iv(desc, tfm->ivsize);
2661acebad3SYuan Kang 
2671acebad3SYuan Kang 	/* Class 1 operation */
2681acebad3SYuan Kang 	append_operation(desc, ctx->class1_alg_type |
2691acebad3SYuan Kang 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
2701acebad3SYuan Kang 
2711acebad3SYuan Kang 	/* Read and write cryptlen bytes */
2721acebad3SYuan Kang 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2731acebad3SYuan Kang 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2741acebad3SYuan Kang 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
2751acebad3SYuan Kang 
2761acebad3SYuan Kang 	/* Write ICV */
2771acebad3SYuan Kang 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
2781acebad3SYuan Kang 			 LDST_SRCDST_BYTE_CONTEXT);
2791acebad3SYuan Kang 
2801acebad3SYuan Kang 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
2811acebad3SYuan Kang 					      desc_bytes(desc),
2821acebad3SYuan Kang 					      DMA_TO_DEVICE);
2831acebad3SYuan Kang 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
2841acebad3SYuan Kang 		dev_err(jrdev, "unable to map shared descriptor\n");
2851acebad3SYuan Kang 		return -ENOMEM;
2861acebad3SYuan Kang 	}
2871acebad3SYuan Kang #ifdef DEBUG
2881acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
2891acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
2901acebad3SYuan Kang 		       desc_bytes(desc), 1);
2911acebad3SYuan Kang #endif
2921acebad3SYuan Kang 
2931acebad3SYuan Kang 	/*
2941acebad3SYuan Kang 	 * Job Descriptor and Shared Descriptors
2951acebad3SYuan Kang 	 * must all fit into the 64-word Descriptor h/w Buffer
2961acebad3SYuan Kang 	 */
2971acebad3SYuan Kang 	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
2981acebad3SYuan Kang 	    ctx->split_key_pad_len + ctx->enckeylen <=
2991acebad3SYuan Kang 	    CAAM_DESC_BYTES_MAX)
3001acebad3SYuan Kang 		keys_fit_inline = true;
3011acebad3SYuan Kang 
3021acebad3SYuan Kang 	desc = ctx->sh_desc_dec;
3031acebad3SYuan Kang 
3041acebad3SYuan Kang 	/* aead_decrypt shared descriptor */
30561bb86bbSKim Phillips 	init_sh_desc(desc, HDR_SHARE_SERIAL);
3061acebad3SYuan Kang 
3071acebad3SYuan Kang 	/* Skip if already shared */
3081acebad3SYuan Kang 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3091acebad3SYuan Kang 				   JUMP_COND_SHRD);
3101acebad3SYuan Kang 
3111acebad3SYuan Kang 	append_key_aead(desc, ctx, keys_fit_inline);
3121acebad3SYuan Kang 
3131acebad3SYuan Kang 	/* Only propagate error immediately if shared */
3141acebad3SYuan Kang 	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
3151acebad3SYuan Kang 	set_jump_tgt_here(desc, key_jump_cmd);
316a2ecb155SKim Phillips 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
3171acebad3SYuan Kang 	set_jump_tgt_here(desc, jump_cmd);
3181acebad3SYuan Kang 
3191acebad3SYuan Kang 	/* Class 2 operation */
3201acebad3SYuan Kang 	append_operation(desc, ctx->class2_alg_type |
3211acebad3SYuan Kang 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3221acebad3SYuan Kang 
3231acebad3SYuan Kang 	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
3241acebad3SYuan Kang 	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
3251acebad3SYuan Kang 				ctx->authsize + tfm->ivsize);
3261acebad3SYuan Kang 	/* assoclen = (assoclen + cryptlen) - cryptlen */
3271acebad3SYuan Kang 	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3281acebad3SYuan Kang 	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
3291acebad3SYuan Kang 
3301acebad3SYuan Kang 	/* read assoc before reading payload */
3311acebad3SYuan Kang 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
3321acebad3SYuan Kang 			     KEY_VLF);
3331acebad3SYuan Kang 
3341acebad3SYuan Kang 	aead_append_ld_iv(desc, tfm->ivsize);
3351acebad3SYuan Kang 
3361acebad3SYuan Kang 	append_dec_op1(desc, ctx->class1_alg_type);
3371acebad3SYuan Kang 
3381acebad3SYuan Kang 	/* Read and write cryptlen bytes */
3391acebad3SYuan Kang 	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
3401acebad3SYuan Kang 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
3411acebad3SYuan Kang 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
3421acebad3SYuan Kang 
3431acebad3SYuan Kang 	/* Load ICV */
3441acebad3SYuan Kang 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
3451acebad3SYuan Kang 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
3461acebad3SYuan Kang 	append_dec_shr_done(desc);
3471acebad3SYuan Kang 
3481acebad3SYuan Kang 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
3491acebad3SYuan Kang 					      desc_bytes(desc),
3501acebad3SYuan Kang 					      DMA_TO_DEVICE);
3511acebad3SYuan Kang 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
3521acebad3SYuan Kang 		dev_err(jrdev, "unable to map shared descriptor\n");
3531acebad3SYuan Kang 		return -ENOMEM;
3541acebad3SYuan Kang 	}
3551acebad3SYuan Kang #ifdef DEBUG
3561acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
3571acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
3581acebad3SYuan Kang 		       desc_bytes(desc), 1);
3591acebad3SYuan Kang #endif
3601acebad3SYuan Kang 
3611acebad3SYuan Kang 	/*
3621acebad3SYuan Kang 	 * Job Descriptor and Shared Descriptors
3631acebad3SYuan Kang 	 * must all fit into the 64-word Descriptor h/w Buffer
3641acebad3SYuan Kang 	 */
3651acebad3SYuan Kang 	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
3661acebad3SYuan Kang 	    ctx->split_key_pad_len + ctx->enckeylen <=
3671acebad3SYuan Kang 	    CAAM_DESC_BYTES_MAX)
3681acebad3SYuan Kang 		keys_fit_inline = true;
3691acebad3SYuan Kang 
3701acebad3SYuan Kang 	/* aead_givencrypt shared descriptor */
3711acebad3SYuan Kang 	desc = ctx->sh_desc_givenc;
3721acebad3SYuan Kang 
3731acebad3SYuan Kang 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
3741acebad3SYuan Kang 
3751acebad3SYuan Kang 	/* Generate IV */
3761acebad3SYuan Kang 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
3771acebad3SYuan Kang 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
3781acebad3SYuan Kang 		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
3791acebad3SYuan Kang 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
3801acebad3SYuan Kang 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
3811acebad3SYuan Kang 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
3821acebad3SYuan Kang 	append_move(desc, MOVE_SRC_INFIFO |
3831acebad3SYuan Kang 		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
3841acebad3SYuan Kang 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
3851acebad3SYuan Kang 
3861acebad3SYuan Kang 	/* Copy generated IV from class 1 context to the output FIFO */
3871acebad3SYuan Kang 	append_move(desc, MOVE_SRC_CLASS1CTX |
3881acebad3SYuan Kang 		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
3891acebad3SYuan Kang 
3901acebad3SYuan Kang 	/* Return to encryption */
3911acebad3SYuan Kang 	append_operation(desc, ctx->class2_alg_type |
3921acebad3SYuan Kang 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
3931acebad3SYuan Kang 
3941acebad3SYuan Kang 	/* ivsize + cryptlen = seqoutlen - authsize */
3951acebad3SYuan Kang 	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
3961acebad3SYuan Kang 
3971acebad3SYuan Kang 	/* assoclen = seqinlen - (ivsize + cryptlen) */
3981acebad3SYuan Kang 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
3991acebad3SYuan Kang 
4001acebad3SYuan Kang 	/* read assoc before reading payload */
4011acebad3SYuan Kang 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
4021acebad3SYuan Kang 			     KEY_VLF);
4031acebad3SYuan Kang 
4041acebad3SYuan Kang 	/* Copy IV from class 1 ctx to class 2 fifo */
4051acebad3SYuan Kang 	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
4061acebad3SYuan Kang 		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
4071acebad3SYuan Kang 	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
4081acebad3SYuan Kang 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
4091acebad3SYuan Kang 	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
4101acebad3SYuan Kang 			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
4111acebad3SYuan Kang 
4121acebad3SYuan Kang 	/* Class 1 operation */
4131acebad3SYuan Kang 	append_operation(desc, ctx->class1_alg_type |
4141acebad3SYuan Kang 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
4151acebad3SYuan Kang 
4161acebad3SYuan Kang 	/* Will write ivsize + cryptlen */
4171acebad3SYuan Kang 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
4181acebad3SYuan Kang 
4191acebad3SYuan Kang 	/* No need to reload iv */
4201acebad3SYuan Kang 	append_seq_fifo_load(desc, tfm->ivsize,
4211acebad3SYuan Kang 			     FIFOLD_CLASS_SKIP);
4221acebad3SYuan Kang 
4231acebad3SYuan Kang 	/* Will read cryptlen */
4241acebad3SYuan Kang 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
4251acebad3SYuan Kang 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
4261acebad3SYuan Kang 
4271acebad3SYuan Kang 	/* Write ICV */
4281acebad3SYuan Kang 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
4291acebad3SYuan Kang 			 LDST_SRCDST_BYTE_CONTEXT);
4301acebad3SYuan Kang 
4311acebad3SYuan Kang 	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
4321acebad3SYuan Kang 						 desc_bytes(desc),
4331acebad3SYuan Kang 						 DMA_TO_DEVICE);
4341acebad3SYuan Kang 	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
4351acebad3SYuan Kang 		dev_err(jrdev, "unable to map shared descriptor\n");
4361acebad3SYuan Kang 		return -ENOMEM;
4371acebad3SYuan Kang 	}
4381acebad3SYuan Kang #ifdef DEBUG
4391acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
4401acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
4411acebad3SYuan Kang 		       desc_bytes(desc), 1);
4421acebad3SYuan Kang #endif
4431acebad3SYuan Kang 
4441acebad3SYuan Kang 	return 0;
4451acebad3SYuan Kang }
4461acebad3SYuan Kang 
4470e479300SYuan Kang static int aead_setauthsize(struct crypto_aead *authenc,
4488e8ec596SKim Phillips 				    unsigned int authsize)
4498e8ec596SKim Phillips {
4508e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4518e8ec596SKim Phillips 
4528e8ec596SKim Phillips 	ctx->authsize = authsize;
4531acebad3SYuan Kang 	aead_set_sh_desc(authenc);
4548e8ec596SKim Phillips 
4558e8ec596SKim Phillips 	return 0;
4568e8ec596SKim Phillips }
4578e8ec596SKim Phillips 
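/*
 * The MDHA "split key" is the pair of HMAC ipad/opad hash states derived
 * from the raw authentication key; gen_split_key() (key_gen.c) runs a small
 * job on the CAAM to compute it directly into ctx->key.
 */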
4584c1ec1f9SYuan Kang static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
4594c1ec1f9SYuan Kang 			      u32 authkeylen)
4608e8ec596SKim Phillips {
4614c1ec1f9SYuan Kang 	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
4624c1ec1f9SYuan Kang 			       ctx->split_key_pad_len, key_in, authkeylen,
4634c1ec1f9SYuan Kang 			       ctx->alg_op);
4648e8ec596SKim Phillips }
4658e8ec596SKim Phillips 
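/*
 * The key blob passed in here is an rtattr-encoded
 * crypto_authenc_key_param (carrying enckeylen) followed by the
 * authentication key and then the encryption key, i.e. the format used by
 * the generic authenc() template.
 */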
4660e479300SYuan Kang static int aead_setkey(struct crypto_aead *aead,
4678e8ec596SKim Phillips 			       const u8 *key, unsigned int keylen)
4688e8ec596SKim Phillips {
4698e8ec596SKim Phillips 	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
4708e8ec596SKim Phillips 	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
4718e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
4728e8ec596SKim Phillips 	struct device *jrdev = ctx->jrdev;
4738e8ec596SKim Phillips 	struct rtattr *rta = (void *)key;
4748e8ec596SKim Phillips 	struct crypto_authenc_key_param *param;
4758e8ec596SKim Phillips 	unsigned int authkeylen;
4768e8ec596SKim Phillips 	unsigned int enckeylen;
4778e8ec596SKim Phillips 	int ret = 0;
4788e8ec596SKim Phillips 
4798e8ec596SKim Phillips 	param = RTA_DATA(rta);
4808e8ec596SKim Phillips 	enckeylen = be32_to_cpu(param->enckeylen);
4818e8ec596SKim Phillips 
4828e8ec596SKim Phillips 	key += RTA_ALIGN(rta->rta_len);
4838e8ec596SKim Phillips 	keylen -= RTA_ALIGN(rta->rta_len);
4848e8ec596SKim Phillips 
4858e8ec596SKim Phillips 	if (keylen < enckeylen)
4868e8ec596SKim Phillips 		goto badkey;
4878e8ec596SKim Phillips 
4888e8ec596SKim Phillips 	authkeylen = keylen - enckeylen;
4898e8ec596SKim Phillips 
4908e8ec596SKim Phillips 	if (keylen > CAAM_MAX_KEY_SIZE)
4918e8ec596SKim Phillips 		goto badkey;
4928e8ec596SKim Phillips 
4938e8ec596SKim Phillips 	/* Pick class 2 key length from algorithm submask */
4948e8ec596SKim Phillips 	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
4958e8ec596SKim Phillips 				      OP_ALG_ALGSEL_SHIFT] * 2;
4968e8ec596SKim Phillips 	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
4978e8ec596SKim Phillips 
4988e8ec596SKim Phillips #ifdef DEBUG
4998e8ec596SKim Phillips 	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
5008e8ec596SKim Phillips 	       keylen, enckeylen, authkeylen);
5018e8ec596SKim Phillips 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
5028e8ec596SKim Phillips 	       ctx->split_key_len, ctx->split_key_pad_len);
5038e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
5048e8ec596SKim Phillips 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5058e8ec596SKim Phillips #endif
5068e8ec596SKim Phillips 
5074c1ec1f9SYuan Kang 	ret = gen_split_aead_key(ctx, key, authkeylen);
5088e8ec596SKim Phillips 	if (ret)
5098e8ec596SKim Phillips 		goto badkey;
5118e8ec596SKim Phillips 
5128e8ec596SKim Phillips 	/* append the encryption key after the auth split key */
5138e8ec596SKim Phillips 	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
5148e8ec596SKim Phillips 
515885e9e2fSYuan Kang 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
5168e8ec596SKim Phillips 				       enckeylen, DMA_TO_DEVICE);
517885e9e2fSYuan Kang 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
5188e8ec596SKim Phillips 		dev_err(jrdev, "unable to map key i/o memory\n");
5198e8ec596SKim Phillips 		return -ENOMEM;
5208e8ec596SKim Phillips 	}
5218e8ec596SKim Phillips #ifdef DEBUG
5228e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
5238e8ec596SKim Phillips 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
5248e8ec596SKim Phillips 		       ctx->split_key_pad_len + enckeylen, 1);
5258e8ec596SKim Phillips #endif
5268e8ec596SKim Phillips 
5278e8ec596SKim Phillips 	ctx->enckeylen = enckeylen;
5288e8ec596SKim Phillips 
5291acebad3SYuan Kang 	ret = aead_set_sh_desc(aead);
5308e8ec596SKim Phillips 	if (ret) {
531885e9e2fSYuan Kang 		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
5328e8ec596SKim Phillips 				 enckeylen, DMA_TO_DEVICE);
5338e8ec596SKim Phillips 	}
5348e8ec596SKim Phillips 
5358e8ec596SKim Phillips 	return ret;
5368e8ec596SKim Phillips badkey:
5378e8ec596SKim Phillips 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
5388e8ec596SKim Phillips 	return -EINVAL;
5398e8ec596SKim Phillips }
5408e8ec596SKim Phillips 
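/*
 * ablkcipher setkey: the raw cipher key is copied into ctx->key and the
 * encrypt and decrypt shared descriptors are built here directly, since
 * (unlike aead) they do not depend on a separately-set authsize.
 */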
541acdca31dSYuan Kang static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
542acdca31dSYuan Kang 			     const u8 *key, unsigned int keylen)
543acdca31dSYuan Kang {
544acdca31dSYuan Kang 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
545acdca31dSYuan Kang 	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
546acdca31dSYuan Kang 	struct device *jrdev = ctx->jrdev;
547acdca31dSYuan Kang 	int ret = 0;
548acdca31dSYuan Kang 	u32 *key_jump_cmd, *jump_cmd;
549acdca31dSYuan Kang 	u32 *desc;
550acdca31dSYuan Kang 
551acdca31dSYuan Kang #ifdef DEBUG
552acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
553acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
554acdca31dSYuan Kang #endif
555acdca31dSYuan Kang 
556acdca31dSYuan Kang 	memcpy(ctx->key, key, keylen);
557acdca31dSYuan Kang 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
558acdca31dSYuan Kang 				      DMA_TO_DEVICE);
559acdca31dSYuan Kang 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
560acdca31dSYuan Kang 		dev_err(jrdev, "unable to map key i/o memory\n");
561acdca31dSYuan Kang 		return -ENOMEM;
562acdca31dSYuan Kang 	}
563acdca31dSYuan Kang 	ctx->enckeylen = keylen;
564acdca31dSYuan Kang 
565acdca31dSYuan Kang 	/* ablkcipher_encrypt shared descriptor */
566acdca31dSYuan Kang 	desc = ctx->sh_desc_enc;
56761bb86bbSKim Phillips 	init_sh_desc(desc, HDR_SHARE_SERIAL);
568acdca31dSYuan Kang 	/* Skip if already shared */
569acdca31dSYuan Kang 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
570acdca31dSYuan Kang 				   JUMP_COND_SHRD);
571acdca31dSYuan Kang 
572acdca31dSYuan Kang 	/* Load class1 key only */
573acdca31dSYuan Kang 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
574acdca31dSYuan Kang 			  ctx->enckeylen, CLASS_1 |
575acdca31dSYuan Kang 			  KEY_DEST_CLASS_REG);
576acdca31dSYuan Kang 
577acdca31dSYuan Kang 	set_jump_tgt_here(desc, key_jump_cmd);
578acdca31dSYuan Kang 
579acdca31dSYuan Kang 	/* Propagate errors from shared to job descriptor */
580a2ecb155SKim Phillips 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
581acdca31dSYuan Kang 
582acdca31dSYuan Kang 	/* Load iv */
583acdca31dSYuan Kang 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
584acdca31dSYuan Kang 		   LDST_CLASS_1_CCB | tfm->ivsize);
585acdca31dSYuan Kang 
586acdca31dSYuan Kang 	/* Load operation */
587acdca31dSYuan Kang 	append_operation(desc, ctx->class1_alg_type |
588acdca31dSYuan Kang 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
589acdca31dSYuan Kang 
590acdca31dSYuan Kang 	/* Perform operation */
591acdca31dSYuan Kang 	ablkcipher_append_src_dst(desc);
592acdca31dSYuan Kang 
593acdca31dSYuan Kang 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
594acdca31dSYuan Kang 					      desc_bytes(desc),
595acdca31dSYuan Kang 					      DMA_TO_DEVICE);
596acdca31dSYuan Kang 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
597acdca31dSYuan Kang 		dev_err(jrdev, "unable to map shared descriptor\n");
598acdca31dSYuan Kang 		return -ENOMEM;
599acdca31dSYuan Kang 	}
600acdca31dSYuan Kang #ifdef DEBUG
601acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
602acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
603acdca31dSYuan Kang 		       desc_bytes(desc), 1);
604acdca31dSYuan Kang #endif
605acdca31dSYuan Kang 	/* ablkcipher_decrypt shared descriptor */
606acdca31dSYuan Kang 	desc = ctx->sh_desc_dec;
607acdca31dSYuan Kang 
60861bb86bbSKim Phillips 	init_sh_desc(desc, HDR_SHARE_SERIAL);
609acdca31dSYuan Kang 	/* Skip if already shared */
610acdca31dSYuan Kang 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
611acdca31dSYuan Kang 				   JUMP_COND_SHRD);
612acdca31dSYuan Kang 
613acdca31dSYuan Kang 	/* Load class1 key only */
614acdca31dSYuan Kang 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
615acdca31dSYuan Kang 			  ctx->enckeylen, CLASS_1 |
616acdca31dSYuan Kang 			  KEY_DEST_CLASS_REG);
617acdca31dSYuan Kang 
618acdca31dSYuan Kang 	/* As for aead, only propagate error immediately if shared */
619acdca31dSYuan Kang 	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
620acdca31dSYuan Kang 	set_jump_tgt_here(desc, key_jump_cmd);
621a2ecb155SKim Phillips 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
622acdca31dSYuan Kang 	set_jump_tgt_here(desc, jump_cmd);
623acdca31dSYuan Kang 
624acdca31dSYuan Kang 	/* load IV */
625acdca31dSYuan Kang 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
626acdca31dSYuan Kang 		   LDST_CLASS_1_CCB | tfm->ivsize);
627acdca31dSYuan Kang 
628acdca31dSYuan Kang 	/* Choose operation */
629acdca31dSYuan Kang 	append_dec_op1(desc, ctx->class1_alg_type);
630acdca31dSYuan Kang 
631acdca31dSYuan Kang 	/* Perform operation */
632acdca31dSYuan Kang 	ablkcipher_append_src_dst(desc);
633acdca31dSYuan Kang 
634acdca31dSYuan Kang 	/* Wait for key to load before allowing propagating error */
635acdca31dSYuan Kang 	append_dec_shr_done(desc);
636acdca31dSYuan Kang 
637acdca31dSYuan Kang 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
638acdca31dSYuan Kang 					      desc_bytes(desc),
639acdca31dSYuan Kang 					      DMA_TO_DEVICE);
640acdca31dSYuan Kang 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
641acdca31dSYuan Kang 		dev_err(jrdev, "unable to map shared descriptor\n");
642acdca31dSYuan Kang 		return -ENOMEM;
643acdca31dSYuan Kang 	}
644acdca31dSYuan Kang 
645acdca31dSYuan Kang #ifdef DEBUG
646acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
647acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
648acdca31dSYuan Kang 		       desc_bytes(desc), 1);
649acdca31dSYuan Kang #endif
650acdca31dSYuan Kang 
651acdca31dSYuan Kang 	return ret;
652acdca31dSYuan Kang }
653acdca31dSYuan Kang 
6548e8ec596SKim Phillips /*
6551acebad3SYuan Kang  * aead_edesc - s/w-extended aead descriptor
6561acebad3SYuan Kang  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
657643b39b0SYuan Kang  * @assoc_chained: if associated data is chained
6588e8ec596SKim Phillips  * @src_nents: number of segments in input scatterlist
659643b39b0SYuan Kang  * @src_chained: if source is chained
6608e8ec596SKim Phillips  * @dst_nents: number of segments in output scatterlist
661643b39b0SYuan Kang  * @dst_chained: if destination is chained
6621acebad3SYuan Kang  * @iv_dma: dma address of iv for checking continuity and link table
664a299c837SYuan Kang  * @sec4_sg_bytes: length of dma mapped sec4_sg space
665a299c837SYuan Kang  * @sec4_sg_dma: bus physical mapped address of h/w link table
665a299c837SYuan Kang  * @sec4_sg: pointer to h/w link table
6668e8ec596SKim Phillips  * @hw_desc: the h/w job descriptor followed by any referenced link tables
6668e8ec596SKim Phillips  *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
6678e8ec596SKim Phillips  */
6680e479300SYuan Kang struct aead_edesc {
6698e8ec596SKim Phillips 	int assoc_nents;
670643b39b0SYuan Kang 	bool assoc_chained;
6718e8ec596SKim Phillips 	int src_nents;
672643b39b0SYuan Kang 	bool src_chained;
6738e8ec596SKim Phillips 	int dst_nents;
674643b39b0SYuan Kang 	bool dst_chained;
6751acebad3SYuan Kang 	dma_addr_t iv_dma;
676a299c837SYuan Kang 	int sec4_sg_bytes;
677a299c837SYuan Kang 	dma_addr_t sec4_sg_dma;
678a299c837SYuan Kang 	struct sec4_sg_entry *sec4_sg;
6798e8ec596SKim Phillips 	u32 hw_desc[0];
6808e8ec596SKim Phillips };
6818e8ec596SKim Phillips 
682acdca31dSYuan Kang /*
683acdca31dSYuan Kang  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
684acdca31dSYuan Kang  * @src_nents: number of segments in input scatterlist
685643b39b0SYuan Kang  * @src_chained: if source is chained
686acdca31dSYuan Kang  * @dst_nents: number of segments in output scatterlist
687643b39b0SYuan Kang  * @dst_chained: if destination is chained
688acdca31dSYuan Kang  * @iv_dma: dma address of iv for checking continuity and link table
690a299c837SYuan Kang  * @sec4_sg_bytes: length of dma mapped sec4_sg space
691a299c837SYuan Kang  * @sec4_sg_dma: bus physical mapped address of h/w link table
691a299c837SYuan Kang  * @sec4_sg: pointer to h/w link table
692acdca31dSYuan Kang  * @hw_desc: the h/w job descriptor followed by any referenced link tables
692acdca31dSYuan Kang  *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
693acdca31dSYuan Kang  */
694acdca31dSYuan Kang struct ablkcipher_edesc {
695acdca31dSYuan Kang 	int src_nents;
696643b39b0SYuan Kang 	bool src_chained;
697acdca31dSYuan Kang 	int dst_nents;
698643b39b0SYuan Kang 	bool dst_chained;
699acdca31dSYuan Kang 	dma_addr_t iv_dma;
700a299c837SYuan Kang 	int sec4_sg_bytes;
701a299c837SYuan Kang 	dma_addr_t sec4_sg_dma;
702a299c837SYuan Kang 	struct sec4_sg_entry *sec4_sg;
703acdca31dSYuan Kang 	u32 hw_desc[0];
704acdca31dSYuan Kang };
705acdca31dSYuan Kang 
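/*
 * Unmap the source/destination scatterlists, the IV and the sec4 link
 * table that were DMA-mapped for a request.
 */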
7061acebad3SYuan Kang static void caam_unmap(struct device *dev, struct scatterlist *src,
707643b39b0SYuan Kang 		       struct scatterlist *dst, int src_nents,
708643b39b0SYuan Kang 		       bool src_chained, int dst_nents, bool dst_chained,
709a299c837SYuan Kang 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
710a299c837SYuan Kang 		       int sec4_sg_bytes)
7111acebad3SYuan Kang {
712643b39b0SYuan Kang 	if (dst != src) {
713643b39b0SYuan Kang 		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
714643b39b0SYuan Kang 				     src_chained);
715643b39b0SYuan Kang 		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
716643b39b0SYuan Kang 				     dst_chained);
7171acebad3SYuan Kang 	} else {
718643b39b0SYuan Kang 		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
719643b39b0SYuan Kang 				     DMA_BIDIRECTIONAL, src_chained);
7201acebad3SYuan Kang 	}
7211acebad3SYuan Kang 
7221acebad3SYuan Kang 	if (iv_dma)
7231acebad3SYuan Kang 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
724a299c837SYuan Kang 	if (sec4_sg_bytes)
725a299c837SYuan Kang 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
7261acebad3SYuan Kang 				 DMA_TO_DEVICE);
7271acebad3SYuan Kang }
7281acebad3SYuan Kang 
7290e479300SYuan Kang static void aead_unmap(struct device *dev,
7300e479300SYuan Kang 		       struct aead_edesc *edesc,
7310e479300SYuan Kang 		       struct aead_request *req)
7328e8ec596SKim Phillips {
7331acebad3SYuan Kang 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
7341acebad3SYuan Kang 	int ivsize = crypto_aead_ivsize(aead);
7351acebad3SYuan Kang 
736643b39b0SYuan Kang 	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
737643b39b0SYuan Kang 			     DMA_TO_DEVICE, edesc->assoc_chained);
7388e8ec596SKim Phillips 
7391acebad3SYuan Kang 	caam_unmap(dev, req->src, req->dst,
740643b39b0SYuan Kang 		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
741643b39b0SYuan Kang 		   edesc->dst_chained, edesc->iv_dma, ivsize,
742643b39b0SYuan Kang 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
7438e8ec596SKim Phillips }
7448e8ec596SKim Phillips 
745acdca31dSYuan Kang static void ablkcipher_unmap(struct device *dev,
746acdca31dSYuan Kang 			     struct ablkcipher_edesc *edesc,
747acdca31dSYuan Kang 			     struct ablkcipher_request *req)
748acdca31dSYuan Kang {
749acdca31dSYuan Kang 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
750acdca31dSYuan Kang 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
751acdca31dSYuan Kang 
752acdca31dSYuan Kang 	caam_unmap(dev, req->src, req->dst,
753643b39b0SYuan Kang 		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
754643b39b0SYuan Kang 		   edesc->dst_chained, edesc->iv_dma, ivsize,
755643b39b0SYuan Kang 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
756acdca31dSYuan Kang }
757acdca31dSYuan Kang 
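/*
 * Job ring completion callback for aead encrypt requests: report any CAAM
 * status error, unmap the request and complete it back to the crypto API.
 */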
7580e479300SYuan Kang static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
7598e8ec596SKim Phillips 				   void *context)
7608e8ec596SKim Phillips {
7610e479300SYuan Kang 	struct aead_request *req = context;
7620e479300SYuan Kang 	struct aead_edesc *edesc;
7638e8ec596SKim Phillips #ifdef DEBUG
7640e479300SYuan Kang 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
7658e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
7661acebad3SYuan Kang 	int ivsize = crypto_aead_ivsize(aead);
7678e8ec596SKim Phillips 
7688e8ec596SKim Phillips 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
7698e8ec596SKim Phillips #endif
7701acebad3SYuan Kang 
7710e479300SYuan Kang 	edesc = (struct aead_edesc *)((char *)desc -
7720e479300SYuan Kang 		 offsetof(struct aead_edesc, hw_desc));
7738e8ec596SKim Phillips 
7748e8ec596SKim Phillips 	if (err) {
775de2954d6SKim Phillips 		char tmp[CAAM_ERROR_STR_MAX];
7768e8ec596SKim Phillips 
7778e8ec596SKim Phillips 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
7788e8ec596SKim Phillips 	}
7798e8ec596SKim Phillips 
7800e479300SYuan Kang 	aead_unmap(jrdev, edesc, req);
7818e8ec596SKim Phillips 
7828e8ec596SKim Phillips #ifdef DEBUG
7838e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
7840e479300SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
7850e479300SYuan Kang 		       req->assoclen, 1);
7868e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
7870e479300SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
7888e8ec596SKim Phillips 		       edesc->src_nents ? 100 : ivsize, 1);
7898e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
7900e479300SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
7910e479300SYuan Kang 		       edesc->src_nents ? 100 : req->cryptlen +
7928e8ec596SKim Phillips 		       ctx->authsize + 4, 1);
7938e8ec596SKim Phillips #endif
7948e8ec596SKim Phillips 
7958e8ec596SKim Phillips 	kfree(edesc);
7968e8ec596SKim Phillips 
7970e479300SYuan Kang 	aead_request_complete(req, err);
7988e8ec596SKim Phillips }
7998e8ec596SKim Phillips 
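/*
 * Job ring completion callback for aead decrypt requests; an ICV mismatch
 * reported by the CCB is translated into -EBADMSG below.
 */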
8000e479300SYuan Kang static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
8018e8ec596SKim Phillips 				   void *context)
8028e8ec596SKim Phillips {
8030e479300SYuan Kang 	struct aead_request *req = context;
8040e479300SYuan Kang 	struct aead_edesc *edesc;
8058e8ec596SKim Phillips #ifdef DEBUG
8060e479300SYuan Kang 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
8078e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
8081acebad3SYuan Kang 	int ivsize = crypto_aead_ivsize(aead);
8098e8ec596SKim Phillips 
8108e8ec596SKim Phillips 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
8118e8ec596SKim Phillips #endif
8121acebad3SYuan Kang 
8130e479300SYuan Kang 	edesc = (struct aead_edesc *)((char *)desc -
8140e479300SYuan Kang 		 offsetof(struct aead_edesc, hw_desc));
8158e8ec596SKim Phillips 
8161acebad3SYuan Kang #ifdef DEBUG
8171acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
8181acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
8191acebad3SYuan Kang 		       ivsize, 1);
8201acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
8211acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
8221acebad3SYuan Kang 		       req->cryptlen, 1);
8231acebad3SYuan Kang #endif
8241acebad3SYuan Kang 
8258e8ec596SKim Phillips 	if (err) {
826de2954d6SKim Phillips 		char tmp[CAAM_ERROR_STR_MAX];
8278e8ec596SKim Phillips 
8288e8ec596SKim Phillips 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
8298e8ec596SKim Phillips 	}
8308e8ec596SKim Phillips 
8310e479300SYuan Kang 	aead_unmap(jrdev, edesc, req);
8328e8ec596SKim Phillips 
8338e8ec596SKim Phillips 	/*
8348e8ec596SKim Phillips 	 * verify hw auth check passed else return -EBADMSG
8358e8ec596SKim Phillips 	 */
8368e8ec596SKim Phillips 	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
8378e8ec596SKim Phillips 		err = -EBADMSG;
8388e8ec596SKim Phillips 
8398e8ec596SKim Phillips #ifdef DEBUG
8408e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
8418e8ec596SKim Phillips 		       DUMP_PREFIX_ADDRESS, 16, 4,
8420e479300SYuan Kang 		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
8430e479300SYuan Kang 		       sizeof(struct iphdr) + req->assoclen +
8440e479300SYuan Kang 		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
8458e8ec596SKim Phillips 		       ctx->authsize + 36, 1);
846a299c837SYuan Kang 	if (!err && edesc->sec4_sg_bytes) {
8470e479300SYuan Kang 		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
8488e8ec596SKim Phillips 		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
8498e8ec596SKim Phillips 			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
8508e8ec596SKim Phillips 			sg->length + ctx->authsize + 16, 1);
8518e8ec596SKim Phillips 	}
8528e8ec596SKim Phillips #endif
8531acebad3SYuan Kang 
8548e8ec596SKim Phillips 	kfree(edesc);
8558e8ec596SKim Phillips 
8560e479300SYuan Kang 	aead_request_complete(req, err);
8578e8ec596SKim Phillips }
8588e8ec596SKim Phillips 
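/*
 * Job ring completion callbacks for ablkcipher encrypt/decrypt requests:
 * report any CAAM status error, unmap the request and hand the result back
 * to the crypto layer.
 */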
859acdca31dSYuan Kang static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
860acdca31dSYuan Kang 				   void *context)
861acdca31dSYuan Kang {
862acdca31dSYuan Kang 	struct ablkcipher_request *req = context;
863acdca31dSYuan Kang 	struct ablkcipher_edesc *edesc;
864acdca31dSYuan Kang #ifdef DEBUG
865acdca31dSYuan Kang 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
866acdca31dSYuan Kang 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
867acdca31dSYuan Kang 
868acdca31dSYuan Kang 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
869acdca31dSYuan Kang #endif
870acdca31dSYuan Kang 
871acdca31dSYuan Kang 	edesc = (struct ablkcipher_edesc *)((char *)desc -
872acdca31dSYuan Kang 		 offsetof(struct ablkcipher_edesc, hw_desc));
873acdca31dSYuan Kang 
874acdca31dSYuan Kang 	if (err) {
875acdca31dSYuan Kang 		char tmp[CAAM_ERROR_STR_MAX];
876acdca31dSYuan Kang 
877acdca31dSYuan Kang 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
878acdca31dSYuan Kang 	}
879acdca31dSYuan Kang 
880acdca31dSYuan Kang #ifdef DEBUG
881acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
882acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
883acdca31dSYuan Kang 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
884acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
885acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
886acdca31dSYuan Kang 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
887acdca31dSYuan Kang #endif
888acdca31dSYuan Kang 
889acdca31dSYuan Kang 	ablkcipher_unmap(jrdev, edesc, req);
890acdca31dSYuan Kang 	kfree(edesc);
891acdca31dSYuan Kang 
892acdca31dSYuan Kang 	ablkcipher_request_complete(req, err);
893acdca31dSYuan Kang }
894acdca31dSYuan Kang 
895acdca31dSYuan Kang static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
896acdca31dSYuan Kang 				    void *context)
897acdca31dSYuan Kang {
898acdca31dSYuan Kang 	struct ablkcipher_request *req = context;
899acdca31dSYuan Kang 	struct ablkcipher_edesc *edesc;
900acdca31dSYuan Kang #ifdef DEBUG
901acdca31dSYuan Kang 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
902acdca31dSYuan Kang 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
903acdca31dSYuan Kang 
904acdca31dSYuan Kang 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
905acdca31dSYuan Kang #endif
906acdca31dSYuan Kang 
907acdca31dSYuan Kang 	edesc = (struct ablkcipher_edesc *)((char *)desc -
908acdca31dSYuan Kang 		 offsetof(struct ablkcipher_edesc, hw_desc));
909acdca31dSYuan Kang 	if (err) {
910acdca31dSYuan Kang 		char tmp[CAAM_ERROR_STR_MAX];
911acdca31dSYuan Kang 
912acdca31dSYuan Kang 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
913acdca31dSYuan Kang 	}
914acdca31dSYuan Kang 
915acdca31dSYuan Kang #ifdef DEBUG
916acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
917acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
918acdca31dSYuan Kang 		       ivsize, 1);
919acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
920acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
921acdca31dSYuan Kang 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
922acdca31dSYuan Kang #endif
923acdca31dSYuan Kang 
924acdca31dSYuan Kang 	ablkcipher_unmap(jrdev, edesc, req);
925acdca31dSYuan Kang 	kfree(edesc);
926acdca31dSYuan Kang 
927acdca31dSYuan Kang 	ablkcipher_request_complete(req, err);
928acdca31dSYuan Kang }
929acdca31dSYuan Kang 
9308e8ec596SKim Phillips /*
9311acebad3SYuan Kang  * Fill in aead job descriptor
9328e8ec596SKim Phillips  */
9331acebad3SYuan Kang static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
9341acebad3SYuan Kang 			  struct aead_edesc *edesc,
9351acebad3SYuan Kang 			  struct aead_request *req,
9361acebad3SYuan Kang 			  bool all_contig, bool encrypt)
9378e8ec596SKim Phillips {
9380e479300SYuan Kang 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
9398e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
9408e8ec596SKim Phillips 	int ivsize = crypto_aead_ivsize(aead);
9418e8ec596SKim Phillips 	int authsize = ctx->authsize;
9421acebad3SYuan Kang 	u32 *desc = edesc->hw_desc;
9431acebad3SYuan Kang 	u32 out_options = 0, in_options;
9441acebad3SYuan Kang 	dma_addr_t dst_dma, src_dma;
945a299c837SYuan Kang 	int len, sec4_sg_index = 0;
9468e8ec596SKim Phillips 
9471acebad3SYuan Kang #ifdef DEBUG
9488e8ec596SKim Phillips 	debug("assoclen %d cryptlen %d authsize %d\n",
9490e479300SYuan Kang 	      req->assoclen, req->cryptlen, authsize);
9508e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
9510e479300SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
9520e479300SYuan Kang 		       req->assoclen, 1);
9538e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
9541acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
9558e8ec596SKim Phillips 		       edesc->src_nents ? 100 : ivsize, 1);
9568e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
9570e479300SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
9581acebad3SYuan Kang 			edesc->src_nents ? 100 : req->cryptlen, 1);
9598e8ec596SKim Phillips 	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
9608e8ec596SKim Phillips 		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
9618e8ec596SKim Phillips 		       desc_bytes(sh_desc), 1);
9628e8ec596SKim Phillips #endif
9631acebad3SYuan Kang 
9641acebad3SYuan Kang 	len = desc_len(sh_desc);
9651acebad3SYuan Kang 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9661acebad3SYuan Kang 
9671acebad3SYuan Kang 	if (all_contig) {
9681acebad3SYuan Kang 		src_dma = sg_dma_address(req->assoc);
9691acebad3SYuan Kang 		in_options = 0;
9701acebad3SYuan Kang 	} else {
971a299c837SYuan Kang 		src_dma = edesc->sec4_sg_dma;
972a299c837SYuan Kang 		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
9731acebad3SYuan Kang 				 (edesc->src_nents ? : 1);
9741acebad3SYuan Kang 		in_options = LDST_SGF;
9751acebad3SYuan Kang 	}
9761acebad3SYuan Kang 	if (encrypt)
9771acebad3SYuan Kang 		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
9781acebad3SYuan Kang 				  req->cryptlen - authsize, in_options);
9798e8ec596SKim Phillips 	else
9801acebad3SYuan Kang 		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
9811acebad3SYuan Kang 				  req->cryptlen, in_options);
9828e8ec596SKim Phillips 
9831acebad3SYuan Kang 	if (likely(req->src == req->dst)) {
9841acebad3SYuan Kang 		if (all_contig) {
9851acebad3SYuan Kang 			dst_dma = sg_dma_address(req->src);
9868e8ec596SKim Phillips 		} else {
987a299c837SYuan Kang 			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
9881acebad3SYuan Kang 				  ((edesc->assoc_nents ? : 1) + 1);
9891acebad3SYuan Kang 			out_options = LDST_SGF;
9908e8ec596SKim Phillips 		}
9918e8ec596SKim Phillips 	} else {
9928e8ec596SKim Phillips 		if (!edesc->dst_nents) {
9930e479300SYuan Kang 			dst_dma = sg_dma_address(req->dst);
9948e8ec596SKim Phillips 		} else {
995a299c837SYuan Kang 			dst_dma = edesc->sec4_sg_dma +
996a299c837SYuan Kang 				  sec4_sg_index *
997a299c837SYuan Kang 				  sizeof(struct sec4_sg_entry);
9981acebad3SYuan Kang 			out_options = LDST_SGF;
9998e8ec596SKim Phillips 		}
10008e8ec596SKim Phillips 	}
10018e8ec596SKim Phillips 	if (encrypt)
10021acebad3SYuan Kang 		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
10038e8ec596SKim Phillips 	else
10041acebad3SYuan Kang 		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
10051acebad3SYuan Kang 				   out_options);
10061acebad3SYuan Kang }
10071acebad3SYuan Kang 
10081acebad3SYuan Kang /*
10091acebad3SYuan Kang  * Fill in aead givencrypt job descriptor
10101acebad3SYuan Kang  */
10111acebad3SYuan Kang static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
10121acebad3SYuan Kang 			      struct aead_edesc *edesc,
10131acebad3SYuan Kang 			      struct aead_request *req,
10141acebad3SYuan Kang 			      int contig)
10151acebad3SYuan Kang {
10161acebad3SYuan Kang 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
10171acebad3SYuan Kang 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
10181acebad3SYuan Kang 	int ivsize = crypto_aead_ivsize(aead);
10191acebad3SYuan Kang 	int authsize = ctx->authsize;
10201acebad3SYuan Kang 	u32 *desc = edesc->hw_desc;
10211acebad3SYuan Kang 	u32 out_options = 0, in_options;
10221acebad3SYuan Kang 	dma_addr_t dst_dma, src_dma;
1023a299c837SYuan Kang 	int len, sec4_sg_index = 0;
10248e8ec596SKim Phillips 
10258e8ec596SKim Phillips #ifdef DEBUG
10261acebad3SYuan Kang 	debug("assoclen %d cryptlen %d authsize %d\n",
10271acebad3SYuan Kang 	      req->assoclen, req->cryptlen, authsize);
10281acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
10291acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
10301acebad3SYuan Kang 		       req->assoclen, 1);
10311acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
10321acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
10331acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
10341acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
10351acebad3SYuan Kang 			edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
10361acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
10371acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
10381acebad3SYuan Kang 		       desc_bytes(sh_desc), 1);
10398e8ec596SKim Phillips #endif
10408e8ec596SKim Phillips 
10411acebad3SYuan Kang 	len = desc_len(sh_desc);
10421acebad3SYuan Kang 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
10431acebad3SYuan Kang 
10441acebad3SYuan Kang 	if (contig & GIV_SRC_CONTIG) {
10451acebad3SYuan Kang 		src_dma = sg_dma_address(req->assoc);
10461acebad3SYuan Kang 		in_options = 0;
10471acebad3SYuan Kang 	} else {
1048a299c837SYuan Kang 		src_dma = edesc->sec4_sg_dma;
1049a299c837SYuan Kang 		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
10501acebad3SYuan Kang 		in_options = LDST_SGF;
10511acebad3SYuan Kang 	}
10521acebad3SYuan Kang 	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
10531acebad3SYuan Kang 			  req->cryptlen - authsize, in_options);
10541acebad3SYuan Kang 
10551acebad3SYuan Kang 	if (contig & GIV_DST_CONTIG) {
10561acebad3SYuan Kang 		dst_dma = edesc->iv_dma;
10571acebad3SYuan Kang 	} else {
10581acebad3SYuan Kang 		if (likely(req->src == req->dst)) {
1059a299c837SYuan Kang 			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
10601acebad3SYuan Kang 				  edesc->assoc_nents;
10611acebad3SYuan Kang 			out_options = LDST_SGF;
10621acebad3SYuan Kang 		} else {
1063a299c837SYuan Kang 			dst_dma = edesc->sec4_sg_dma +
1064a299c837SYuan Kang 				  sec4_sg_index *
1065a299c837SYuan Kang 				  sizeof(struct sec4_sg_entry);
10661acebad3SYuan Kang 			out_options = LDST_SGF;
10671acebad3SYuan Kang 		}
10688e8ec596SKim Phillips 	}
10698e8ec596SKim Phillips 
10701acebad3SYuan Kang 	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
10718e8ec596SKim Phillips }
10728e8ec596SKim Phillips 
10738e8ec596SKim Phillips /*
1074acdca31dSYuan Kang  * Fill in ablkcipher job descriptor
1075acdca31dSYuan Kang  */
1076acdca31dSYuan Kang static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1077acdca31dSYuan Kang 				struct ablkcipher_edesc *edesc,
1078acdca31dSYuan Kang 				struct ablkcipher_request *req,
1079acdca31dSYuan Kang 				bool iv_contig)
1080acdca31dSYuan Kang {
1081acdca31dSYuan Kang 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1082acdca31dSYuan Kang 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1083acdca31dSYuan Kang 	u32 *desc = edesc->hw_desc;
1084acdca31dSYuan Kang 	u32 out_options = 0, in_options;
1085acdca31dSYuan Kang 	dma_addr_t dst_dma, src_dma;
1086a299c837SYuan Kang 	int len, sec4_sg_index = 0;
1087acdca31dSYuan Kang 
1088acdca31dSYuan Kang #ifdef DEBUG
1089acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
1090acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1091acdca31dSYuan Kang 		       ivsize, 1);
1092acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
1093acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1094acdca31dSYuan Kang 		       edesc->src_nents ? 100 : req->nbytes, 1);
1095acdca31dSYuan Kang #endif
1096acdca31dSYuan Kang 
1097acdca31dSYuan Kang 	len = desc_len(sh_desc);
1098acdca31dSYuan Kang 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1099acdca31dSYuan Kang 
1100acdca31dSYuan Kang 	if (iv_contig) {
1101acdca31dSYuan Kang 		src_dma = edesc->iv_dma;
1102acdca31dSYuan Kang 		in_options = 0;
1103acdca31dSYuan Kang 	} else {
1104a299c837SYuan Kang 		src_dma = edesc->sec4_sg_dma;
1105a299c837SYuan Kang 		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1106acdca31dSYuan Kang 		in_options = LDST_SGF;
1107acdca31dSYuan Kang 	}
1108acdca31dSYuan Kang 	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1109acdca31dSYuan Kang 
1110acdca31dSYuan Kang 	if (likely(req->src == req->dst)) {
1111acdca31dSYuan Kang 		if (!edesc->src_nents && iv_contig) {
1112acdca31dSYuan Kang 			dst_dma = sg_dma_address(req->src);
1113acdca31dSYuan Kang 		} else {
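			/* in-place via S/G table: start past the IV entry */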
1114a299c837SYuan Kang 			dst_dma = edesc->sec4_sg_dma +
1115a299c837SYuan Kang 				sizeof(struct sec4_sg_entry);
1116acdca31dSYuan Kang 			out_options = LDST_SGF;
1117acdca31dSYuan Kang 		}
1118acdca31dSYuan Kang 	} else {
1119acdca31dSYuan Kang 		if (!edesc->dst_nents) {
1120acdca31dSYuan Kang 			dst_dma = sg_dma_address(req->dst);
1121acdca31dSYuan Kang 		} else {
1122a299c837SYuan Kang 			dst_dma = edesc->sec4_sg_dma +
1123a299c837SYuan Kang 				sec4_sg_index * sizeof(struct sec4_sg_entry);
1124acdca31dSYuan Kang 			out_options = LDST_SGF;
1125acdca31dSYuan Kang 		}
1126acdca31dSYuan Kang 	}
1127acdca31dSYuan Kang 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1128acdca31dSYuan Kang }
1129acdca31dSYuan Kang 
1130acdca31dSYuan Kang /*
11311acebad3SYuan Kang  * allocate and map the aead extended descriptor
11328e8ec596SKim Phillips  */
11330e479300SYuan Kang static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
11341acebad3SYuan Kang 					   int desc_bytes, bool *all_contig_ptr)
11358e8ec596SKim Phillips {
11360e479300SYuan Kang 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
11378e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
11388e8ec596SKim Phillips 	struct device *jrdev = ctx->jrdev;
11391acebad3SYuan Kang 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
11401acebad3SYuan Kang 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
11411acebad3SYuan Kang 	int assoc_nents, src_nents, dst_nents = 0;
11420e479300SYuan Kang 	struct aead_edesc *edesc;
11431acebad3SYuan Kang 	dma_addr_t iv_dma = 0;
11441acebad3SYuan Kang 	int sgc;
11451acebad3SYuan Kang 	bool all_contig = true;
1146643b39b0SYuan Kang 	bool assoc_chained = false, src_chained = false, dst_chained = false;
11471acebad3SYuan Kang 	int ivsize = crypto_aead_ivsize(aead);
1148a299c837SYuan Kang 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
11498e8ec596SKim Phillips 
1150643b39b0SYuan Kang 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1151643b39b0SYuan Kang 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
11528e8ec596SKim Phillips 
11531acebad3SYuan Kang 	if (unlikely(req->dst != req->src))
1154643b39b0SYuan Kang 		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
11558e8ec596SKim Phillips 
1156643b39b0SYuan Kang 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1157643b39b0SYuan Kang 				 DMA_BIDIRECTIONAL, assoc_chained);
11581acebad3SYuan Kang 	if (likely(req->src == req->dst)) {
1159643b39b0SYuan Kang 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1160643b39b0SYuan Kang 					 DMA_BIDIRECTIONAL, src_chained);
11611acebad3SYuan Kang 	} else {
1162643b39b0SYuan Kang 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1163643b39b0SYuan Kang 					 DMA_TO_DEVICE, src_chained);
1164643b39b0SYuan Kang 		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1165643b39b0SYuan Kang 					 DMA_FROM_DEVICE, dst_chained);
11668e8ec596SKim Phillips 	}
11678e8ec596SKim Phillips 
11681acebad3SYuan Kang 	/* Check if assoc, IV and src are contiguous in DMA; else a S/G table is needed */
11691acebad3SYuan Kang 	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
11701acebad3SYuan Kang 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
11711acebad3SYuan Kang 	    iv_dma || src_nents || iv_dma + ivsize !=
11721acebad3SYuan Kang 	    sg_dma_address(req->src)) {
11731acebad3SYuan Kang 		all_contig = false;
11741acebad3SYuan Kang 		assoc_nents = assoc_nents ? : 1;
11751acebad3SYuan Kang 		src_nents = src_nents ? : 1;
1176a299c837SYuan Kang 		sec4_sg_len = assoc_nents + 1 + src_nents;
11771acebad3SYuan Kang 	}
1178a299c837SYuan Kang 	sec4_sg_len += dst_nents;
11791acebad3SYuan Kang 
1180a299c837SYuan Kang 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
11818e8ec596SKim Phillips 
11828e8ec596SKim Phillips 	/* allocate space for base edesc and hw desc commands, link tables */
11830e479300SYuan Kang 	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1184a299c837SYuan Kang 			sec4_sg_bytes, GFP_DMA | flags);
11858e8ec596SKim Phillips 	if (!edesc) {
11868e8ec596SKim Phillips 		dev_err(jrdev, "could not allocate extended descriptor\n");
11878e8ec596SKim Phillips 		return ERR_PTR(-ENOMEM);
11888e8ec596SKim Phillips 	}
11898e8ec596SKim Phillips 
11908e8ec596SKim Phillips 	edesc->assoc_nents = assoc_nents;
1191643b39b0SYuan Kang 	edesc->assoc_chained = assoc_chained;
11928e8ec596SKim Phillips 	edesc->src_nents = src_nents;
1193643b39b0SYuan Kang 	edesc->src_chained = src_chained;
11948e8ec596SKim Phillips 	edesc->dst_nents = dst_nents;
1195643b39b0SYuan Kang 	edesc->dst_chained = dst_chained;
11961acebad3SYuan Kang 	edesc->iv_dma = iv_dma;
1197a299c837SYuan Kang 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1198a299c837SYuan Kang 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
11998e8ec596SKim Phillips 			 desc_bytes;
1200a299c837SYuan Kang 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1201a299c837SYuan Kang 					    sec4_sg_bytes, DMA_TO_DEVICE);
12021acebad3SYuan Kang 	*all_contig_ptr = all_contig;
12031acebad3SYuan Kang 
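	/*
	 * S/G table layout when not contiguous: assoc entries, one IV
	 * entry, then src entries; dst entries (if any) are appended.
	 */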
1204a299c837SYuan Kang 	sec4_sg_index = 0;
12051acebad3SYuan Kang 	if (!all_contig) {
1206a299c837SYuan Kang 		sg_to_sec4_sg(req->assoc,
12071acebad3SYuan Kang 			      (assoc_nents ? : 1),
1208a299c837SYuan Kang 			      edesc->sec4_sg +
1209a299c837SYuan Kang 			      sec4_sg_index, 0);
1210a299c837SYuan Kang 		sec4_sg_index += assoc_nents ? : 1;
1211a299c837SYuan Kang 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
12121acebad3SYuan Kang 				   iv_dma, ivsize, 0);
1213a299c837SYuan Kang 		sec4_sg_index += 1;
1214a299c837SYuan Kang 		sg_to_sec4_sg_last(req->src,
12151acebad3SYuan Kang 				   (src_nents ? : 1),
1216a299c837SYuan Kang 				   edesc->sec4_sg +
1217a299c837SYuan Kang 				   sec4_sg_index, 0);
1218a299c837SYuan Kang 		sec4_sg_index += src_nents ? : 1;
12191acebad3SYuan Kang 	}
12201acebad3SYuan Kang 	if (dst_nents) {
1221a299c837SYuan Kang 		sg_to_sec4_sg_last(req->dst, dst_nents,
1222a299c837SYuan Kang 				   edesc->sec4_sg + sec4_sg_index, 0);
12231acebad3SYuan Kang 	}
12248e8ec596SKim Phillips 
12258e8ec596SKim Phillips 	return edesc;
12268e8ec596SKim Phillips }
12278e8ec596SKim Phillips 
12280e479300SYuan Kang static int aead_encrypt(struct aead_request *req)
12298e8ec596SKim Phillips {
12300e479300SYuan Kang 	struct aead_edesc *edesc;
12318e8ec596SKim Phillips 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
12328e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
12338e8ec596SKim Phillips 	struct device *jrdev = ctx->jrdev;
12341acebad3SYuan Kang 	bool all_contig;
12358e8ec596SKim Phillips 	u32 *desc;
12361acebad3SYuan Kang 	int ret = 0;
12371acebad3SYuan Kang 
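	/* length math below treats the appended ICV as part of cryptlen */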
12381acebad3SYuan Kang 	req->cryptlen += ctx->authsize;
12398e8ec596SKim Phillips 
12408e8ec596SKim Phillips 	/* allocate extended descriptor */
12411acebad3SYuan Kang 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
12421acebad3SYuan Kang 				 CAAM_CMD_SZ, &all_contig);
12438e8ec596SKim Phillips 	if (IS_ERR(edesc))
12448e8ec596SKim Phillips 		return PTR_ERR(edesc);
12458e8ec596SKim Phillips 
12461acebad3SYuan Kang 	/* Create and submit job descriptor */
12471acebad3SYuan Kang 	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
12481acebad3SYuan Kang 		      all_contig, true);
12491acebad3SYuan Kang #ifdef DEBUG
12501acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
12511acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
12521acebad3SYuan Kang 		       desc_bytes(edesc->hw_desc), 1);
12531acebad3SYuan Kang #endif
12541acebad3SYuan Kang 
12558e8ec596SKim Phillips 	desc = edesc->hw_desc;
12561acebad3SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
12571acebad3SYuan Kang 	if (!ret) {
12581acebad3SYuan Kang 		ret = -EINPROGRESS;
12591acebad3SYuan Kang 	} else {
12601acebad3SYuan Kang 		aead_unmap(jrdev, edesc, req);
12611acebad3SYuan Kang 		kfree(edesc);
12621acebad3SYuan Kang 	}
12638e8ec596SKim Phillips 
12641acebad3SYuan Kang 	return ret;
12658e8ec596SKim Phillips }
12668e8ec596SKim Phillips 
12670e479300SYuan Kang static int aead_decrypt(struct aead_request *req)
12688e8ec596SKim Phillips {
12691acebad3SYuan Kang 	struct aead_edesc *edesc;
12700e479300SYuan Kang 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
12710e479300SYuan Kang 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
12720e479300SYuan Kang 	struct device *jrdev = ctx->jrdev;
12731acebad3SYuan Kang 	bool all_contig;
12740e479300SYuan Kang 	u32 *desc;
12751acebad3SYuan Kang 	int ret = 0;
12760e479300SYuan Kang 
12770e479300SYuan Kang 	/* allocate extended descriptor */
12781acebad3SYuan Kang 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
12791acebad3SYuan Kang 				 CAAM_CMD_SZ, &all_contig);
12800e479300SYuan Kang 	if (IS_ERR(edesc))
12810e479300SYuan Kang 		return PTR_ERR(edesc);
12820e479300SYuan Kang 
12831acebad3SYuan Kang #ifdef DEBUG
12841acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
12851acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
12861acebad3SYuan Kang 		       req->cryptlen, 1);
12871acebad3SYuan Kang #endif
12881acebad3SYuan Kang 
12891acebad3SYuan Kang 	/* Create and submit job descriptor */
12901acebad3SYuan Kang 	init_aead_job(ctx->sh_desc_dec,
12911acebad3SYuan Kang 		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
12921acebad3SYuan Kang #ifdef DEBUG
12931acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
12941acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
12951acebad3SYuan Kang 		       desc_bytes(edesc->hw_desc), 1);
12961acebad3SYuan Kang #endif
12971acebad3SYuan Kang 
12980e479300SYuan Kang 	desc = edesc->hw_desc;
12991acebad3SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
13001acebad3SYuan Kang 	if (!ret) {
13011acebad3SYuan Kang 		ret = -EINPROGRESS;
13021acebad3SYuan Kang 	} else {
13031acebad3SYuan Kang 		aead_unmap(jrdev, edesc, req);
13041acebad3SYuan Kang 		kfree(edesc);
13051acebad3SYuan Kang 	}
13060e479300SYuan Kang 
13071acebad3SYuan Kang 	return ret;
13081acebad3SYuan Kang }
13090e479300SYuan Kang 
13101acebad3SYuan Kang /*
13111acebad3SYuan Kang  * allocate and map the aead extended descriptor for aead givencrypt
13121acebad3SYuan Kang  */
13131acebad3SYuan Kang static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
13141acebad3SYuan Kang 					       *greq, int desc_bytes,
13151acebad3SYuan Kang 					       u32 *contig_ptr)
13161acebad3SYuan Kang {
13171acebad3SYuan Kang 	struct aead_request *req = &greq->areq;
13181acebad3SYuan Kang 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
13191acebad3SYuan Kang 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
13201acebad3SYuan Kang 	struct device *jrdev = ctx->jrdev;
13211acebad3SYuan Kang 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13221acebad3SYuan Kang 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13231acebad3SYuan Kang 	int assoc_nents, src_nents, dst_nents = 0;
13241acebad3SYuan Kang 	struct aead_edesc *edesc;
13251acebad3SYuan Kang 	dma_addr_t iv_dma = 0;
13261acebad3SYuan Kang 	int sgc;
13271acebad3SYuan Kang 	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
13281acebad3SYuan Kang 	int ivsize = crypto_aead_ivsize(aead);
1329643b39b0SYuan Kang 	bool assoc_chained = false, src_chained = false, dst_chained = false;
1330a299c837SYuan Kang 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
13310e479300SYuan Kang 
1332643b39b0SYuan Kang 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1333643b39b0SYuan Kang 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
13340e479300SYuan Kang 
13351acebad3SYuan Kang 	if (unlikely(req->dst != req->src))
1336643b39b0SYuan Kang 		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
13371acebad3SYuan Kang 
1338643b39b0SYuan Kang 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1339643b39b0SYuan Kang 				 DMA_BIDIRECTIONAL, assoc_chained);
13401acebad3SYuan Kang 	if (likely(req->src == req->dst)) {
1341643b39b0SYuan Kang 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1342643b39b0SYuan Kang 					 DMA_BIDIRECTIONAL, src_chained);
13431acebad3SYuan Kang 	} else {
1344643b39b0SYuan Kang 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1345643b39b0SYuan Kang 					 DMA_TO_DEVICE, src_chained);
1346643b39b0SYuan Kang 		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1347643b39b0SYuan Kang 					 DMA_FROM_DEVICE, dst_chained);
13481acebad3SYuan Kang 	}
13491acebad3SYuan Kang 
13501acebad3SYuan Kang 	/* Check if assoc, IV and src are contiguous in DMA; else a S/G table is needed */
13511acebad3SYuan Kang 	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
13521acebad3SYuan Kang 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
13531acebad3SYuan Kang 	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
13541acebad3SYuan Kang 		contig &= ~GIV_SRC_CONTIG;
13551acebad3SYuan Kang 	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
13561acebad3SYuan Kang 		contig &= ~GIV_DST_CONTIG;
13571acebad3SYuan Kang 	if (unlikely(req->src != req->dst)) {
13581acebad3SYuan Kang 		dst_nents = dst_nents ? : 1;
1359a299c837SYuan Kang 		sec4_sg_len += 1;
13601acebad3SYuan Kang 	}
13611acebad3SYuan Kang 	if (!(contig & GIV_SRC_CONTIG)) {
13621acebad3SYuan Kang 		assoc_nents = assoc_nents ? : 1;
13631acebad3SYuan Kang 		src_nents = src_nents ? : 1;
1364a299c837SYuan Kang 		sec4_sg_len += assoc_nents + 1 + src_nents;
13651acebad3SYuan Kang 		if (likely(req->src == req->dst))
13661acebad3SYuan Kang 			contig &= ~GIV_DST_CONTIG;
13671acebad3SYuan Kang 	}
1368a299c837SYuan Kang 	sec4_sg_len += dst_nents;
13691acebad3SYuan Kang 
1370a299c837SYuan Kang 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
13711acebad3SYuan Kang 
13721acebad3SYuan Kang 	/* allocate space for base edesc and hw desc commands, link tables */
13731acebad3SYuan Kang 	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1374a299c837SYuan Kang 			sec4_sg_bytes, GFP_DMA | flags);
13751acebad3SYuan Kang 	if (!edesc) {
13761acebad3SYuan Kang 		dev_err(jrdev, "could not allocate extended descriptor\n");
13771acebad3SYuan Kang 		return ERR_PTR(-ENOMEM);
13781acebad3SYuan Kang 	}
13791acebad3SYuan Kang 
13801acebad3SYuan Kang 	edesc->assoc_nents = assoc_nents;
1381643b39b0SYuan Kang 	edesc->assoc_chained = assoc_chained;
13821acebad3SYuan Kang 	edesc->src_nents = src_nents;
1383643b39b0SYuan Kang 	edesc->src_chained = src_chained;
13841acebad3SYuan Kang 	edesc->dst_nents = dst_nents;
1385643b39b0SYuan Kang 	edesc->dst_chained = dst_chained;
13861acebad3SYuan Kang 	edesc->iv_dma = iv_dma;
1387a299c837SYuan Kang 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1388a299c837SYuan Kang 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
13891acebad3SYuan Kang 			 desc_bytes;
1390a299c837SYuan Kang 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1391a299c837SYuan Kang 					    sec4_sg_bytes, DMA_TO_DEVICE);
13921acebad3SYuan Kang 	*contig_ptr = contig;
13931acebad3SYuan Kang 
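	/*
	 * S/G table layout: assoc entries, IV entry and src entries when
	 * the source side is not contiguous; for a distinct, non-contiguous
	 * destination a second IV entry plus the dst entries follow.
	 */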
1394a299c837SYuan Kang 	sec4_sg_index = 0;
13951acebad3SYuan Kang 	if (!(contig & GIV_SRC_CONTIG)) {
1396a299c837SYuan Kang 		sg_to_sec4_sg(req->assoc, assoc_nents,
1397a299c837SYuan Kang 			      edesc->sec4_sg +
1398a299c837SYuan Kang 			      sec4_sg_index, 0);
1399a299c837SYuan Kang 		sec4_sg_index += assoc_nents;
1400a299c837SYuan Kang 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
14011acebad3SYuan Kang 				   iv_dma, ivsize, 0);
1402a299c837SYuan Kang 		sec4_sg_index += 1;
1403a299c837SYuan Kang 		sg_to_sec4_sg_last(req->src, src_nents,
1404a299c837SYuan Kang 				   edesc->sec4_sg +
1405a299c837SYuan Kang 				   sec4_sg_index, 0);
1406a299c837SYuan Kang 		sec4_sg_index += src_nents;
14071acebad3SYuan Kang 	}
14081acebad3SYuan Kang 	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1409a299c837SYuan Kang 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
14101acebad3SYuan Kang 				   iv_dma, ivsize, 0);
1411a299c837SYuan Kang 		sec4_sg_index += 1;
1412a299c837SYuan Kang 		sg_to_sec4_sg_last(req->dst, dst_nents,
1413a299c837SYuan Kang 				   edesc->sec4_sg + sec4_sg_index, 0);
14141acebad3SYuan Kang 	}
14151acebad3SYuan Kang 
14161acebad3SYuan Kang 	return edesc;
14170e479300SYuan Kang }
14180e479300SYuan Kang 
14190e479300SYuan Kang static int aead_givencrypt(struct aead_givcrypt_request *areq)
14200e479300SYuan Kang {
14210e479300SYuan Kang 	struct aead_request *req = &areq->areq;
14220e479300SYuan Kang 	struct aead_edesc *edesc;
14230e479300SYuan Kang 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
14248e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
14258e8ec596SKim Phillips 	struct device *jrdev = ctx->jrdev;
14261acebad3SYuan Kang 	u32 contig;
14278e8ec596SKim Phillips 	u32 *desc;
14281acebad3SYuan Kang 	int ret = 0;
14298e8ec596SKim Phillips 
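	/* length math below treats the appended ICV as part of cryptlen */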
14301acebad3SYuan Kang 	req->cryptlen += ctx->authsize;
14318e8ec596SKim Phillips 
14328e8ec596SKim Phillips 	/* allocate extended descriptor */
14331acebad3SYuan Kang 	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
14341acebad3SYuan Kang 				     CAAM_CMD_SZ, &contig);
14351acebad3SYuan Kang 
14368e8ec596SKim Phillips 	if (IS_ERR(edesc))
14378e8ec596SKim Phillips 		return PTR_ERR(edesc);
14388e8ec596SKim Phillips 
14391acebad3SYuan Kang #ifdef DEBUG
14401acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
14411acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
14421acebad3SYuan Kang 		       req->cryptlen, 1);
14431acebad3SYuan Kang #endif
14441acebad3SYuan Kang 
14451acebad3SYuan Kang 	/* Create and submit job descriptor */
14461acebad3SYuan Kang 	init_aead_giv_job(ctx->sh_desc_givenc,
14471acebad3SYuan Kang 			  ctx->sh_desc_givenc_dma, edesc, req, contig);
14481acebad3SYuan Kang #ifdef DEBUG
14491acebad3SYuan Kang 	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
14501acebad3SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
14511acebad3SYuan Kang 		       desc_bytes(edesc->hw_desc), 1);
14521acebad3SYuan Kang #endif
14531acebad3SYuan Kang 
14548e8ec596SKim Phillips 	desc = edesc->hw_desc;
14551acebad3SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
14561acebad3SYuan Kang 	if (!ret) {
14571acebad3SYuan Kang 		ret = -EINPROGRESS;
14581acebad3SYuan Kang 	} else {
14591acebad3SYuan Kang 		aead_unmap(jrdev, edesc, req);
14601acebad3SYuan Kang 		kfree(edesc);
14611acebad3SYuan Kang 	}
14628e8ec596SKim Phillips 
14631acebad3SYuan Kang 	return ret;
14648e8ec596SKim Phillips }
14658e8ec596SKim Phillips 
1466acdca31dSYuan Kang /*
1467acdca31dSYuan Kang  * allocate and map the ablkcipher extended descriptor for ablkcipher
1468acdca31dSYuan Kang  */
1469acdca31dSYuan Kang static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1470acdca31dSYuan Kang 						       *req, int desc_bytes,
1471acdca31dSYuan Kang 						       bool *iv_contig_out)
1472acdca31dSYuan Kang {
1473acdca31dSYuan Kang 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1474acdca31dSYuan Kang 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1475acdca31dSYuan Kang 	struct device *jrdev = ctx->jrdev;
1476acdca31dSYuan Kang 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1477acdca31dSYuan Kang 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1478acdca31dSYuan Kang 		       GFP_KERNEL : GFP_ATOMIC;
1479a299c837SYuan Kang 	int src_nents, dst_nents = 0, sec4_sg_bytes;
1480acdca31dSYuan Kang 	struct ablkcipher_edesc *edesc;
1481acdca31dSYuan Kang 	dma_addr_t iv_dma = 0;
1482acdca31dSYuan Kang 	bool iv_contig = false;
1483acdca31dSYuan Kang 	int sgc;
1484acdca31dSYuan Kang 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1485643b39b0SYuan Kang 	bool src_chained = false, dst_chained = false;
1486a299c837SYuan Kang 	int sec4_sg_index;
1487acdca31dSYuan Kang 
1488643b39b0SYuan Kang 	src_nents = sg_count(req->src, req->nbytes, &src_chained);
1489acdca31dSYuan Kang 
1490643b39b0SYuan Kang 	if (req->dst != req->src)
1491643b39b0SYuan Kang 		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1492acdca31dSYuan Kang 
1493acdca31dSYuan Kang 	if (likely(req->src == req->dst)) {
1494643b39b0SYuan Kang 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1495643b39b0SYuan Kang 					 DMA_BIDIRECTIONAL, src_chained);
1496acdca31dSYuan Kang 	} else {
1497643b39b0SYuan Kang 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1498643b39b0SYuan Kang 					 DMA_TO_DEVICE, src_chained);
1499643b39b0SYuan Kang 		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1500643b39b0SYuan Kang 					 DMA_FROM_DEVICE, dst_chained);
1501acdca31dSYuan Kang 	}
1502acdca31dSYuan Kang 
1503acdca31dSYuan Kang 	/*
1504acdca31dSYuan Kang 	 * Check if iv can be contiguous with source and destination.
1505acdca31dSYuan Kang 	 * If so, include it. If not, create scatterlist.
1506acdca31dSYuan Kang 	 */
1507acdca31dSYuan Kang 	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1508acdca31dSYuan Kang 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1509acdca31dSYuan Kang 		iv_contig = true;
1510acdca31dSYuan Kang 	else
1511acdca31dSYuan Kang 		src_nents = src_nents ? : 1;
1512a299c837SYuan Kang 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1513a299c837SYuan Kang 			sizeof(struct sec4_sg_entry);
1514acdca31dSYuan Kang 
1515acdca31dSYuan Kang 	/* allocate space for base edesc and hw desc commands, link tables */
1516acdca31dSYuan Kang 	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1517a299c837SYuan Kang 			sec4_sg_bytes, GFP_DMA | flags);
1518acdca31dSYuan Kang 	if (!edesc) {
1519acdca31dSYuan Kang 		dev_err(jrdev, "could not allocate extended descriptor\n");
1520acdca31dSYuan Kang 		return ERR_PTR(-ENOMEM);
1521acdca31dSYuan Kang 	}
1522acdca31dSYuan Kang 
1523acdca31dSYuan Kang 	edesc->src_nents = src_nents;
1524643b39b0SYuan Kang 	edesc->src_chained = src_chained;
1525acdca31dSYuan Kang 	edesc->dst_nents = dst_nents;
1526643b39b0SYuan Kang 	edesc->dst_chained = dst_chained;
1527a299c837SYuan Kang 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1528a299c837SYuan Kang 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1529acdca31dSYuan Kang 			 desc_bytes;
1530acdca31dSYuan Kang 
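	/*
	 * S/G table layout: IV entry followed by src entries when the IV
	 * is not contiguous with the source; dst entries are appended
	 * when the destination needs its own table.
	 */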
1531a299c837SYuan Kang 	sec4_sg_index = 0;
1532acdca31dSYuan Kang 	if (!iv_contig) {
1533a299c837SYuan Kang 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1534a299c837SYuan Kang 		sg_to_sec4_sg_last(req->src, src_nents,
1535a299c837SYuan Kang 				   edesc->sec4_sg + 1, 0);
1536a299c837SYuan Kang 		sec4_sg_index += 1 + src_nents;
1537acdca31dSYuan Kang 	}
1538acdca31dSYuan Kang 
1539643b39b0SYuan Kang 	if (dst_nents) {
1540a299c837SYuan Kang 		sg_to_sec4_sg_last(req->dst, dst_nents,
1541a299c837SYuan Kang 			edesc->sec4_sg + sec4_sg_index, 0);
1542acdca31dSYuan Kang 	}
1543acdca31dSYuan Kang 
1544a299c837SYuan Kang 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1545a299c837SYuan Kang 					    sec4_sg_bytes, DMA_TO_DEVICE);
1546acdca31dSYuan Kang 	edesc->iv_dma = iv_dma;
1547acdca31dSYuan Kang 
1548acdca31dSYuan Kang #ifdef DEBUG
1549a299c837SYuan Kang 	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
1550a299c837SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1551a299c837SYuan Kang 		       sec4_sg_bytes, 1);
1552acdca31dSYuan Kang #endif
1553acdca31dSYuan Kang 
1554acdca31dSYuan Kang 	*iv_contig_out = iv_contig;
1555acdca31dSYuan Kang 	return edesc;
1556acdca31dSYuan Kang }
1557acdca31dSYuan Kang 
1558acdca31dSYuan Kang static int ablkcipher_encrypt(struct ablkcipher_request *req)
1559acdca31dSYuan Kang {
1560acdca31dSYuan Kang 	struct ablkcipher_edesc *edesc;
1561acdca31dSYuan Kang 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1562acdca31dSYuan Kang 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1563acdca31dSYuan Kang 	struct device *jrdev = ctx->jrdev;
1564acdca31dSYuan Kang 	bool iv_contig;
1565acdca31dSYuan Kang 	u32 *desc;
1566acdca31dSYuan Kang 	int ret = 0;
1567acdca31dSYuan Kang 
1568acdca31dSYuan Kang 	/* allocate extended descriptor */
1569acdca31dSYuan Kang 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1570acdca31dSYuan Kang 				       CAAM_CMD_SZ, &iv_contig);
1571acdca31dSYuan Kang 	if (IS_ERR(edesc))
1572acdca31dSYuan Kang 		return PTR_ERR(edesc);
1573acdca31dSYuan Kang 
1574acdca31dSYuan Kang 	/* Create and submit job descriptor */
1575acdca31dSYuan Kang 	init_ablkcipher_job(ctx->sh_desc_enc,
1576acdca31dSYuan Kang 		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1577acdca31dSYuan Kang #ifdef DEBUG
1578acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1579acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1580acdca31dSYuan Kang 		       desc_bytes(edesc->hw_desc), 1);
1581acdca31dSYuan Kang #endif
1582acdca31dSYuan Kang 	desc = edesc->hw_desc;
1583acdca31dSYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1584acdca31dSYuan Kang 
1585acdca31dSYuan Kang 	if (!ret) {
1586acdca31dSYuan Kang 		ret = -EINPROGRESS;
1587acdca31dSYuan Kang 	} else {
1588acdca31dSYuan Kang 		ablkcipher_unmap(jrdev, edesc, req);
1589acdca31dSYuan Kang 		kfree(edesc);
1590acdca31dSYuan Kang 	}
1591acdca31dSYuan Kang 
1592acdca31dSYuan Kang 	return ret;
1593acdca31dSYuan Kang }
1594acdca31dSYuan Kang 
1595acdca31dSYuan Kang static int ablkcipher_decrypt(struct ablkcipher_request *req)
1596acdca31dSYuan Kang {
1597acdca31dSYuan Kang 	struct ablkcipher_edesc *edesc;
1598acdca31dSYuan Kang 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1599acdca31dSYuan Kang 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1600acdca31dSYuan Kang 	struct device *jrdev = ctx->jrdev;
1601acdca31dSYuan Kang 	bool iv_contig;
1602acdca31dSYuan Kang 	u32 *desc;
1603acdca31dSYuan Kang 	int ret = 0;
1604acdca31dSYuan Kang 
1605acdca31dSYuan Kang 	/* allocate extended descriptor */
1606acdca31dSYuan Kang 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1607acdca31dSYuan Kang 				       CAAM_CMD_SZ, &iv_contig);
1608acdca31dSYuan Kang 	if (IS_ERR(edesc))
1609acdca31dSYuan Kang 		return PTR_ERR(edesc);
1610acdca31dSYuan Kang 
1611acdca31dSYuan Kang 	/* Create and submit job descriptor */
1612acdca31dSYuan Kang 	init_ablkcipher_job(ctx->sh_desc_dec,
1613acdca31dSYuan Kang 		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1614acdca31dSYuan Kang 	desc = edesc->hw_desc;
1615acdca31dSYuan Kang #ifdef DEBUG
1616acdca31dSYuan Kang 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1617acdca31dSYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1618acdca31dSYuan Kang 		       desc_bytes(edesc->hw_desc), 1);
1619acdca31dSYuan Kang #endif
1620acdca31dSYuan Kang 
1621acdca31dSYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1622acdca31dSYuan Kang 	if (!ret) {
1623acdca31dSYuan Kang 		ret = -EINPROGRESS;
1624acdca31dSYuan Kang 	} else {
1625acdca31dSYuan Kang 		ablkcipher_unmap(jrdev, edesc, req);
1626acdca31dSYuan Kang 		kfree(edesc);
1627acdca31dSYuan Kang 	}
1628acdca31dSYuan Kang 
1629acdca31dSYuan Kang 	return ret;
1630acdca31dSYuan Kang }
1631acdca31dSYuan Kang 
1632885e9e2fSYuan Kang #define template_aead		template_u.aead
1633acdca31dSYuan Kang #define template_ablkcipher	template_u.ablkcipher
16348e8ec596SKim Phillips struct caam_alg_template {
16358e8ec596SKim Phillips 	char name[CRYPTO_MAX_ALG_NAME];
16368e8ec596SKim Phillips 	char driver_name[CRYPTO_MAX_ALG_NAME];
16378e8ec596SKim Phillips 	unsigned int blocksize;
1638885e9e2fSYuan Kang 	u32 type;
1639885e9e2fSYuan Kang 	union {
1640885e9e2fSYuan Kang 		struct ablkcipher_alg ablkcipher;
16418e8ec596SKim Phillips 		struct aead_alg aead;
1642885e9e2fSYuan Kang 		struct blkcipher_alg blkcipher;
1643885e9e2fSYuan Kang 		struct cipher_alg cipher;
1644885e9e2fSYuan Kang 		struct compress_alg compress;
1645885e9e2fSYuan Kang 		struct rng_alg rng;
1646885e9e2fSYuan Kang 	} template_u;
16478e8ec596SKim Phillips 	u32 class1_alg_type;
16488e8ec596SKim Phillips 	u32 class2_alg_type;
16498e8ec596SKim Phillips 	u32 alg_op;
16508e8ec596SKim Phillips };
16518e8ec596SKim Phillips 
16528e8ec596SKim Phillips static struct caam_alg_template driver_algs[] = {
16538e8ec596SKim Phillips 	/* single-pass ipsec_esp descriptor */
16548e8ec596SKim Phillips 	{
16558b4d43a4SKim Phillips 		.name = "authenc(hmac(md5),cbc(aes))",
16568b4d43a4SKim Phillips 		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
16578b4d43a4SKim Phillips 		.blocksize = AES_BLOCK_SIZE,
16588b4d43a4SKim Phillips 		.type = CRYPTO_ALG_TYPE_AEAD,
16598b4d43a4SKim Phillips 		.template_aead = {
16608b4d43a4SKim Phillips 			.setkey = aead_setkey,
16618b4d43a4SKim Phillips 			.setauthsize = aead_setauthsize,
16628b4d43a4SKim Phillips 			.encrypt = aead_encrypt,
16638b4d43a4SKim Phillips 			.decrypt = aead_decrypt,
16648b4d43a4SKim Phillips 			.givencrypt = aead_givencrypt,
16658b4d43a4SKim Phillips 			.geniv = "<built-in>",
16668b4d43a4SKim Phillips 			.ivsize = AES_BLOCK_SIZE,
16678b4d43a4SKim Phillips 			.maxauthsize = MD5_DIGEST_SIZE,
16688b4d43a4SKim Phillips 			},
16698b4d43a4SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
16708b4d43a4SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
16718b4d43a4SKim Phillips 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
16728b4d43a4SKim Phillips 	},
16738b4d43a4SKim Phillips 	{
16748e8ec596SKim Phillips 		.name = "authenc(hmac(sha1),cbc(aes))",
16758e8ec596SKim Phillips 		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
16768e8ec596SKim Phillips 		.blocksize = AES_BLOCK_SIZE,
1677885e9e2fSYuan Kang 		.type = CRYPTO_ALG_TYPE_AEAD,
1678885e9e2fSYuan Kang 		.template_aead = {
16790e479300SYuan Kang 			.setkey = aead_setkey,
16800e479300SYuan Kang 			.setauthsize = aead_setauthsize,
16810e479300SYuan Kang 			.encrypt = aead_encrypt,
16820e479300SYuan Kang 			.decrypt = aead_decrypt,
16830e479300SYuan Kang 			.givencrypt = aead_givencrypt,
16848e8ec596SKim Phillips 			.geniv = "<built-in>",
16858e8ec596SKim Phillips 			.ivsize = AES_BLOCK_SIZE,
16868e8ec596SKim Phillips 			.maxauthsize = SHA1_DIGEST_SIZE,
16878e8ec596SKim Phillips 			},
16888e8ec596SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
16898e8ec596SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
16908e8ec596SKim Phillips 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
16918e8ec596SKim Phillips 	},
16928e8ec596SKim Phillips 	{
1693e863f9ccSHemant Agrawal 		.name = "authenc(hmac(sha224),cbc(aes))",
1694e863f9ccSHemant Agrawal 		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1695e863f9ccSHemant Agrawal 		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1696e863f9ccSHemant Agrawal 		.template_aead = {
1697e863f9ccSHemant Agrawal 			.setkey = aead_setkey,
1698e863f9ccSHemant Agrawal 			.setauthsize = aead_setauthsize,
1699e863f9ccSHemant Agrawal 			.encrypt = aead_encrypt,
1700e863f9ccSHemant Agrawal 			.decrypt = aead_decrypt,
1701e863f9ccSHemant Agrawal 			.givencrypt = aead_givencrypt,
1702e863f9ccSHemant Agrawal 			.geniv = "<built-in>",
1703e863f9ccSHemant Agrawal 			.ivsize = AES_BLOCK_SIZE,
1704e863f9ccSHemant Agrawal 			.maxauthsize = SHA224_DIGEST_SIZE,
1705e863f9ccSHemant Agrawal 			},
1706e863f9ccSHemant Agrawal 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1707e863f9ccSHemant Agrawal 		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1708e863f9ccSHemant Agrawal 				   OP_ALG_AAI_HMAC_PRECOMP,
1709e863f9ccSHemant Agrawal 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1710e863f9ccSHemant Agrawal 	},
1711e863f9ccSHemant Agrawal 	{
17128e8ec596SKim Phillips 		.name = "authenc(hmac(sha256),cbc(aes))",
17138e8ec596SKim Phillips 		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
17148e8ec596SKim Phillips 		.blocksize = AES_BLOCK_SIZE,
1715885e9e2fSYuan Kang 		.type = CRYPTO_ALG_TYPE_AEAD,
1716885e9e2fSYuan Kang 		.template_aead = {
17170e479300SYuan Kang 			.setkey = aead_setkey,
17180e479300SYuan Kang 			.setauthsize = aead_setauthsize,
17190e479300SYuan Kang 			.encrypt = aead_encrypt,
17200e479300SYuan Kang 			.decrypt = aead_decrypt,
17210e479300SYuan Kang 			.givencrypt = aead_givencrypt,
17228e8ec596SKim Phillips 			.geniv = "<built-in>",
17238e8ec596SKim Phillips 			.ivsize = AES_BLOCK_SIZE,
17248e8ec596SKim Phillips 			.maxauthsize = SHA256_DIGEST_SIZE,
17258e8ec596SKim Phillips 			},
17268e8ec596SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17278e8ec596SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17288e8ec596SKim Phillips 				   OP_ALG_AAI_HMAC_PRECOMP,
17298e8ec596SKim Phillips 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
17308e8ec596SKim Phillips 	},
17318e8ec596SKim Phillips 	{
1732e863f9ccSHemant Agrawal 		.name = "authenc(hmac(sha384),cbc(aes))",
1733e863f9ccSHemant Agrawal 		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1734e863f9ccSHemant Agrawal 		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1735e863f9ccSHemant Agrawal 		.template_aead = {
1736e863f9ccSHemant Agrawal 			.setkey = aead_setkey,
1737e863f9ccSHemant Agrawal 			.setauthsize = aead_setauthsize,
1738e863f9ccSHemant Agrawal 			.encrypt = aead_encrypt,
1739e863f9ccSHemant Agrawal 			.decrypt = aead_decrypt,
1740e863f9ccSHemant Agrawal 			.givencrypt = aead_givencrypt,
1741e863f9ccSHemant Agrawal 			.geniv = "<built-in>",
1742e863f9ccSHemant Agrawal 			.ivsize = AES_BLOCK_SIZE,
1743e863f9ccSHemant Agrawal 			.maxauthsize = SHA384_DIGEST_SIZE,
1744e863f9ccSHemant Agrawal 			},
1745e863f9ccSHemant Agrawal 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1746e863f9ccSHemant Agrawal 		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1747e863f9ccSHemant Agrawal 				   OP_ALG_AAI_HMAC_PRECOMP,
1748e863f9ccSHemant Agrawal 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1749e863f9ccSHemant Agrawal 	},
1751e863f9ccSHemant Agrawal 	{
17524427b1b4SKim Phillips 		.name = "authenc(hmac(sha512),cbc(aes))",
17534427b1b4SKim Phillips 		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
17544427b1b4SKim Phillips 		.blocksize = AES_BLOCK_SIZE,
1755885e9e2fSYuan Kang 		.type = CRYPTO_ALG_TYPE_AEAD,
1756885e9e2fSYuan Kang 		.template_aead = {
17570e479300SYuan Kang 			.setkey = aead_setkey,
17580e479300SYuan Kang 			.setauthsize = aead_setauthsize,
17590e479300SYuan Kang 			.encrypt = aead_encrypt,
17600e479300SYuan Kang 			.decrypt = aead_decrypt,
17610e479300SYuan Kang 			.givencrypt = aead_givencrypt,
17624427b1b4SKim Phillips 			.geniv = "<built-in>",
17634427b1b4SKim Phillips 			.ivsize = AES_BLOCK_SIZE,
17644427b1b4SKim Phillips 			.maxauthsize = SHA512_DIGEST_SIZE,
17654427b1b4SKim Phillips 			},
17664427b1b4SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17674427b1b4SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17684427b1b4SKim Phillips 				   OP_ALG_AAI_HMAC_PRECOMP,
17694427b1b4SKim Phillips 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
17704427b1b4SKim Phillips 	},
17714427b1b4SKim Phillips 	{
17728b4d43a4SKim Phillips 		.name = "authenc(hmac(md5),cbc(des3_ede))",
17738b4d43a4SKim Phillips 		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
17748b4d43a4SKim Phillips 		.blocksize = DES3_EDE_BLOCK_SIZE,
17758b4d43a4SKim Phillips 		.type = CRYPTO_ALG_TYPE_AEAD,
17768b4d43a4SKim Phillips 		.template_aead = {
17778b4d43a4SKim Phillips 			.setkey = aead_setkey,
17788b4d43a4SKim Phillips 			.setauthsize = aead_setauthsize,
17798b4d43a4SKim Phillips 			.encrypt = aead_encrypt,
17808b4d43a4SKim Phillips 			.decrypt = aead_decrypt,
17818b4d43a4SKim Phillips 			.givencrypt = aead_givencrypt,
17828b4d43a4SKim Phillips 			.geniv = "<built-in>",
17838b4d43a4SKim Phillips 			.ivsize = DES3_EDE_BLOCK_SIZE,
17848b4d43a4SKim Phillips 			.maxauthsize = MD5_DIGEST_SIZE,
17858b4d43a4SKim Phillips 			},
17868b4d43a4SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17878b4d43a4SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
17888b4d43a4SKim Phillips 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
17898b4d43a4SKim Phillips 	},
17908b4d43a4SKim Phillips 	{
17918e8ec596SKim Phillips 		.name = "authenc(hmac(sha1),cbc(des3_ede))",
17928e8ec596SKim Phillips 		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
17938e8ec596SKim Phillips 		.blocksize = DES3_EDE_BLOCK_SIZE,
1794885e9e2fSYuan Kang 		.type = CRYPTO_ALG_TYPE_AEAD,
1795885e9e2fSYuan Kang 		.template_aead = {
17960e479300SYuan Kang 			.setkey = aead_setkey,
17970e479300SYuan Kang 			.setauthsize = aead_setauthsize,
17980e479300SYuan Kang 			.encrypt = aead_encrypt,
17990e479300SYuan Kang 			.decrypt = aead_decrypt,
18000e479300SYuan Kang 			.givencrypt = aead_givencrypt,
18018e8ec596SKim Phillips 			.geniv = "<built-in>",
18028e8ec596SKim Phillips 			.ivsize = DES3_EDE_BLOCK_SIZE,
18038e8ec596SKim Phillips 			.maxauthsize = SHA1_DIGEST_SIZE,
18048e8ec596SKim Phillips 			},
18058e8ec596SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18068e8ec596SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
18078e8ec596SKim Phillips 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
18088e8ec596SKim Phillips 	},
18098e8ec596SKim Phillips 	{
1810e863f9ccSHemant Agrawal 		.name = "authenc(hmac(sha224),cbc(des3_ede))",
1811e863f9ccSHemant Agrawal 		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
1812e863f9ccSHemant Agrawal 		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1813e863f9ccSHemant Agrawal 		.template_aead = {
1814e863f9ccSHemant Agrawal 			.setkey = aead_setkey,
1815e863f9ccSHemant Agrawal 			.setauthsize = aead_setauthsize,
1816e863f9ccSHemant Agrawal 			.encrypt = aead_encrypt,
1817e863f9ccSHemant Agrawal 			.decrypt = aead_decrypt,
1818e863f9ccSHemant Agrawal 			.givencrypt = aead_givencrypt,
1819e863f9ccSHemant Agrawal 			.geniv = "<built-in>",
1820e863f9ccSHemant Agrawal 			.ivsize = DES3_EDE_BLOCK_SIZE,
1821e863f9ccSHemant Agrawal 			.maxauthsize = SHA224_DIGEST_SIZE,
1822e863f9ccSHemant Agrawal 			},
1823e863f9ccSHemant Agrawal 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1824e863f9ccSHemant Agrawal 		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1825e863f9ccSHemant Agrawal 				   OP_ALG_AAI_HMAC_PRECOMP,
1826e863f9ccSHemant Agrawal 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1827e863f9ccSHemant Agrawal 	},
1828e863f9ccSHemant Agrawal 	{
18298e8ec596SKim Phillips 		.name = "authenc(hmac(sha256),cbc(des3_ede))",
18308e8ec596SKim Phillips 		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
18318e8ec596SKim Phillips 		.blocksize = DES3_EDE_BLOCK_SIZE,
1832885e9e2fSYuan Kang 		.type = CRYPTO_ALG_TYPE_AEAD,
1833885e9e2fSYuan Kang 		.template_aead = {
18340e479300SYuan Kang 			.setkey = aead_setkey,
18350e479300SYuan Kang 			.setauthsize = aead_setauthsize,
18360e479300SYuan Kang 			.encrypt = aead_encrypt,
18370e479300SYuan Kang 			.decrypt = aead_decrypt,
18380e479300SYuan Kang 			.givencrypt = aead_givencrypt,
18398e8ec596SKim Phillips 			.geniv = "<built-in>",
18408e8ec596SKim Phillips 			.ivsize = DES3_EDE_BLOCK_SIZE,
18418e8ec596SKim Phillips 			.maxauthsize = SHA256_DIGEST_SIZE,
18428e8ec596SKim Phillips 			},
18438e8ec596SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18448e8ec596SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18458e8ec596SKim Phillips 				   OP_ALG_AAI_HMAC_PRECOMP,
18468e8ec596SKim Phillips 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
18478e8ec596SKim Phillips 	},
18488e8ec596SKim Phillips 	{
1849e863f9ccSHemant Agrawal 		.name = "authenc(hmac(sha384),cbc(des3_ede))",
1850e863f9ccSHemant Agrawal 		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
1851e863f9ccSHemant Agrawal 		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1852e863f9ccSHemant Agrawal 		.template_aead = {
1853e863f9ccSHemant Agrawal 			.setkey = aead_setkey,
1854e863f9ccSHemant Agrawal 			.setauthsize = aead_setauthsize,
1855e863f9ccSHemant Agrawal 			.encrypt = aead_encrypt,
1856e863f9ccSHemant Agrawal 			.decrypt = aead_decrypt,
1857e863f9ccSHemant Agrawal 			.givencrypt = aead_givencrypt,
1858e863f9ccSHemant Agrawal 			.geniv = "<built-in>",
1859e863f9ccSHemant Agrawal 			.ivsize = DES3_EDE_BLOCK_SIZE,
1860e863f9ccSHemant Agrawal 			.maxauthsize = SHA384_DIGEST_SIZE,
1861e863f9ccSHemant Agrawal 			},
1862e863f9ccSHemant Agrawal 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1863e863f9ccSHemant Agrawal 		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1864e863f9ccSHemant Agrawal 				   OP_ALG_AAI_HMAC_PRECOMP,
1865e863f9ccSHemant Agrawal 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1866e863f9ccSHemant Agrawal 	},
1867e863f9ccSHemant Agrawal 	{
18684427b1b4SKim Phillips 		.name = "authenc(hmac(sha512),cbc(des3_ede))",
18694427b1b4SKim Phillips 		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
18704427b1b4SKim Phillips 		.blocksize = DES3_EDE_BLOCK_SIZE,
1871885e9e2fSYuan Kang 		.type = CRYPTO_ALG_TYPE_AEAD,
1872885e9e2fSYuan Kang 		.template_aead = {
18730e479300SYuan Kang 			.setkey = aead_setkey,
18740e479300SYuan Kang 			.setauthsize = aead_setauthsize,
18750e479300SYuan Kang 			.encrypt = aead_encrypt,
18760e479300SYuan Kang 			.decrypt = aead_decrypt,
18770e479300SYuan Kang 			.givencrypt = aead_givencrypt,
18784427b1b4SKim Phillips 			.geniv = "<built-in>",
18794427b1b4SKim Phillips 			.ivsize = DES3_EDE_BLOCK_SIZE,
18804427b1b4SKim Phillips 			.maxauthsize = SHA512_DIGEST_SIZE,
18814427b1b4SKim Phillips 			},
18824427b1b4SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18834427b1b4SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18844427b1b4SKim Phillips 				   OP_ALG_AAI_HMAC_PRECOMP,
18854427b1b4SKim Phillips 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
18864427b1b4SKim Phillips 	},
18874427b1b4SKim Phillips 	{
18888b4d43a4SKim Phillips 		.name = "authenc(hmac(md5),cbc(des))",
18898b4d43a4SKim Phillips 		.driver_name = "authenc-hmac-md5-cbc-des-caam",
18908b4d43a4SKim Phillips 		.blocksize = DES_BLOCK_SIZE,
18918b4d43a4SKim Phillips 		.type = CRYPTO_ALG_TYPE_AEAD,
18928b4d43a4SKim Phillips 		.template_aead = {
18938b4d43a4SKim Phillips 			.setkey = aead_setkey,
18948b4d43a4SKim Phillips 			.setauthsize = aead_setauthsize,
18958b4d43a4SKim Phillips 			.encrypt = aead_encrypt,
18968b4d43a4SKim Phillips 			.decrypt = aead_decrypt,
18978b4d43a4SKim Phillips 			.givencrypt = aead_givencrypt,
18988b4d43a4SKim Phillips 			.geniv = "<built-in>",
18998b4d43a4SKim Phillips 			.ivsize = DES_BLOCK_SIZE,
19008b4d43a4SKim Phillips 			.maxauthsize = MD5_DIGEST_SIZE,
19018b4d43a4SKim Phillips 			},
19028b4d43a4SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
19038b4d43a4SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
19048b4d43a4SKim Phillips 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
19058b4d43a4SKim Phillips 	},
19068b4d43a4SKim Phillips 	{
19078e8ec596SKim Phillips 		.name = "authenc(hmac(sha1),cbc(des))",
19088e8ec596SKim Phillips 		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
19098e8ec596SKim Phillips 		.blocksize = DES_BLOCK_SIZE,
1910885e9e2fSYuan Kang 		.type = CRYPTO_ALG_TYPE_AEAD,
1911885e9e2fSYuan Kang 		.template_aead = {
19120e479300SYuan Kang 			.setkey = aead_setkey,
19130e479300SYuan Kang 			.setauthsize = aead_setauthsize,
19140e479300SYuan Kang 			.encrypt = aead_encrypt,
19150e479300SYuan Kang 			.decrypt = aead_decrypt,
19160e479300SYuan Kang 			.givencrypt = aead_givencrypt,
19178e8ec596SKim Phillips 			.geniv = "<built-in>",
19188e8ec596SKim Phillips 			.ivsize = DES_BLOCK_SIZE,
19198e8ec596SKim Phillips 			.maxauthsize = SHA1_DIGEST_SIZE,
19208e8ec596SKim Phillips 			},
19218e8ec596SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
19228e8ec596SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
19238e8ec596SKim Phillips 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
19248e8ec596SKim Phillips 	},
19258e8ec596SKim Phillips 	{
1926e863f9ccSHemant Agrawal 		.name = "authenc(hmac(sha224),cbc(des))",
1927e863f9ccSHemant Agrawal 		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
1928e863f9ccSHemant Agrawal 		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1929e863f9ccSHemant Agrawal 		.template_aead = {
1930e863f9ccSHemant Agrawal 			.setkey = aead_setkey,
1931e863f9ccSHemant Agrawal 			.setauthsize = aead_setauthsize,
1932e863f9ccSHemant Agrawal 			.encrypt = aead_encrypt,
1933e863f9ccSHemant Agrawal 			.decrypt = aead_decrypt,
1934e863f9ccSHemant Agrawal 			.givencrypt = aead_givencrypt,
1935e863f9ccSHemant Agrawal 			.geniv = "<built-in>",
1936e863f9ccSHemant Agrawal 			.ivsize = DES_BLOCK_SIZE,
1937e863f9ccSHemant Agrawal 			.maxauthsize = SHA224_DIGEST_SIZE,
1938e863f9ccSHemant Agrawal 			},
1939e863f9ccSHemant Agrawal 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1940e863f9ccSHemant Agrawal 		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1941e863f9ccSHemant Agrawal 				   OP_ALG_AAI_HMAC_PRECOMP,
1942e863f9ccSHemant Agrawal 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1943e863f9ccSHemant Agrawal 	},
1944e863f9ccSHemant Agrawal 	{
19458e8ec596SKim Phillips 		.name = "authenc(hmac(sha256),cbc(des))",
19468e8ec596SKim Phillips 		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
19478e8ec596SKim Phillips 		.blocksize = DES_BLOCK_SIZE,
1948885e9e2fSYuan Kang 		.type = CRYPTO_ALG_TYPE_AEAD,
1949885e9e2fSYuan Kang 		.template_aead = {
19500e479300SYuan Kang 			.setkey = aead_setkey,
19510e479300SYuan Kang 			.setauthsize = aead_setauthsize,
19520e479300SYuan Kang 			.encrypt = aead_encrypt,
19530e479300SYuan Kang 			.decrypt = aead_decrypt,
19540e479300SYuan Kang 			.givencrypt = aead_givencrypt,
19558e8ec596SKim Phillips 			.geniv = "<built-in>",
19568e8ec596SKim Phillips 			.ivsize = DES_BLOCK_SIZE,
19578e8ec596SKim Phillips 			.maxauthsize = SHA256_DIGEST_SIZE,
19588e8ec596SKim Phillips 			},
19598e8ec596SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
19608e8ec596SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
19618e8ec596SKim Phillips 				   OP_ALG_AAI_HMAC_PRECOMP,
19628e8ec596SKim Phillips 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
19638e8ec596SKim Phillips 	},
19644427b1b4SKim Phillips 	{
1965e863f9ccSHemant Agrawal 		.name = "authenc(hmac(sha384),cbc(des))",
1966e863f9ccSHemant Agrawal 		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
1967e863f9ccSHemant Agrawal 		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
1968e863f9ccSHemant Agrawal 		.template_aead = {
1969e863f9ccSHemant Agrawal 			.setkey = aead_setkey,
1970e863f9ccSHemant Agrawal 			.setauthsize = aead_setauthsize,
1971e863f9ccSHemant Agrawal 			.encrypt = aead_encrypt,
1972e863f9ccSHemant Agrawal 			.decrypt = aead_decrypt,
1973e863f9ccSHemant Agrawal 			.givencrypt = aead_givencrypt,
1974e863f9ccSHemant Agrawal 			.geniv = "<built-in>",
1975e863f9ccSHemant Agrawal 			.ivsize = DES_BLOCK_SIZE,
1976e863f9ccSHemant Agrawal 			.maxauthsize = SHA384_DIGEST_SIZE,
1977e863f9ccSHemant Agrawal 			},
1978e863f9ccSHemant Agrawal 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1979e863f9ccSHemant Agrawal 		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1980e863f9ccSHemant Agrawal 				   OP_ALG_AAI_HMAC_PRECOMP,
1981e863f9ccSHemant Agrawal 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1982e863f9ccSHemant Agrawal 	},
1983e863f9ccSHemant Agrawal 	{
19844427b1b4SKim Phillips 		.name = "authenc(hmac(sha512),cbc(des))",
19854427b1b4SKim Phillips 		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
19864427b1b4SKim Phillips 		.blocksize = DES_BLOCK_SIZE,
1987885e9e2fSYuan Kang 		.type = CRYPTO_ALG_TYPE_AEAD,
1988885e9e2fSYuan Kang 		.template_aead = {
19890e479300SYuan Kang 			.setkey = aead_setkey,
19900e479300SYuan Kang 			.setauthsize = aead_setauthsize,
19910e479300SYuan Kang 			.encrypt = aead_encrypt,
19920e479300SYuan Kang 			.decrypt = aead_decrypt,
19930e479300SYuan Kang 			.givencrypt = aead_givencrypt,
19944427b1b4SKim Phillips 			.geniv = "<built-in>",
19954427b1b4SKim Phillips 			.ivsize = DES_BLOCK_SIZE,
19964427b1b4SKim Phillips 			.maxauthsize = SHA512_DIGEST_SIZE,
19974427b1b4SKim Phillips 			},
19984427b1b4SKim Phillips 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
19994427b1b4SKim Phillips 		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
20004427b1b4SKim Phillips 				   OP_ALG_AAI_HMAC_PRECOMP,
20014427b1b4SKim Phillips 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
20024427b1b4SKim Phillips 	},
2003acdca31dSYuan Kang 	/* ablkcipher descriptor */
2004acdca31dSYuan Kang 	{
2005acdca31dSYuan Kang 		.name = "cbc(aes)",
2006acdca31dSYuan Kang 		.driver_name = "cbc-aes-caam",
2007acdca31dSYuan Kang 		.blocksize = AES_BLOCK_SIZE,
2008acdca31dSYuan Kang 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2009acdca31dSYuan Kang 		.template_ablkcipher = {
2010acdca31dSYuan Kang 			.setkey = ablkcipher_setkey,
2011acdca31dSYuan Kang 			.encrypt = ablkcipher_encrypt,
2012acdca31dSYuan Kang 			.decrypt = ablkcipher_decrypt,
2013acdca31dSYuan Kang 			.geniv = "eseqiv",
2014acdca31dSYuan Kang 			.min_keysize = AES_MIN_KEY_SIZE,
2015acdca31dSYuan Kang 			.max_keysize = AES_MAX_KEY_SIZE,
2016acdca31dSYuan Kang 			.ivsize = AES_BLOCK_SIZE,
2017acdca31dSYuan Kang 			},
2018acdca31dSYuan Kang 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2019acdca31dSYuan Kang 	},
2020acdca31dSYuan Kang 	{
2021acdca31dSYuan Kang 		.name = "cbc(des3_ede)",
2022acdca31dSYuan Kang 		.driver_name = "cbc-3des-caam",
2023acdca31dSYuan Kang 		.blocksize = DES3_EDE_BLOCK_SIZE,
2024acdca31dSYuan Kang 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2025acdca31dSYuan Kang 		.template_ablkcipher = {
2026acdca31dSYuan Kang 			.setkey = ablkcipher_setkey,
2027acdca31dSYuan Kang 			.encrypt = ablkcipher_encrypt,
2028acdca31dSYuan Kang 			.decrypt = ablkcipher_decrypt,
2029acdca31dSYuan Kang 			.geniv = "eseqiv",
2030acdca31dSYuan Kang 			.min_keysize = DES3_EDE_KEY_SIZE,
2031acdca31dSYuan Kang 			.max_keysize = DES3_EDE_KEY_SIZE,
2032acdca31dSYuan Kang 			.ivsize = DES3_EDE_BLOCK_SIZE,
2033acdca31dSYuan Kang 			},
2034acdca31dSYuan Kang 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2035acdca31dSYuan Kang 	},
2036acdca31dSYuan Kang 	{
2037acdca31dSYuan Kang 		.name = "cbc(des)",
2038acdca31dSYuan Kang 		.driver_name = "cbc-des-caam",
2039acdca31dSYuan Kang 		.blocksize = DES_BLOCK_SIZE,
2040acdca31dSYuan Kang 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2041acdca31dSYuan Kang 		.template_ablkcipher = {
2042acdca31dSYuan Kang 			.setkey = ablkcipher_setkey,
2043acdca31dSYuan Kang 			.encrypt = ablkcipher_encrypt,
2044acdca31dSYuan Kang 			.decrypt = ablkcipher_decrypt,
2045acdca31dSYuan Kang 			.geniv = "eseqiv",
2046acdca31dSYuan Kang 			.min_keysize = DES_KEY_SIZE,
2047acdca31dSYuan Kang 			.max_keysize = DES_KEY_SIZE,
2048acdca31dSYuan Kang 			.ivsize = DES_BLOCK_SIZE,
2049acdca31dSYuan Kang 			},
2050acdca31dSYuan Kang 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2051acdca31dSYuan Kang 	}
20528e8ec596SKim Phillips };
20538e8ec596SKim Phillips 
20548e8ec596SKim Phillips struct caam_crypto_alg {
20558e8ec596SKim Phillips 	struct list_head entry;
20568e8ec596SKim Phillips 	struct device *ctrldev;
20578e8ec596SKim Phillips 	int class1_alg_type;
20588e8ec596SKim Phillips 	int class2_alg_type;
20598e8ec596SKim Phillips 	int alg_op;
20608e8ec596SKim Phillips 	struct crypto_alg crypto_alg;
20618e8ec596SKim Phillips };
20628e8ec596SKim Phillips 
20638e8ec596SKim Phillips static int caam_cra_init(struct crypto_tfm *tfm)
20648e8ec596SKim Phillips {
20658e8ec596SKim Phillips 	struct crypto_alg *alg = tfm->__crt_alg;
20668e8ec596SKim Phillips 	struct caam_crypto_alg *caam_alg =
20678e8ec596SKim Phillips 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
20688e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
20698e8ec596SKim Phillips 	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
20708e8ec596SKim Phillips 	int tgt_jr = atomic_inc_return(&priv->tfm_count);
20718e8ec596SKim Phillips 
20728e8ec596SKim Phillips 	/*
20738e8ec596SKim Phillips 	 * distribute tfms across job rings to ensure in-order
20748e8ec596SKim Phillips 	 * crypto request processing per tfm
20758e8ec596SKim Phillips 	 */
20768009a383SYuan Kang 	ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
20778e8ec596SKim Phillips 
20788e8ec596SKim Phillips 	/* copy descriptor header template value */
20798e8ec596SKim Phillips 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
20808e8ec596SKim Phillips 	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
20818e8ec596SKim Phillips 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
20828e8ec596SKim Phillips 
20838e8ec596SKim Phillips 	return 0;
20848e8ec596SKim Phillips }
20858e8ec596SKim Phillips 
20868e8ec596SKim Phillips static void caam_cra_exit(struct crypto_tfm *tfm)
20878e8ec596SKim Phillips {
20888e8ec596SKim Phillips 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
20898e8ec596SKim Phillips 
20901acebad3SYuan Kang 	if (ctx->sh_desc_enc_dma &&
20911acebad3SYuan Kang 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
20921acebad3SYuan Kang 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
20931acebad3SYuan Kang 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
20941acebad3SYuan Kang 	if (ctx->sh_desc_dec_dma &&
20951acebad3SYuan Kang 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
20961acebad3SYuan Kang 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
20971acebad3SYuan Kang 				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
20981acebad3SYuan Kang 	if (ctx->sh_desc_givenc_dma &&
20991acebad3SYuan Kang 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
21001acebad3SYuan Kang 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
21011acebad3SYuan Kang 				 desc_bytes(ctx->sh_desc_givenc),
21024427b1b4SKim Phillips 				 DMA_TO_DEVICE);
21038e8ec596SKim Phillips }
21048e8ec596SKim Phillips 
21058e8ec596SKim Phillips static void __exit caam_algapi_exit(void)
21068e8ec596SKim Phillips {
21088e8ec596SKim Phillips 	struct device_node *dev_node;
21098e8ec596SKim Phillips 	struct platform_device *pdev;
21108e8ec596SKim Phillips 	struct device *ctrldev;
21118e8ec596SKim Phillips 	struct caam_drv_private *priv;
21128e8ec596SKim Phillips 	struct caam_crypto_alg *t_alg, *n;
21138e8ec596SKim Phillips 
211454e198d4SKim Phillips 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2115a0ea0f6dSShengzhou Liu 	if (!dev_node) {
2116a0ea0f6dSShengzhou Liu 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
21178e8ec596SKim Phillips 		if (!dev_node)
21188e8ec596SKim Phillips 			return;
2119a0ea0f6dSShengzhou Liu 	}
21208e8ec596SKim Phillips 
21218e8ec596SKim Phillips 	pdev = of_find_device_by_node(dev_node);
21228e8ec596SKim Phillips 	if (!pdev)
21238e8ec596SKim Phillips 		return;
21248e8ec596SKim Phillips 
21258e8ec596SKim Phillips 	ctrldev = &pdev->dev;
21268e8ec596SKim Phillips 	of_node_put(dev_node);
21278e8ec596SKim Phillips 	priv = dev_get_drvdata(ctrldev);
21288e8ec596SKim Phillips 
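	/*
	 * alg_list is only set up by caam_algapi_init(); if ->next is still
	 * NULL nothing was registered and there is nothing to tear down.
	 */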
21298e8ec596SKim Phillips 	if (!priv->alg_list.next)
21308e8ec596SKim Phillips 		return;
21318e8ec596SKim Phillips 
21328e8ec596SKim Phillips 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
21338e8ec596SKim Phillips 		crypto_unregister_alg(&t_alg->crypto_alg);
21348e8ec596SKim Phillips 		list_del(&t_alg->entry);
21358e8ec596SKim Phillips 		kfree(t_alg);
21368e8ec596SKim Phillips 	}
21378e8ec596SKim Phillips }
21388e8ec596SKim Phillips 
21398e8ec596SKim Phillips static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
21408e8ec596SKim Phillips 					      struct caam_alg_template
21418e8ec596SKim Phillips 					      *template)
21428e8ec596SKim Phillips {
21438e8ec596SKim Phillips 	struct caam_crypto_alg *t_alg;
21448e8ec596SKim Phillips 	struct crypto_alg *alg;
21458e8ec596SKim Phillips 
21468e8ec596SKim Phillips 	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
21478e8ec596SKim Phillips 	if (!t_alg) {
21488e8ec596SKim Phillips 		dev_err(ctrldev, "failed to allocate t_alg\n");
21498e8ec596SKim Phillips 		return ERR_PTR(-ENOMEM);
21508e8ec596SKim Phillips 	}
21518e8ec596SKim Phillips 
21528e8ec596SKim Phillips 	alg = &t_alg->crypto_alg;
21538e8ec596SKim Phillips 
21548e8ec596SKim Phillips 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
21558e8ec596SKim Phillips 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
21568e8ec596SKim Phillips 		 template->driver_name);
21578e8ec596SKim Phillips 	alg->cra_module = THIS_MODULE;
21588e8ec596SKim Phillips 	alg->cra_init = caam_cra_init;
21598e8ec596SKim Phillips 	alg->cra_exit = caam_cra_exit;
21608e8ec596SKim Phillips 	alg->cra_priority = CAAM_CRA_PRIORITY;
21618e8ec596SKim Phillips 	alg->cra_blocksize = template->blocksize;
21628e8ec596SKim Phillips 	alg->cra_alignmask = 0;
21638e8ec596SKim Phillips 	alg->cra_ctxsize = sizeof(struct caam_ctx);
2164d912bb76SNikos Mavrogiannopoulos 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2165d912bb76SNikos Mavrogiannopoulos 			 template->type;
2166885e9e2fSYuan Kang 	switch (template->type) {
2167acdca31dSYuan Kang 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2168acdca31dSYuan Kang 		alg->cra_type = &crypto_ablkcipher_type;
2169acdca31dSYuan Kang 		alg->cra_ablkcipher = template->template_ablkcipher;
2170acdca31dSYuan Kang 		break;
2171885e9e2fSYuan Kang 	case CRYPTO_ALG_TYPE_AEAD:
2172885e9e2fSYuan Kang 		alg->cra_type = &crypto_aead_type;
2173885e9e2fSYuan Kang 		alg->cra_aead = template->template_aead;
2174885e9e2fSYuan Kang 		break;
2175885e9e2fSYuan Kang 	}
21768e8ec596SKim Phillips 
21778e8ec596SKim Phillips 	t_alg->class1_alg_type = template->class1_alg_type;
21788e8ec596SKim Phillips 	t_alg->class2_alg_type = template->class2_alg_type;
21798e8ec596SKim Phillips 	t_alg->alg_op = template->alg_op;
21808e8ec596SKim Phillips 	t_alg->ctrldev = ctrldev;
21818e8ec596SKim Phillips 
21828e8ec596SKim Phillips 	return t_alg;
21838e8ec596SKim Phillips }
21848e8ec596SKim Phillips 
21858e8ec596SKim Phillips static int __init caam_algapi_init(void)
21868e8ec596SKim Phillips {
21878e8ec596SKim Phillips 	struct device_node *dev_node;
21888e8ec596SKim Phillips 	struct platform_device *pdev;
21898009a383SYuan Kang 	struct device *ctrldev;
21908e8ec596SKim Phillips 	struct caam_drv_private *priv;
21918e8ec596SKim Phillips 	int i = 0, err = 0;
21928e8ec596SKim Phillips 
219354e198d4SKim Phillips 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2194a0ea0f6dSShengzhou Liu 	if (!dev_node) {
2195a0ea0f6dSShengzhou Liu 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
21968e8ec596SKim Phillips 		if (!dev_node)
21978e8ec596SKim Phillips 			return -ENODEV;
2198a0ea0f6dSShengzhou Liu 	}
21998e8ec596SKim Phillips 
22008e8ec596SKim Phillips 	pdev = of_find_device_by_node(dev_node);
22018e8ec596SKim Phillips 	if (!pdev)
22028e8ec596SKim Phillips 		return -ENODEV;
22038e8ec596SKim Phillips 
22048e8ec596SKim Phillips 	ctrldev = &pdev->dev;
22058e8ec596SKim Phillips 	priv = dev_get_drvdata(ctrldev);
22078e8ec596SKim Phillips 
22088e8ec596SKim Phillips 	INIT_LIST_HEAD(&priv->alg_list);
22098e8ec596SKim Phillips 
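	/* -1 so the first atomic_inc_return() in caam_cra_init() yields 0 */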
22108e8ec596SKim Phillips 	atomic_set(&priv->tfm_count, -1);
22118e8ec596SKim Phillips 
22128e8ec596SKim Phillips 	/* register crypto algorithms the device supports */
22138e8ec596SKim Phillips 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
22148e8ec596SKim Phillips 		/* TODO: check if h/w supports alg */
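		/*
		 * Such a check would presumably read the CHA version/
		 * instantiation registers from the controller and skip
		 * template entries whose class 1/2 algorithm is not present
		 * on this SEC instance; later revisions of this driver do
		 * something along those lines.
		 */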
22158e8ec596SKim Phillips 		struct caam_crypto_alg *t_alg;
22168e8ec596SKim Phillips 
22178e8ec596SKim Phillips 		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
22188e8ec596SKim Phillips 		if (IS_ERR(t_alg)) {
22198e8ec596SKim Phillips 			err = PTR_ERR(t_alg);
22208e8ec596SKim Phillips 			dev_warn(ctrldev, "%s alg allocation failed\n",
2221cdc712d8SDan Carpenter 				 driver_algs[i].driver_name);
22228e8ec596SKim Phillips 			continue;
22238e8ec596SKim Phillips 		}
22248e8ec596SKim Phillips 
22258e8ec596SKim Phillips 		err = crypto_register_alg(&t_alg->crypto_alg);
22268e8ec596SKim Phillips 		if (err) {
22278e8ec596SKim Phillips 			dev_warn(ctrldev, "%s alg registration failed\n",
22288e8ec596SKim Phillips 				t_alg->crypto_alg.cra_driver_name);
22298e8ec596SKim Phillips 			kfree(t_alg);
22300113529fSKim Phillips 		} else
22318e8ec596SKim Phillips 			list_add_tail(&t_alg->entry, &priv->alg_list);
22328e8ec596SKim Phillips 	}
22330113529fSKim Phillips 	if (!list_empty(&priv->alg_list))
22340113529fSKim Phillips 		dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
22350113529fSKim Phillips 			 (char *)of_get_property(dev_node, "compatible", NULL));
	of_node_put(dev_node);
22368e8ec596SKim Phillips 
22378e8ec596SKim Phillips 	return err;
22388e8ec596SKim Phillips }
22398e8ec596SKim Phillips 
22408e8ec596SKim Phillips module_init(caam_algapi_init);
22418e8ec596SKim Phillips module_exit(caam_algapi_exit);
22428e8ec596SKim Phillips 
22438e8ec596SKim Phillips MODULE_LICENSE("GPL");
22448e8ec596SKim Phillips MODULE_DESCRIPTION("FSL CAAM support for crypto API");
22458e8ec596SKim Phillips MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");