xref: /openbmc/linux/drivers/crypto/caam/caamalg.c (revision a2cce7a9)
1 /*
2  * caam - Freescale FSL CAAM support for crypto API
3  *
4  * Copyright 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Based on talitos crypto API driver.
7  *
8  * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9  *
10  * ---------------                     ---------------
11  * | JobDesc #1  |-------------------->|  ShareDesc  |
12  * | *(packet 1) |                     |   (PDB)     |
13  * ---------------      |------------->|  (hashKey)  |
14  *       .              |              | (cipherKey) |
15  *       .              |    |-------->| (operation) |
16  * ---------------      |    |         ---------------
17  * | JobDesc #2  |------|    |
18  * | *(packet 2) |           |
19  * ---------------           |
20  *       .                   |
21  *       .                   |
22  * ---------------           |
23  * | JobDesc #3  |------------
24  * | *(packet 3) |
25  * ---------------
26  *
27  * The SharedDesc never changes for a connection unless rekeyed, but
28  * each packet will likely be in a different place. So all we need
29  * to know to process the packet is where the input is, where the
30  * output goes, and what context we want to process with. Context is
31  * in the SharedDesc, packet references in the JobDesc.
32  *
33  * So, a job desc looks like:
34  *
35  * ---------------------
36  * | Header            |
37  * | ShareDesc Pointer |
38  * | SEQ_OUT_PTR       |
39  * | (output buffer)   |
40  * | (output length)   |
41  * | SEQ_IN_PTR        |
42  * | (input buffer)    |
43  * | (input length)    |
44  * ---------------------
45  */
46 
47 #include "compat.h"
48 
49 #include "regs.h"
50 #include "intern.h"
51 #include "desc_constr.h"
52 #include "jr.h"
53 #include "error.h"
54 #include "sg_sw_sec4.h"
55 #include "key_gen.h"
56 
57 /*
58  * crypto alg
59  */
60 #define CAAM_CRA_PRIORITY		3000
61 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62 #define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
63 					 CTR_RFC3686_NONCE_SIZE + \
64 					 SHA512_DIGEST_SIZE * 2)
65 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
66 #define CAAM_MAX_IV_LENGTH		16
67 
68 #define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
69 #define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
70 					 CAAM_CMD_SZ * 4)
71 #define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
72 					 CAAM_CMD_SZ * 5)
73 
74 /* length of descriptors text */
75 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
76 #define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
77 #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
78 #define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
79 
80 /* Note: Nonce is counted in enckeylen */
81 #define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)
82 
83 #define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
84 #define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
85 #define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
86 
87 #define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
88 #define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
89 #define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
90 
91 #define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
92 #define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
93 #define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
94 
95 #define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
96 #define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
97 #define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
98 
99 #define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
100 #define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
101 					 20 * CAAM_CMD_SZ)
102 #define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
103 					 15 * CAAM_CMD_SZ)
104 
105 #define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
106 #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
107 
108 #ifdef DEBUG
109 /* for print_hex_dumps with line references */
110 #define debug(format, arg...) printk(format, arg)
111 #else
112 #define debug(format, arg...)
113 #endif
114 static struct list_head alg_list;
115 
116 struct caam_alg_entry {
117 	int class1_alg_type;
118 	int class2_alg_type;
119 	int alg_op;
120 	bool rfc3686;
121 	bool geniv;
122 };
123 
124 struct caam_aead_alg {
125 	struct aead_alg aead;
126 	struct caam_alg_entry caam;
127 	bool registered;
128 };
129 
130 /* Set DK bit in class 1 operation if shared */
131 static inline void append_dec_op1(u32 *desc, u32 type)
132 {
133 	u32 *jump_cmd, *uncond_jump_cmd;
134 
135 	/* DK bit is valid only for AES */
136 	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
137 		append_operation(desc, type | OP_ALG_AS_INITFINAL |
138 				 OP_ALG_DECRYPT);
139 		return;
140 	}
141 
142 	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
143 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
144 			 OP_ALG_DECRYPT);
145 	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
146 	set_jump_tgt_here(desc, jump_cmd);
147 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
148 			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
149 	set_jump_tgt_here(desc, uncond_jump_cmd);
150 }
151 
152 /*
153  * For aead functions, read payload and write payload,
154  * both of which are specified in req->src and req->dst
155  */
156 static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
157 {
158 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
159 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
160 			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
161 }
162 
163 /*
164  * For ablkcipher encrypt and decrypt, read from req->src and
165  * write to req->dst
166  */
167 static inline void ablkcipher_append_src_dst(u32 *desc)
168 {
169 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
170 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
171 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
172 			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
173 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
174 }
175 
176 /*
177  * per-session context
178  */
179 struct caam_ctx {
180 	struct device *jrdev;
181 	u32 sh_desc_enc[DESC_MAX_USED_LEN];
182 	u32 sh_desc_dec[DESC_MAX_USED_LEN];
183 	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
184 	dma_addr_t sh_desc_enc_dma;
185 	dma_addr_t sh_desc_dec_dma;
186 	dma_addr_t sh_desc_givenc_dma;
187 	u32 class1_alg_type;
188 	u32 class2_alg_type;
189 	u32 alg_op;
190 	u8 key[CAAM_MAX_KEY_SIZE];
191 	dma_addr_t key_dma;
192 	unsigned int enckeylen;
193 	unsigned int split_key_len;
194 	unsigned int split_key_pad_len;
195 	unsigned int authsize;
196 };
197 
198 static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
199 			    int keys_fit_inline, bool is_rfc3686)
200 {
201 	u32 *nonce;
202 	unsigned int enckeylen = ctx->enckeylen;
203 
204 	/*
205 	 * RFC3686 specific:
206 	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
207 	 *	| enckeylen = encryption key size + nonce size
208 	 */
209 	if (is_rfc3686)
210 		enckeylen -= CTR_RFC3686_NONCE_SIZE;
211 
212 	if (keys_fit_inline) {
213 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
214 				  ctx->split_key_len, CLASS_2 |
215 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
216 		append_key_as_imm(desc, (void *)ctx->key +
217 				  ctx->split_key_pad_len, enckeylen,
218 				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
219 	} else {
220 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
221 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
222 		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
223 			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
224 	}
225 
226 	/* Load Counter into CONTEXT1 reg */
227 	if (is_rfc3686) {
228 		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
229 			       enckeylen);
230 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
231 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
232 		append_move(desc,
233 			    MOVE_SRC_OUTFIFO |
234 			    MOVE_DEST_CLASS1CTX |
235 			    (16 << MOVE_OFFSET_SHIFT) |
236 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
237 	}
238 }
239 
240 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
241 				  int keys_fit_inline, bool is_rfc3686)
242 {
243 	u32 *key_jump_cmd;
244 
245 	/* Note: Context registers are saved. */
246 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
247 
248 	/* Skip if already shared */
249 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
250 				   JUMP_COND_SHRD);
251 
252 	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
253 
254 	set_jump_tgt_here(desc, key_jump_cmd);
255 }
256 
257 static int aead_null_set_sh_desc(struct crypto_aead *aead)
258 {
259 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
260 	struct device *jrdev = ctx->jrdev;
261 	bool keys_fit_inline = false;
262 	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
263 	u32 *desc;
264 
265 	/*
266 	 * Job Descriptor and Shared Descriptors
267 	 * must all fit into the 64-word Descriptor h/w Buffer
268 	 */
269 	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
270 	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
271 		keys_fit_inline = true;
272 
273 	/* aead_encrypt shared descriptor */
274 	desc = ctx->sh_desc_enc;
275 
276 	init_sh_desc(desc, HDR_SHARE_SERIAL);
277 
278 	/* Skip if already shared */
279 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
280 				   JUMP_COND_SHRD);
281 	if (keys_fit_inline)
282 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
283 				  ctx->split_key_len, CLASS_2 |
284 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
285 	else
286 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
287 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
288 	set_jump_tgt_here(desc, key_jump_cmd);
289 
290 	/* assoclen + cryptlen = seqinlen */
291 	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
292 
293 	/* Prepare to read and write cryptlen + assoclen bytes */
294 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
295 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
296 
297 	/*
298 	 * MOVE_LEN opcode is not available in all SEC HW revisions,
299 	 * thus need to do some magic, i.e. self-patch the descriptor
300 	 * buffer.
301 	 */
302 	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
303 				    MOVE_DEST_MATH3 |
304 				    (0x6 << MOVE_LEN_SHIFT));
305 	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
306 				     MOVE_DEST_DESCBUF |
307 				     MOVE_WAITCOMP |
308 				     (0x8 << MOVE_LEN_SHIFT));
309 
310 	/* Class 2 operation */
311 	append_operation(desc, ctx->class2_alg_type |
312 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
313 
314 	/* Read and write cryptlen bytes */
315 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
316 
317 	set_move_tgt_here(desc, read_move_cmd);
318 	set_move_tgt_here(desc, write_move_cmd);
319 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
320 	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
321 		    MOVE_AUX_LS);
322 
323 	/* Write ICV */
324 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
325 			 LDST_SRCDST_BYTE_CONTEXT);
326 
327 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
328 					      desc_bytes(desc),
329 					      DMA_TO_DEVICE);
330 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
331 		dev_err(jrdev, "unable to map shared descriptor\n");
332 		return -ENOMEM;
333 	}
334 #ifdef DEBUG
335 	print_hex_dump(KERN_ERR,
336 		       "aead null enc shdesc@"__stringify(__LINE__)": ",
337 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
338 		       desc_bytes(desc), 1);
339 #endif
340 
341 	/*
342 	 * Job Descriptor and Shared Descriptors
343 	 * must all fit into the 64-word Descriptor h/w Buffer
344 	 */
345 	keys_fit_inline = false;
346 	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
347 	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
348 		keys_fit_inline = true;
349 
350 	desc = ctx->sh_desc_dec;
351 
352 	/* aead_decrypt shared descriptor */
353 	init_sh_desc(desc, HDR_SHARE_SERIAL);
354 
355 	/* Skip if already shared */
356 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
357 				   JUMP_COND_SHRD);
358 	if (keys_fit_inline)
359 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
360 				  ctx->split_key_len, CLASS_2 |
361 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
362 	else
363 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
364 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
365 	set_jump_tgt_here(desc, key_jump_cmd);
366 
367 	/* Class 2 operation */
368 	append_operation(desc, ctx->class2_alg_type |
369 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
370 
371 	/* assoclen + cryptlen = seqoutlen */
372 	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
373 
374 	/* Prepare to read and write cryptlen + assoclen bytes */
375 	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
376 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
377 
378 	/*
379 	 * MOVE_LEN opcode is not available in all SEC HW revisions,
380 	 * thus need to do some magic, i.e. self-patch the descriptor
381 	 * buffer.
382 	 */
383 	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
384 				    MOVE_DEST_MATH2 |
385 				    (0x6 << MOVE_LEN_SHIFT));
386 	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
387 				     MOVE_DEST_DESCBUF |
388 				     MOVE_WAITCOMP |
389 				     (0x8 << MOVE_LEN_SHIFT));
390 
391 	/* Read and write cryptlen bytes */
392 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
393 
394 	/*
395 	 * Insert a NOP here, since we need at least 4 instructions between
396 	 * code patching the descriptor buffer and the location being patched.
397 	 */
398 	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
399 	set_jump_tgt_here(desc, jump_cmd);
400 
401 	set_move_tgt_here(desc, read_move_cmd);
402 	set_move_tgt_here(desc, write_move_cmd);
403 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
404 	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
405 		    MOVE_AUX_LS);
406 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
407 
408 	/* Load ICV */
409 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
410 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
411 
412 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
413 					      desc_bytes(desc),
414 					      DMA_TO_DEVICE);
415 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
416 		dev_err(jrdev, "unable to map shared descriptor\n");
417 		return -ENOMEM;
418 	}
419 #ifdef DEBUG
420 	print_hex_dump(KERN_ERR,
421 		       "aead null dec shdesc@"__stringify(__LINE__)": ",
422 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
423 		       desc_bytes(desc), 1);
424 #endif
425 
426 	return 0;
427 }
428 
429 static int aead_set_sh_desc(struct crypto_aead *aead)
430 {
431 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
432 						 struct caam_aead_alg, aead);
433 	unsigned int ivsize = crypto_aead_ivsize(aead);
434 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
435 	struct device *jrdev = ctx->jrdev;
436 	bool keys_fit_inline;
437 	u32 geniv, moveiv;
438 	u32 ctx1_iv_off = 0;
439 	u32 *desc;
440 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
441 			       OP_ALG_AAI_CTR_MOD128);
442 	const bool is_rfc3686 = alg->caam.rfc3686;
443 
444 	/* NULL encryption / decryption */
445 	if (!ctx->enckeylen)
446 		return aead_null_set_sh_desc(aead);
447 
448 	/*
449 	 * AES-CTR needs to load IV in CONTEXT1 reg
450 	 * at an offset of 128bits (16bytes)
451 	 * CONTEXT1[255:128] = IV
452 	 */
453 	if (ctr_mode)
454 		ctx1_iv_off = 16;
455 
456 	/*
457 	 * RFC3686 specific:
458 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
459 	 */
460 	if (is_rfc3686)
461 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
462 
463 	if (alg->caam.geniv)
464 		goto skip_enc;
465 
466 	/*
467 	 * Job Descriptor and Shared Descriptors
468 	 * must all fit into the 64-word Descriptor h/w Buffer
469 	 */
470 	keys_fit_inline = false;
471 	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
472 	    ctx->split_key_pad_len + ctx->enckeylen +
473 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
474 	    CAAM_DESC_BYTES_MAX)
475 		keys_fit_inline = true;
476 
477 	/* aead_encrypt shared descriptor */
478 	desc = ctx->sh_desc_enc;
479 
480 	/* Note: Context registers are saved. */
481 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
482 
483 	/* Class 2 operation */
484 	append_operation(desc, ctx->class2_alg_type |
485 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
486 
487 	/* Read and write assoclen bytes */
488 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
489 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
490 
491 	/* Skip assoc data */
492 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
493 
494 	/* read assoc before reading payload */
495 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
496 				      FIFOLDST_VLF);
497 
498 	/* Load Counter into CONTEXT1 reg */
499 	if (is_rfc3686)
500 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
501 				    LDST_CLASS_1_CCB |
502 				    LDST_SRCDST_BYTE_CONTEXT |
503 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
504 				     LDST_OFFSET_SHIFT));
505 
506 	/* Class 1 operation */
507 	append_operation(desc, ctx->class1_alg_type |
508 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
509 
510 	/* Read and write cryptlen bytes */
511 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
512 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
513 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
514 
515 	/* Write ICV */
516 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
517 			 LDST_SRCDST_BYTE_CONTEXT);
518 
519 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
520 					      desc_bytes(desc),
521 					      DMA_TO_DEVICE);
522 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
523 		dev_err(jrdev, "unable to map shared descriptor\n");
524 		return -ENOMEM;
525 	}
526 #ifdef DEBUG
527 	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
528 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
529 		       desc_bytes(desc), 1);
530 #endif
531 
532 skip_enc:
533 	/*
534 	 * Job Descriptor and Shared Descriptors
535 	 * must all fit into the 64-word Descriptor h/w Buffer
536 	 */
537 	keys_fit_inline = false;
538 	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
539 	    ctx->split_key_pad_len + ctx->enckeylen +
540 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
541 	    CAAM_DESC_BYTES_MAX)
542 		keys_fit_inline = true;
543 
544 	/* aead_decrypt shared descriptor */
545 	desc = ctx->sh_desc_dec;
546 
547 	/* Note: Context registers are saved. */
548 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
549 
550 	/* Class 2 operation */
551 	append_operation(desc, ctx->class2_alg_type |
552 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
553 
554 	/* Read and write assoclen bytes */
555 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
556 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
557 
558 	/* Skip assoc data */
559 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
560 
561 	/* read assoc before reading payload */
562 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
563 			     KEY_VLF);
564 
565 	/* Load Counter into CONTEXT1 reg */
566 	if (is_rfc3686)
567 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
568 				    LDST_CLASS_1_CCB |
569 				    LDST_SRCDST_BYTE_CONTEXT |
570 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
571 				     LDST_OFFSET_SHIFT));
572 
573 	/* Choose operation */
574 	if (ctr_mode)
575 		append_operation(desc, ctx->class1_alg_type |
576 				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
577 	else
578 		append_dec_op1(desc, ctx->class1_alg_type);
579 
580 	/* Read and write cryptlen bytes */
581 	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
582 	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
583 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
584 
585 	/* Load ICV */
586 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
587 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
588 
589 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
590 					      desc_bytes(desc),
591 					      DMA_TO_DEVICE);
592 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
593 		dev_err(jrdev, "unable to map shared descriptor\n");
594 		return -ENOMEM;
595 	}
596 #ifdef DEBUG
597 	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
598 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
599 		       desc_bytes(desc), 1);
600 #endif
601 
602 	if (!alg->caam.geniv)
603 		goto skip_givenc;
604 
605 	/*
606 	 * Job Descriptor and Shared Descriptors
607 	 * must all fit into the 64-word Descriptor h/w Buffer
608 	 */
609 	keys_fit_inline = false;
610 	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
611 	    ctx->split_key_pad_len + ctx->enckeylen +
612 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
613 	    CAAM_DESC_BYTES_MAX)
614 		keys_fit_inline = true;
615 
616 	/* aead_givencrypt shared descriptor */
617 	desc = ctx->sh_desc_givenc;
618 
619 	/* Note: Context registers are saved. */
620 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
621 
622 	if (is_rfc3686)
623 		goto copy_iv;
624 
625 	/* Generate IV */
626 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
627 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
628 		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
629 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
630 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
631 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
632 	append_move(desc, MOVE_WAITCOMP |
633 		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
634 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
635 		    (ivsize << MOVE_LEN_SHIFT));
636 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
637 
638 copy_iv:
639 	/* Copy IV to class 1 context */
640 	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
641 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
642 		    (ivsize << MOVE_LEN_SHIFT));
643 
644 	/* Return to encryption */
645 	append_operation(desc, ctx->class2_alg_type |
646 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
647 
648 	/* ivsize + cryptlen = seqoutlen - authsize */
649 	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
650 
651 	/* Read and write assoclen bytes */
652 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
653 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
654 
655 	/* Skip assoc data */
656 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
657 
658 	/* read assoc before reading payload */
659 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
660 			     KEY_VLF);
661 
662 	/* Copy iv from outfifo to class 2 fifo */
663 	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
664 		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
665 	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
666 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
667 	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
668 			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
669 
670 	/* Load Counter into CONTEXT1 reg */
671 	if (is_rfc3686)
672 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
673 				    LDST_CLASS_1_CCB |
674 				    LDST_SRCDST_BYTE_CONTEXT |
675 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
676 				     LDST_OFFSET_SHIFT));
677 
678 	/* Class 1 operation */
679 	append_operation(desc, ctx->class1_alg_type |
680 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
681 
682 	/* Will write ivsize + cryptlen */
683 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
684 
685 	/* Not need to reload iv */
686 	append_seq_fifo_load(desc, ivsize,
687 			     FIFOLD_CLASS_SKIP);
688 
689 	/* Will read cryptlen */
690 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
691 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
692 
693 	/* Write ICV */
694 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
695 			 LDST_SRCDST_BYTE_CONTEXT);
696 
697 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
698 					      desc_bytes(desc),
699 					      DMA_TO_DEVICE);
700 	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
701 		dev_err(jrdev, "unable to map shared descriptor\n");
702 		return -ENOMEM;
703 	}
704 #ifdef DEBUG
705 	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
706 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
707 		       desc_bytes(desc), 1);
708 #endif
709 
710 skip_givenc:
711 	return 0;
712 }
713 
714 static int aead_setauthsize(struct crypto_aead *authenc,
715 				    unsigned int authsize)
716 {
717 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
718 
719 	ctx->authsize = authsize;
720 	aead_set_sh_desc(authenc);
721 
722 	return 0;
723 }
724 
725 static int gcm_set_sh_desc(struct crypto_aead *aead)
726 {
727 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
728 	struct device *jrdev = ctx->jrdev;
729 	bool keys_fit_inline = false;
730 	u32 *key_jump_cmd, *zero_payload_jump_cmd,
731 	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
732 	u32 *desc;
733 
734 	if (!ctx->enckeylen || !ctx->authsize)
735 		return 0;
736 
737 	/*
738 	 * AES GCM encrypt shared descriptor
739 	 * Job Descriptor and Shared Descriptor
740 	 * must fit into the 64-word Descriptor h/w Buffer
741 	 */
742 	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
743 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
744 		keys_fit_inline = true;
745 
746 	desc = ctx->sh_desc_enc;
747 
748 	init_sh_desc(desc, HDR_SHARE_SERIAL);
749 
750 	/* skip key loading if they are loaded due to sharing */
751 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
752 				   JUMP_COND_SHRD | JUMP_COND_SELF);
753 	if (keys_fit_inline)
754 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
755 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
756 	else
757 		append_key(desc, ctx->key_dma, ctx->enckeylen,
758 			   CLASS_1 | KEY_DEST_CLASS_REG);
759 	set_jump_tgt_here(desc, key_jump_cmd);
760 
761 	/* class 1 operation */
762 	append_operation(desc, ctx->class1_alg_type |
763 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
764 
765 	/* if assoclen + cryptlen is ZERO, skip to ICV write */
766 	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
767 	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
768 						 JUMP_COND_MATH_Z);
769 
770 	/* if assoclen is ZERO, skip reading the assoc data */
771 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
772 	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
773 						 JUMP_COND_MATH_Z);
774 
775 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
776 
777 	/* skip assoc data */
778 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
779 
780 	/* cryptlen = seqinlen - assoclen */
781 	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
782 
783 	/* if cryptlen is ZERO jump to zero-payload commands */
784 	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
785 					    JUMP_COND_MATH_Z);
786 
787 	/* read assoc data */
788 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
789 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
790 	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
791 
792 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
793 
794 	/* write encrypted data */
795 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
796 
797 	/* read payload data */
798 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
799 			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
800 
801 	/* jump the zero-payload commands */
802 	append_jump(desc, JUMP_TEST_ALL | 2);
803 
804 	/* zero-payload commands */
805 	set_jump_tgt_here(desc, zero_payload_jump_cmd);
806 
807 	/* read assoc data */
808 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
809 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
810 
811 	/* There is no input data */
812 	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
813 
814 	/* write ICV */
815 	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
816 			 LDST_SRCDST_BYTE_CONTEXT);
817 
818 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
819 					      desc_bytes(desc),
820 					      DMA_TO_DEVICE);
821 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
822 		dev_err(jrdev, "unable to map shared descriptor\n");
823 		return -ENOMEM;
824 	}
825 #ifdef DEBUG
826 	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
827 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
828 		       desc_bytes(desc), 1);
829 #endif
830 
831 	/*
832 	 * Job Descriptor and Shared Descriptors
833 	 * must all fit into the 64-word Descriptor h/w Buffer
834 	 */
835 	keys_fit_inline = false;
836 	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
837 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
838 		keys_fit_inline = true;
839 
840 	desc = ctx->sh_desc_dec;
841 
842 	init_sh_desc(desc, HDR_SHARE_SERIAL);
843 
844 	/* skip key loading if they are loaded due to sharing */
845 	key_jump_cmd = append_jump(desc, JUMP_JSL |
846 				   JUMP_TEST_ALL | JUMP_COND_SHRD |
847 				   JUMP_COND_SELF);
848 	if (keys_fit_inline)
849 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
850 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
851 	else
852 		append_key(desc, ctx->key_dma, ctx->enckeylen,
853 			   CLASS_1 | KEY_DEST_CLASS_REG);
854 	set_jump_tgt_here(desc, key_jump_cmd);
855 
856 	/* class 1 operation */
857 	append_operation(desc, ctx->class1_alg_type |
858 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
859 
860 	/* if assoclen is ZERO, skip reading the assoc data */
861 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
862 	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
863 						 JUMP_COND_MATH_Z);
864 
865 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
866 
867 	/* skip assoc data */
868 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
869 
870 	/* read assoc data */
871 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
872 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
873 
874 	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
875 
876 	/* cryptlen = seqoutlen - assoclen */
877 	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
878 
879 	/* jump to zero-payload command if cryptlen is zero */
880 	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
881 					    JUMP_COND_MATH_Z);
882 
883 	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
884 
885 	/* store encrypted data */
886 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
887 
888 	/* read payload data */
889 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
890 			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
891 
892 	/* zero-payload command */
893 	set_jump_tgt_here(desc, zero_payload_jump_cmd);
894 
895 	/* read ICV */
896 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
897 			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
898 
899 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
900 					      desc_bytes(desc),
901 					      DMA_TO_DEVICE);
902 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
903 		dev_err(jrdev, "unable to map shared descriptor\n");
904 		return -ENOMEM;
905 	}
906 #ifdef DEBUG
907 	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
908 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
909 		       desc_bytes(desc), 1);
910 #endif
911 
912 	return 0;
913 }
914 
915 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
916 {
917 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
918 
919 	ctx->authsize = authsize;
920 	gcm_set_sh_desc(authenc);
921 
922 	return 0;
923 }
924 
925 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
926 {
927 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
928 	struct device *jrdev = ctx->jrdev;
929 	bool keys_fit_inline = false;
930 	u32 *key_jump_cmd;
931 	u32 *desc;
932 
933 	if (!ctx->enckeylen || !ctx->authsize)
934 		return 0;
935 
936 	/*
937 	 * RFC4106 encrypt shared descriptor
938 	 * Job Descriptor and Shared Descriptor
939 	 * must fit into the 64-word Descriptor h/w Buffer
940 	 */
941 	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
942 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
943 		keys_fit_inline = true;
944 
945 	desc = ctx->sh_desc_enc;
946 
947 	init_sh_desc(desc, HDR_SHARE_SERIAL);
948 
949 	/* Skip key loading if it is loaded due to sharing */
950 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
951 				   JUMP_COND_SHRD);
952 	if (keys_fit_inline)
953 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
954 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
955 	else
956 		append_key(desc, ctx->key_dma, ctx->enckeylen,
957 			   CLASS_1 | KEY_DEST_CLASS_REG);
958 	set_jump_tgt_here(desc, key_jump_cmd);
959 
960 	/* Class 1 operation */
961 	append_operation(desc, ctx->class1_alg_type |
962 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
963 
964 	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
965 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
966 
967 	/* Read assoc data */
968 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
969 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
970 
971 	/* Skip IV */
972 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
973 
974 	/* Will read cryptlen bytes */
975 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
976 
977 	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
978 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
979 
980 	/* Skip assoc data */
981 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
982 
983 	/* cryptlen = seqoutlen - assoclen */
984 	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
985 
986 	/* Write encrypted data */
987 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
988 
989 	/* Read payload data */
990 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
991 			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
992 
993 	/* Write ICV */
994 	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
995 			 LDST_SRCDST_BYTE_CONTEXT);
996 
997 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
998 					      desc_bytes(desc),
999 					      DMA_TO_DEVICE);
1000 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1001 		dev_err(jrdev, "unable to map shared descriptor\n");
1002 		return -ENOMEM;
1003 	}
1004 #ifdef DEBUG
1005 	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
1006 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1007 		       desc_bytes(desc), 1);
1008 #endif
1009 
1010 	/*
1011 	 * Job Descriptor and Shared Descriptors
1012 	 * must all fit into the 64-word Descriptor h/w Buffer
1013 	 */
1014 	keys_fit_inline = false;
1015 	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
1016 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1017 		keys_fit_inline = true;
1018 
1019 	desc = ctx->sh_desc_dec;
1020 
1021 	init_sh_desc(desc, HDR_SHARE_SERIAL);
1022 
1023 	/* Skip key loading if it is loaded due to sharing */
1024 	key_jump_cmd = append_jump(desc, JUMP_JSL |
1025 				   JUMP_TEST_ALL | JUMP_COND_SHRD);
1026 	if (keys_fit_inline)
1027 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1028 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1029 	else
1030 		append_key(desc, ctx->key_dma, ctx->enckeylen,
1031 			   CLASS_1 | KEY_DEST_CLASS_REG);
1032 	set_jump_tgt_here(desc, key_jump_cmd);
1033 
1034 	/* Class 1 operation */
1035 	append_operation(desc, ctx->class1_alg_type |
1036 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1037 
1038 	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
1039 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1040 
1041 	/* Read assoc data */
1042 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1043 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1044 
1045 	/* Skip IV */
1046 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1047 
1048 	/* Will read cryptlen bytes */
1049 	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
1050 
1051 	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
1052 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
1053 
1054 	/* Skip assoc data */
1055 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
1056 
1057 	/* Will write cryptlen bytes */
1058 	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1059 
1060 	/* Store payload data */
1061 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1062 
1063 	/* Read encrypted data */
1064 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1065 			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
1066 
1067 	/* Read ICV */
1068 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1069 			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1070 
1071 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1072 					      desc_bytes(desc),
1073 					      DMA_TO_DEVICE);
1074 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1075 		dev_err(jrdev, "unable to map shared descriptor\n");
1076 		return -ENOMEM;
1077 	}
1078 #ifdef DEBUG
1079 	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
1080 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1081 		       desc_bytes(desc), 1);
1082 #endif
1083 
1084 	return 0;
1085 }
1086 
1087 static int rfc4106_setauthsize(struct crypto_aead *authenc,
1088 			       unsigned int authsize)
1089 {
1090 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1091 
1092 	ctx->authsize = authsize;
1093 	rfc4106_set_sh_desc(authenc);
1094 
1095 	return 0;
1096 }
1097 
1098 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1099 {
1100 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1101 	struct device *jrdev = ctx->jrdev;
1102 	bool keys_fit_inline = false;
1103 	u32 *key_jump_cmd;
1104 	u32 *read_move_cmd, *write_move_cmd;
1105 	u32 *desc;
1106 
1107 	if (!ctx->enckeylen || !ctx->authsize)
1108 		return 0;
1109 
1110 	/*
1111 	 * RFC4543 encrypt shared descriptor
1112 	 * Job Descriptor and Shared Descriptor
1113 	 * must fit into the 64-word Descriptor h/w Buffer
1114 	 */
1115 	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
1116 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1117 		keys_fit_inline = true;
1118 
1119 	desc = ctx->sh_desc_enc;
1120 
1121 	init_sh_desc(desc, HDR_SHARE_SERIAL);
1122 
1123 	/* Skip key loading if it is loaded due to sharing */
1124 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1125 				   JUMP_COND_SHRD);
1126 	if (keys_fit_inline)
1127 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1128 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1129 	else
1130 		append_key(desc, ctx->key_dma, ctx->enckeylen,
1131 			   CLASS_1 | KEY_DEST_CLASS_REG);
1132 	set_jump_tgt_here(desc, key_jump_cmd);
1133 
1134 	/* Class 1 operation */
1135 	append_operation(desc, ctx->class1_alg_type |
1136 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1137 
1138 	/* assoclen + cryptlen = seqinlen */
1139 	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
1140 
1141 	/*
1142 	 * MOVE_LEN opcode is not available in all SEC HW revisions,
1143 	 * thus need to do some magic, i.e. self-patch the descriptor
1144 	 * buffer.
1145 	 */
1146 	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1147 				    (0x6 << MOVE_LEN_SHIFT));
1148 	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1149 				     (0x8 << MOVE_LEN_SHIFT));
1150 
1151 	/* Will read assoclen + cryptlen bytes */
1152 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1153 
1154 	/* Will write assoclen + cryptlen bytes */
1155 	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1156 
1157 	/* Read and write assoclen + cryptlen bytes */
1158 	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1159 
1160 	set_move_tgt_here(desc, read_move_cmd);
1161 	set_move_tgt_here(desc, write_move_cmd);
1162 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1163 	/* Move payload data to OFIFO */
1164 	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1165 
1166 	/* Write ICV */
1167 	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1168 			 LDST_SRCDST_BYTE_CONTEXT);
1169 
1170 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1171 					      desc_bytes(desc),
1172 					      DMA_TO_DEVICE);
1173 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1174 		dev_err(jrdev, "unable to map shared descriptor\n");
1175 		return -ENOMEM;
1176 	}
1177 #ifdef DEBUG
1178 	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
1179 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1180 		       desc_bytes(desc), 1);
1181 #endif
1182 
1183 	/*
1184 	 * Job Descriptor and Shared Descriptors
1185 	 * must all fit into the 64-word Descriptor h/w Buffer
1186 	 */
1187 	keys_fit_inline = false;
1188 	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
1189 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1190 		keys_fit_inline = true;
1191 
1192 	desc = ctx->sh_desc_dec;
1193 
1194 	init_sh_desc(desc, HDR_SHARE_SERIAL);
1195 
1196 	/* Skip key loading if it is loaded due to sharing */
1197 	key_jump_cmd = append_jump(desc, JUMP_JSL |
1198 				   JUMP_TEST_ALL | JUMP_COND_SHRD);
1199 	if (keys_fit_inline)
1200 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1201 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1202 	else
1203 		append_key(desc, ctx->key_dma, ctx->enckeylen,
1204 			   CLASS_1 | KEY_DEST_CLASS_REG);
1205 	set_jump_tgt_here(desc, key_jump_cmd);
1206 
1207 	/* Class 1 operation */
1208 	append_operation(desc, ctx->class1_alg_type |
1209 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1210 
1211 	/* assoclen + cryptlen = seqoutlen */
1212 	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1213 
1214 	/*
1215 	 * MOVE_LEN opcode is not available in all SEC HW revisions,
1216 	 * thus need to do some magic, i.e. self-patch the descriptor
1217 	 * buffer.
1218 	 */
1219 	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1220 				    (0x6 << MOVE_LEN_SHIFT));
1221 	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1222 				     (0x8 << MOVE_LEN_SHIFT));
1223 
1224 	/* Will read assoclen + cryptlen bytes */
1225 	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1226 
1227 	/* Will write assoclen + cryptlen bytes */
1228 	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1229 
1230 	/* Store payload data */
1231 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1232 
1233 	/* In-snoop assoclen + cryptlen data */
1234 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1235 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1236 
1237 	set_move_tgt_here(desc, read_move_cmd);
1238 	set_move_tgt_here(desc, write_move_cmd);
1239 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1240 	/* Move payload data to OFIFO */
1241 	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1242 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1243 
1244 	/* Read ICV */
1245 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1246 			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1247 
1248 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1249 					      desc_bytes(desc),
1250 					      DMA_TO_DEVICE);
1251 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1252 		dev_err(jrdev, "unable to map shared descriptor\n");
1253 		return -ENOMEM;
1254 	}
1255 #ifdef DEBUG
1256 	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
1257 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1258 		       desc_bytes(desc), 1);
1259 #endif
1260 
1261 	return 0;
1262 }
1263 
1264 static int rfc4543_setauthsize(struct crypto_aead *authenc,
1265 			       unsigned int authsize)
1266 {
1267 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1268 
1269 	ctx->authsize = authsize;
1270 	rfc4543_set_sh_desc(authenc);
1271 
1272 	return 0;
1273 }
1274 
1275 static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
1276 			      u32 authkeylen)
1277 {
1278 	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
1279 			       ctx->split_key_pad_len, key_in, authkeylen,
1280 			       ctx->alg_op);
1281 }
1282 
1283 static int aead_setkey(struct crypto_aead *aead,
1284 			       const u8 *key, unsigned int keylen)
1285 {
1286 	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
1287 	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
1288 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1289 	struct device *jrdev = ctx->jrdev;
1290 	struct crypto_authenc_keys keys;
1291 	int ret = 0;
1292 
1293 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1294 		goto badkey;
1295 
1296 	/* Pick class 2 key length from algorithm submask */
1297 	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1298 				      OP_ALG_ALGSEL_SHIFT] * 2;
1299 	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
1300 
1301 	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1302 		goto badkey;
1303 
1304 #ifdef DEBUG
1305 	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
1306 	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
1307 	       keys.authkeylen);
1308 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1309 	       ctx->split_key_len, ctx->split_key_pad_len);
1310 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1311 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1312 #endif
1313 
1314 	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
1315 	if (ret) {
1316 		goto badkey;
1317 	}
1318 
1319 	/* postpend encryption key to auth split key */
1320 	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
1321 
1322 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
1323 				      keys.enckeylen, DMA_TO_DEVICE);
1324 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1325 		dev_err(jrdev, "unable to map key i/o memory\n");
1326 		return -ENOMEM;
1327 	}
1328 #ifdef DEBUG
1329 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1330 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1331 		       ctx->split_key_pad_len + keys.enckeylen, 1);
1332 #endif
1333 
1334 	ctx->enckeylen = keys.enckeylen;
1335 
1336 	ret = aead_set_sh_desc(aead);
1337 	if (ret) {
1338 		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
1339 				 keys.enckeylen, DMA_TO_DEVICE);
1340 	}
1341 
1342 	return ret;
1343 badkey:
1344 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1345 	return -EINVAL;
1346 }
1347 
1348 static int gcm_setkey(struct crypto_aead *aead,
1349 		      const u8 *key, unsigned int keylen)
1350 {
1351 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1352 	struct device *jrdev = ctx->jrdev;
1353 	int ret = 0;
1354 
1355 #ifdef DEBUG
1356 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1357 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1358 #endif
1359 
1360 	memcpy(ctx->key, key, keylen);
1361 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1362 				      DMA_TO_DEVICE);
1363 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1364 		dev_err(jrdev, "unable to map key i/o memory\n");
1365 		return -ENOMEM;
1366 	}
1367 	ctx->enckeylen = keylen;
1368 
1369 	ret = gcm_set_sh_desc(aead);
1370 	if (ret) {
1371 		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1372 				 DMA_TO_DEVICE);
1373 	}
1374 
1375 	return ret;
1376 }
1377 
1378 static int rfc4106_setkey(struct crypto_aead *aead,
1379 			  const u8 *key, unsigned int keylen)
1380 {
1381 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1382 	struct device *jrdev = ctx->jrdev;
1383 	int ret = 0;
1384 
1385 	if (keylen < 4)
1386 		return -EINVAL;
1387 
1388 #ifdef DEBUG
1389 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1390 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1391 #endif
1392 
1393 	memcpy(ctx->key, key, keylen);
1394 
1395 	/*
1396 	 * The last four bytes of the key material are used as the salt value
1397 	 * in the nonce. Update the AES key length.
1398 	 */
1399 	ctx->enckeylen = keylen - 4;
1400 
1401 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1402 				      DMA_TO_DEVICE);
1403 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1404 		dev_err(jrdev, "unable to map key i/o memory\n");
1405 		return -ENOMEM;
1406 	}
1407 
1408 	ret = rfc4106_set_sh_desc(aead);
1409 	if (ret) {
1410 		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1411 				 DMA_TO_DEVICE);
1412 	}
1413 
1414 	return ret;
1415 }
1416 
1417 static int rfc4543_setkey(struct crypto_aead *aead,
1418 			  const u8 *key, unsigned int keylen)
1419 {
1420 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1421 	struct device *jrdev = ctx->jrdev;
1422 	int ret = 0;
1423 
1424 	if (keylen < 4)
1425 		return -EINVAL;
1426 
1427 #ifdef DEBUG
1428 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1429 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1430 #endif
1431 
1432 	memcpy(ctx->key, key, keylen);
1433 
1434 	/*
1435 	 * The last four bytes of the key material are used as the salt value
1436 	 * in the nonce. Update the AES key length.
1437 	 */
1438 	ctx->enckeylen = keylen - 4;
1439 
1440 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1441 				      DMA_TO_DEVICE);
1442 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1443 		dev_err(jrdev, "unable to map key i/o memory\n");
1444 		return -ENOMEM;
1445 	}
1446 
1447 	ret = rfc4543_set_sh_desc(aead);
1448 	if (ret) {
1449 		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1450 				 DMA_TO_DEVICE);
1451 	}
1452 
1453 	return ret;
1454 }
1455 
1456 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1457 			     const u8 *key, unsigned int keylen)
1458 {
1459 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1460 	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1461 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1462 	const char *alg_name = crypto_tfm_alg_name(tfm);
1463 	struct device *jrdev = ctx->jrdev;
1464 	int ret = 0;
1465 	u32 *key_jump_cmd;
1466 	u32 *desc;
1467 	u32 *nonce;
1468 	u32 geniv;
1469 	u32 ctx1_iv_off = 0;
1470 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1471 			       OP_ALG_AAI_CTR_MOD128);
1472 	const bool is_rfc3686 = (ctr_mode &&
1473 				 (strstr(alg_name, "rfc3686") != NULL));
1474 
1475 #ifdef DEBUG
1476 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1477 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1478 #endif
1479 	/*
1480 	 * AES-CTR needs to load IV in CONTEXT1 reg
1481 	 * at an offset of 128bits (16bytes)
1482 	 * CONTEXT1[255:128] = IV
1483 	 */
1484 	if (ctr_mode)
1485 		ctx1_iv_off = 16;
1486 
1487 	/*
1488 	 * RFC3686 specific:
1489 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1490 	 *	| *key = {KEY, NONCE}
1491 	 */
1492 	if (is_rfc3686) {
1493 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1494 		keylen -= CTR_RFC3686_NONCE_SIZE;
1495 	}
1496 
1497 	memcpy(ctx->key, key, keylen);
1498 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1499 				      DMA_TO_DEVICE);
1500 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1501 		dev_err(jrdev, "unable to map key i/o memory\n");
1502 		return -ENOMEM;
1503 	}
1504 	ctx->enckeylen = keylen;
1505 
1506 	/* ablkcipher_encrypt shared descriptor */
1507 	desc = ctx->sh_desc_enc;
1508 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1509 	/* Skip if already shared */
1510 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1511 				   JUMP_COND_SHRD);
1512 
1513 	/* Load class1 key only */
1514 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1515 			  ctx->enckeylen, CLASS_1 |
1516 			  KEY_DEST_CLASS_REG);
1517 
1518 	/* Load nonce into CONTEXT1 reg */
1519 	if (is_rfc3686) {
1520 		nonce = (u32 *)(key + keylen);
1521 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1522 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1523 		append_move(desc, MOVE_WAITCOMP |
1524 			    MOVE_SRC_OUTFIFO |
1525 			    MOVE_DEST_CLASS1CTX |
1526 			    (16 << MOVE_OFFSET_SHIFT) |
1527 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1528 	}
1529 
1530 	set_jump_tgt_here(desc, key_jump_cmd);
1531 
1532 	/* Load iv */
1533 	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1534 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1535 
1536 	/* Load counter into CONTEXT1 reg */
1537 	if (is_rfc3686)
1538 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1539 				    LDST_CLASS_1_CCB |
1540 				    LDST_SRCDST_BYTE_CONTEXT |
1541 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1542 				     LDST_OFFSET_SHIFT));
1543 
1544 	/* Load operation */
1545 	append_operation(desc, ctx->class1_alg_type |
1546 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1547 
1548 	/* Perform operation */
1549 	ablkcipher_append_src_dst(desc);
1550 
1551 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1552 					      desc_bytes(desc),
1553 					      DMA_TO_DEVICE);
1554 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1555 		dev_err(jrdev, "unable to map shared descriptor\n");
1556 		return -ENOMEM;
1557 	}
1558 #ifdef DEBUG
1559 	print_hex_dump(KERN_ERR,
1560 		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
1561 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1562 		       desc_bytes(desc), 1);
1563 #endif
1564 	/* ablkcipher_decrypt shared descriptor */
1565 	desc = ctx->sh_desc_dec;
1566 
1567 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1568 	/* Skip if already shared */
1569 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1570 				   JUMP_COND_SHRD);
1571 
1572 	/* Load class1 key only */
1573 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1574 			  ctx->enckeylen, CLASS_1 |
1575 			  KEY_DEST_CLASS_REG);
1576 
1577 	/* Load nonce into CONTEXT1 reg */
1578 	if (is_rfc3686) {
1579 		nonce = (u32 *)(key + keylen);
1580 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1581 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1582 		append_move(desc, MOVE_WAITCOMP |
1583 			    MOVE_SRC_OUTFIFO |
1584 			    MOVE_DEST_CLASS1CTX |
1585 			    (16 << MOVE_OFFSET_SHIFT) |
1586 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1587 	}
1588 
1589 	set_jump_tgt_here(desc, key_jump_cmd);
1590 
1591 	/* load IV */
1592 	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1593 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1594 
1595 	/* Load counter into CONTEXT1 reg */
1596 	if (is_rfc3686)
1597 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1598 				    LDST_CLASS_1_CCB |
1599 				    LDST_SRCDST_BYTE_CONTEXT |
1600 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1601 				     LDST_OFFSET_SHIFT));
1602 
1603 	/* Choose operation */
1604 	if (ctr_mode)
1605 		append_operation(desc, ctx->class1_alg_type |
1606 				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1607 	else
1608 		append_dec_op1(desc, ctx->class1_alg_type);
1609 
1610 	/* Perform operation */
1611 	ablkcipher_append_src_dst(desc);
1612 
1613 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1614 					      desc_bytes(desc),
1615 					      DMA_TO_DEVICE);
1616 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1617 		dev_err(jrdev, "unable to map shared descriptor\n");
1618 		return -ENOMEM;
1619 	}
1620 
1621 #ifdef DEBUG
1622 	print_hex_dump(KERN_ERR,
1623 		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
1624 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1625 		       desc_bytes(desc), 1);
1626 #endif
1627 	/* ablkcipher_givencrypt shared descriptor */
1628 	desc = ctx->sh_desc_givenc;
1629 
1630 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1631 	/* Skip if already shared */
1632 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1633 				   JUMP_COND_SHRD);
1634 
1635 	/* Load class1 key only */
1636 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1637 			  ctx->enckeylen, CLASS_1 |
1638 			  KEY_DEST_CLASS_REG);
1639 
1640 	/* Load Nonce into CONTEXT1 reg */
1641 	if (is_rfc3686) {
1642 		nonce = (u32 *)(key + keylen);
1643 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1644 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1645 		append_move(desc, MOVE_WAITCOMP |
1646 			    MOVE_SRC_OUTFIFO |
1647 			    MOVE_DEST_CLASS1CTX |
1648 			    (16 << MOVE_OFFSET_SHIFT) |
1649 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1650 	}
1651 	set_jump_tgt_here(desc, key_jump_cmd);
1652 
1653 	/* Generate IV */
1654 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1655 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1656 		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1657 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1658 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1659 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1660 	append_move(desc, MOVE_WAITCOMP |
1661 		    MOVE_SRC_INFIFO |
1662 		    MOVE_DEST_CLASS1CTX |
1663 		    (crt->ivsize << MOVE_LEN_SHIFT) |
1664 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1665 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1666 
1667 	/* Copy generated IV to memory */
1668 	append_seq_store(desc, crt->ivsize,
1669 			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1670 			 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1671 
1672 	/* Load Counter into CONTEXT1 reg */
1673 	if (is_rfc3686)
1674 		append_load_imm_u32(desc, (u32)1, LDST_IMM |
1675 				    LDST_CLASS_1_CCB |
1676 				    LDST_SRCDST_BYTE_CONTEXT |
1677 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1678 				     LDST_OFFSET_SHIFT));
1679 
1680 	if (ctx1_iv_off)
1681 		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1682 			    (1 << JUMP_OFFSET_SHIFT));
1683 
1684 	/* Load operation */
1685 	append_operation(desc, ctx->class1_alg_type |
1686 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1687 
1688 	/* Perform operation */
1689 	ablkcipher_append_src_dst(desc);
1690 
1691 	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1692 						 desc_bytes(desc),
1693 						 DMA_TO_DEVICE);
1694 	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1695 		dev_err(jrdev, "unable to map shared descriptor\n");
1696 		return -ENOMEM;
1697 	}
1698 #ifdef DEBUG
1699 	print_hex_dump(KERN_ERR,
1700 		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1701 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1702 		       desc_bytes(desc), 1);
1703 #endif
1704 
1705 	return ret;
1706 }
1707 
1708 /*
1709  * aead_edesc - s/w-extended aead descriptor
1710  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
1711  * @assoc_chained: if source is chained
1712  * @src_nents: number of segments in input scatterlist
1713  * @src_chained: if source is chained
1714  * @dst_nents: number of segments in output scatterlist
1715  * @dst_chained: if destination is chained
1716  * @iv_dma: dma address of iv for checking continuity and link table
1717  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
1718  * @sec4_sg_bytes: length of dma mapped sec4_sg space
1719  * @sec4_sg_dma: bus physical mapped address of h/w link table
1720  * @hw_desc: the h/w job descriptor followed by any referenced link tables
1721  */
1722 struct aead_edesc {
1723 	int assoc_nents;
1724 	bool assoc_chained;
1725 	int src_nents;
1726 	bool src_chained;
1727 	int dst_nents;
1728 	bool dst_chained;
1729 	dma_addr_t iv_dma;
1730 	int sec4_sg_bytes;
1731 	dma_addr_t sec4_sg_dma;
1732 	struct sec4_sg_entry *sec4_sg;
1733 	u32 hw_desc[];
1734 };
1735 
1736 /*
1737  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1738  * @src_nents: number of segments in input scatterlist
1739  * @src_chained: if source is chained
1740  * @dst_nents: number of segments in output scatterlist
1741  * @dst_chained: if destination is chained
1742  * @iv_dma: dma address of iv for checking continuity and link table
1743  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
1744  * @sec4_sg_bytes: length of dma mapped sec4_sg space
1745  * @sec4_sg_dma: bus physical mapped address of h/w link table
1746  * @hw_desc: the h/w job descriptor followed by any referenced link tables
1747  */
1748 struct ablkcipher_edesc {
1749 	int src_nents;
1750 	bool src_chained;
1751 	int dst_nents;
1752 	bool dst_chained;
1753 	dma_addr_t iv_dma;
1754 	int sec4_sg_bytes;
1755 	dma_addr_t sec4_sg_dma;
1756 	struct sec4_sg_entry *sec4_sg;
1757 	u32 hw_desc[0];
1758 };
1759 
1760 static void caam_unmap(struct device *dev, struct scatterlist *src,
1761 		       struct scatterlist *dst, int src_nents,
1762 		       bool src_chained, int dst_nents, bool dst_chained,
1763 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1764 		       int sec4_sg_bytes)
1765 {
1766 	if (dst != src) {
1767 		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
1768 				     src_chained);
1769 		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
1770 				     dst_chained);
1771 	} else {
1772 		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
1773 				     DMA_BIDIRECTIONAL, src_chained);
1774 	}
1775 
1776 	if (iv_dma)
1777 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1778 	if (sec4_sg_bytes)
1779 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
1780 				 DMA_TO_DEVICE);
1781 }
1782 
1783 static void aead_unmap(struct device *dev,
1784 		       struct aead_edesc *edesc,
1785 		       struct aead_request *req)
1786 {
1787 	caam_unmap(dev, req->src, req->dst,
1788 		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1789 		   edesc->dst_chained, 0, 0,
1790 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1791 }
1792 
1793 static void ablkcipher_unmap(struct device *dev,
1794 			     struct ablkcipher_edesc *edesc,
1795 			     struct ablkcipher_request *req)
1796 {
1797 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1798 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1799 
1800 	caam_unmap(dev, req->src, req->dst,
1801 		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1802 		   edesc->dst_chained, edesc->iv_dma, ivsize,
1803 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1804 }
1805 
1806 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1807 				   void *context)
1808 {
1809 	struct aead_request *req = context;
1810 	struct aead_edesc *edesc;
1811 
1812 #ifdef DEBUG
1813 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1814 #endif
1815 
1816 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1817 
1818 	if (err)
1819 		caam_jr_strstatus(jrdev, err);
1820 
1821 	aead_unmap(jrdev, edesc, req);
1822 
1823 	kfree(edesc);
1824 
1825 	aead_request_complete(req, err);
1826 }
1827 
1828 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1829 				   void *context)
1830 {
1831 	struct aead_request *req = context;
1832 	struct aead_edesc *edesc;
1833 
1834 #ifdef DEBUG
1835 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1836 #endif
1837 
1838 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1839 
1840 	if (err)
1841 		caam_jr_strstatus(jrdev, err);
1842 
1843 	aead_unmap(jrdev, edesc, req);
1844 
1845 	/*
1846 	 * verify hw auth check passed else return -EBADMSG
1847 	 */
1848 	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1849 		err = -EBADMSG;
1850 
1851 	kfree(edesc);
1852 
1853 	aead_request_complete(req, err);
1854 }
1855 
1856 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1857 				   void *context)
1858 {
1859 	struct ablkcipher_request *req = context;
1860 	struct ablkcipher_edesc *edesc;
1861 #ifdef DEBUG
1862 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1863 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1864 
1865 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1866 #endif
1867 
1868 	edesc = (struct ablkcipher_edesc *)((char *)desc -
1869 		 offsetof(struct ablkcipher_edesc, hw_desc));
1870 
1871 	if (err)
1872 		caam_jr_strstatus(jrdev, err);
1873 
1874 #ifdef DEBUG
1875 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
1876 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1877 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
1878 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
1879 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1880 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1881 #endif
1882 
1883 	ablkcipher_unmap(jrdev, edesc, req);
1884 	kfree(edesc);
1885 
1886 	ablkcipher_request_complete(req, err);
1887 }
1888 
1889 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1890 				    void *context)
1891 {
1892 	struct ablkcipher_request *req = context;
1893 	struct ablkcipher_edesc *edesc;
1894 #ifdef DEBUG
1895 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1896 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1897 
1898 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1899 #endif
1900 
1901 	edesc = (struct ablkcipher_edesc *)((char *)desc -
1902 		 offsetof(struct ablkcipher_edesc, hw_desc));
1903 	if (err)
1904 		caam_jr_strstatus(jrdev, err);
1905 
1906 #ifdef DEBUG
1907 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
1908 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1909 		       ivsize, 1);
1910 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
1911 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1912 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1913 #endif
1914 
1915 	ablkcipher_unmap(jrdev, edesc, req);
1916 	kfree(edesc);
1917 
1918 	ablkcipher_request_complete(req, err);
1919 }
1920 
1921 /*
1922  * Fill in aead job descriptor
1923  */
1924 static void init_aead_job(struct aead_request *req,
1925 			  struct aead_edesc *edesc,
1926 			  bool all_contig, bool encrypt)
1927 {
1928 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1929 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1930 	int authsize = ctx->authsize;
1931 	u32 *desc = edesc->hw_desc;
1932 	u32 out_options, in_options;
1933 	dma_addr_t dst_dma, src_dma;
1934 	int len, sec4_sg_index = 0;
1935 	dma_addr_t ptr;
1936 	u32 *sh_desc;
1937 
1938 	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
1939 	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
1940 
1941 	len = desc_len(sh_desc);
1942 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1943 
1944 	if (all_contig) {
1945 		src_dma = sg_dma_address(req->src);
1946 		in_options = 0;
1947 	} else {
1948 		src_dma = edesc->sec4_sg_dma;
1949 		sec4_sg_index += edesc->src_nents;
1950 		in_options = LDST_SGF;
1951 	}
1952 
1953 	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
1954 			  in_options);
1955 
1956 	dst_dma = src_dma;
1957 	out_options = in_options;
1958 
1959 	if (unlikely(req->src != req->dst)) {
1960 		if (!edesc->dst_nents) {
1961 			dst_dma = sg_dma_address(req->dst);
1962 		} else {
1963 			dst_dma = edesc->sec4_sg_dma +
1964 				  sec4_sg_index *
1965 				  sizeof(struct sec4_sg_entry);
1966 			out_options = LDST_SGF;
1967 		}
1968 	}
1969 
1970 	if (encrypt)
1971 		append_seq_out_ptr(desc, dst_dma,
1972 				   req->assoclen + req->cryptlen + authsize,
1973 				   out_options);
1974 	else
1975 		append_seq_out_ptr(desc, dst_dma,
1976 				   req->assoclen + req->cryptlen - authsize,
1977 				   out_options);
1978 
1979 	/* REG3 = assoclen */
1980 	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1981 }
1982 
1983 static void init_gcm_job(struct aead_request *req,
1984 			 struct aead_edesc *edesc,
1985 			 bool all_contig, bool encrypt)
1986 {
1987 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1988 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1989 	unsigned int ivsize = crypto_aead_ivsize(aead);
1990 	u32 *desc = edesc->hw_desc;
1991 	bool generic_gcm = (ivsize == 12);
1992 	unsigned int last;
1993 
1994 	init_aead_job(req, edesc, all_contig, encrypt);
1995 
1996 	/* BUG This should not be specific to generic GCM. */
1997 	last = 0;
1998 	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
1999 		last = FIFOLD_TYPE_LAST1;
2000 
2001 	/* Read GCM IV */
2002 	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2003 			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2004 	/* Append Salt */
2005 	if (!generic_gcm)
2006 		append_data(desc, ctx->key + ctx->enckeylen, 4);
2007 	/* Append IV */
2008 	append_data(desc, req->iv, ivsize);
2009 	/* End of blank commands */
2010 }
2011 
2012 static void init_authenc_job(struct aead_request *req,
2013 			     struct aead_edesc *edesc,
2014 			     bool all_contig, bool encrypt)
2015 {
2016 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2017 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
2018 						 struct caam_aead_alg, aead);
2019 	unsigned int ivsize = crypto_aead_ivsize(aead);
2020 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2021 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
2022 			       OP_ALG_AAI_CTR_MOD128);
2023 	const bool is_rfc3686 = alg->caam.rfc3686;
2024 	u32 *desc = edesc->hw_desc;
2025 	u32 ivoffset = 0;
2026 
2027 	/*
2028 	 * AES-CTR needs to load IV in CONTEXT1 reg
2029 	 * at an offset of 128bits (16bytes)
2030 	 * CONTEXT1[255:128] = IV
2031 	 */
2032 	if (ctr_mode)
2033 		ivoffset = 16;
2034 
2035 	/*
2036 	 * RFC3686 specific:
2037 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
2038 	 */
2039 	if (is_rfc3686)
2040 		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
2041 
2042 	init_aead_job(req, edesc, all_contig, encrypt);
2043 
2044 	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
2045 		append_load_as_imm(desc, req->iv, ivsize,
2046 				   LDST_CLASS_1_CCB |
2047 				   LDST_SRCDST_BYTE_CONTEXT |
2048 				   (ivoffset << LDST_OFFSET_SHIFT));
2049 }
2050 
2051 /*
2052  * Fill in ablkcipher job descriptor
2053  */
2054 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2055 				struct ablkcipher_edesc *edesc,
2056 				struct ablkcipher_request *req,
2057 				bool iv_contig)
2058 {
2059 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2060 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2061 	u32 *desc = edesc->hw_desc;
2062 	u32 out_options = 0, in_options;
2063 	dma_addr_t dst_dma, src_dma;
2064 	int len, sec4_sg_index = 0;
2065 
2066 #ifdef DEBUG
2067 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2068 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2069 		       ivsize, 1);
2070 	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2071 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2072 		       edesc->src_nents ? 100 : req->nbytes, 1);
2073 #endif
2074 
2075 	len = desc_len(sh_desc);
2076 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2077 
2078 	if (iv_contig) {
2079 		src_dma = edesc->iv_dma;
2080 		in_options = 0;
2081 	} else {
2082 		src_dma = edesc->sec4_sg_dma;
2083 		sec4_sg_index += edesc->src_nents + 1;
2084 		in_options = LDST_SGF;
2085 	}
2086 	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2087 
2088 	if (likely(req->src == req->dst)) {
2089 		if (!edesc->src_nents && iv_contig) {
2090 			dst_dma = sg_dma_address(req->src);
2091 		} else {
2092 			dst_dma = edesc->sec4_sg_dma +
2093 				sizeof(struct sec4_sg_entry);
2094 			out_options = LDST_SGF;
2095 		}
2096 	} else {
2097 		if (!edesc->dst_nents) {
2098 			dst_dma = sg_dma_address(req->dst);
2099 		} else {
2100 			dst_dma = edesc->sec4_sg_dma +
2101 				sec4_sg_index * sizeof(struct sec4_sg_entry);
2102 			out_options = LDST_SGF;
2103 		}
2104 	}
2105 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2106 }
2107 
2108 /*
2109  * Fill in ablkcipher givencrypt job descriptor
2110  */
2111 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2112 				    struct ablkcipher_edesc *edesc,
2113 				    struct ablkcipher_request *req,
2114 				    bool iv_contig)
2115 {
2116 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2117 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2118 	u32 *desc = edesc->hw_desc;
2119 	u32 out_options, in_options;
2120 	dma_addr_t dst_dma, src_dma;
2121 	int len, sec4_sg_index = 0;
2122 
2123 #ifdef DEBUG
2124 	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2125 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2126 		       ivsize, 1);
2127 	print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
2128 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2129 		       edesc->src_nents ? 100 : req->nbytes, 1);
2130 #endif
2131 
2132 	len = desc_len(sh_desc);
2133 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2134 
2135 	if (!edesc->src_nents) {
2136 		src_dma = sg_dma_address(req->src);
2137 		in_options = 0;
2138 	} else {
2139 		src_dma = edesc->sec4_sg_dma;
2140 		sec4_sg_index += edesc->src_nents;
2141 		in_options = LDST_SGF;
2142 	}
2143 	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2144 
2145 	if (iv_contig) {
2146 		dst_dma = edesc->iv_dma;
2147 		out_options = 0;
2148 	} else {
2149 		dst_dma = edesc->sec4_sg_dma +
2150 			  sec4_sg_index * sizeof(struct sec4_sg_entry);
2151 		out_options = LDST_SGF;
2152 	}
2153 	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2154 }
2155 
2156 /*
2157  * allocate and map the aead extended descriptor
2158  */
2159 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2160 					   int desc_bytes, bool *all_contig_ptr,
2161 					   bool encrypt)
2162 {
2163 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2164 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2165 	struct device *jrdev = ctx->jrdev;
2166 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2167 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2168 	int src_nents, dst_nents = 0;
2169 	struct aead_edesc *edesc;
2170 	int sgc;
2171 	bool all_contig = true;
2172 	bool src_chained = false, dst_chained = false;
2173 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2174 	unsigned int authsize = ctx->authsize;
2175 
2176 	if (unlikely(req->dst != req->src)) {
2177 		src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
2178 				     &src_chained);
2179 		dst_nents = sg_count(req->dst,
2180 				     req->assoclen + req->cryptlen +
2181 					(encrypt ? authsize : (-authsize)),
2182 				     &dst_chained);
2183 	} else {
2184 		src_nents = sg_count(req->src,
2185 				     req->assoclen + req->cryptlen +
2186 					(encrypt ? authsize : 0),
2187 				     &src_chained);
2188 	}
2189 
2190 	/* Check if data are contiguous. */
2191 	all_contig = !src_nents;
2192 	if (!all_contig) {
2193 		src_nents = src_nents ? : 1;
2194 		sec4_sg_len = src_nents;
2195 	}
2196 
2197 	sec4_sg_len += dst_nents;
2198 
2199 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2200 
2201 	/* allocate space for base edesc and hw desc commands, link tables */
2202 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2203 			GFP_DMA | flags);
2204 	if (!edesc) {
2205 		dev_err(jrdev, "could not allocate extended descriptor\n");
2206 		return ERR_PTR(-ENOMEM);
2207 	}
2208 
2209 	if (likely(req->src == req->dst)) {
2210 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2211 					 DMA_BIDIRECTIONAL, src_chained);
2212 		if (unlikely(!sgc)) {
2213 			dev_err(jrdev, "unable to map source\n");
2214 			kfree(edesc);
2215 			return ERR_PTR(-ENOMEM);
2216 		}
2217 	} else {
2218 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2219 					 DMA_TO_DEVICE, src_chained);
2220 		if (unlikely(!sgc)) {
2221 			dev_err(jrdev, "unable to map source\n");
2222 			kfree(edesc);
2223 			return ERR_PTR(-ENOMEM);
2224 		}
2225 
2226 		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2227 					 DMA_FROM_DEVICE, dst_chained);
2228 		if (unlikely(!sgc)) {
2229 			dev_err(jrdev, "unable to map destination\n");
2230 			dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
2231 					     DMA_TO_DEVICE, src_chained);
2232 			kfree(edesc);
2233 			return ERR_PTR(-ENOMEM);
2234 		}
2235 	}
2236 
2237 	edesc->src_nents = src_nents;
2238 	edesc->src_chained = src_chained;
2239 	edesc->dst_nents = dst_nents;
2240 	edesc->dst_chained = dst_chained;
2241 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2242 			 desc_bytes;
2243 	*all_contig_ptr = all_contig;
2244 
2245 	sec4_sg_index = 0;
2246 	if (!all_contig) {
2247 		sg_to_sec4_sg_last(req->src, src_nents,
2248 			      edesc->sec4_sg + sec4_sg_index, 0);
2249 		sec4_sg_index += src_nents;
2250 	}
2251 	if (dst_nents) {
2252 		sg_to_sec4_sg_last(req->dst, dst_nents,
2253 				   edesc->sec4_sg + sec4_sg_index, 0);
2254 	}
2255 
2256 	if (!sec4_sg_bytes)
2257 		return edesc;
2258 
2259 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2260 					    sec4_sg_bytes, DMA_TO_DEVICE);
2261 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2262 		dev_err(jrdev, "unable to map S/G table\n");
2263 		aead_unmap(jrdev, edesc, req);
2264 		kfree(edesc);
2265 		return ERR_PTR(-ENOMEM);
2266 	}
2267 
2268 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2269 
2270 	return edesc;
2271 }
2272 
2273 static int gcm_encrypt(struct aead_request *req)
2274 {
2275 	struct aead_edesc *edesc;
2276 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2277 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2278 	struct device *jrdev = ctx->jrdev;
2279 	bool all_contig;
2280 	u32 *desc;
2281 	int ret = 0;
2282 
2283 	/* allocate extended descriptor */
2284 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
2285 	if (IS_ERR(edesc))
2286 		return PTR_ERR(edesc);
2287 
2288 	/* Create and submit job descriptor */
2289 	init_gcm_job(req, edesc, all_contig, true);
2290 #ifdef DEBUG
2291 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2292 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2293 		       desc_bytes(edesc->hw_desc), 1);
2294 #endif
2295 
2296 	desc = edesc->hw_desc;
2297 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2298 	if (!ret) {
2299 		ret = -EINPROGRESS;
2300 	} else {
2301 		aead_unmap(jrdev, edesc, req);
2302 		kfree(edesc);
2303 	}
2304 
2305 	return ret;
2306 }
2307 
2308 static int ipsec_gcm_encrypt(struct aead_request *req)
2309 {
2310 	if (req->assoclen < 8)
2311 		return -EINVAL;
2312 
2313 	return gcm_encrypt(req);
2314 }
2315 
2316 static int aead_encrypt(struct aead_request *req)
2317 {
2318 	struct aead_edesc *edesc;
2319 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2320 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2321 	struct device *jrdev = ctx->jrdev;
2322 	bool all_contig;
2323 	u32 *desc;
2324 	int ret = 0;
2325 
2326 	/* allocate extended descriptor */
2327 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2328 				 &all_contig, true);
2329 	if (IS_ERR(edesc))
2330 		return PTR_ERR(edesc);
2331 
2332 	/* Create and submit job descriptor */
2333 	init_authenc_job(req, edesc, all_contig, true);
2334 #ifdef DEBUG
2335 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2336 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2337 		       desc_bytes(edesc->hw_desc), 1);
2338 #endif
2339 
2340 	desc = edesc->hw_desc;
2341 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2342 	if (!ret) {
2343 		ret = -EINPROGRESS;
2344 	} else {
2345 		aead_unmap(jrdev, edesc, req);
2346 		kfree(edesc);
2347 	}
2348 
2349 	return ret;
2350 }
2351 
2352 static int gcm_decrypt(struct aead_request *req)
2353 {
2354 	struct aead_edesc *edesc;
2355 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2356 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2357 	struct device *jrdev = ctx->jrdev;
2358 	bool all_contig;
2359 	u32 *desc;
2360 	int ret = 0;
2361 
2362 	/* allocate extended descriptor */
2363 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2364 	if (IS_ERR(edesc))
2365 		return PTR_ERR(edesc);
2366 
2367 	/* Create and submit job descriptor*/
2368 	init_gcm_job(req, edesc, all_contig, false);
2369 #ifdef DEBUG
2370 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2371 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2372 		       desc_bytes(edesc->hw_desc), 1);
2373 #endif
2374 
2375 	desc = edesc->hw_desc;
2376 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2377 	if (!ret) {
2378 		ret = -EINPROGRESS;
2379 	} else {
2380 		aead_unmap(jrdev, edesc, req);
2381 		kfree(edesc);
2382 	}
2383 
2384 	return ret;
2385 }
2386 
2387 static int ipsec_gcm_decrypt(struct aead_request *req)
2388 {
2389 	if (req->assoclen < 8)
2390 		return -EINVAL;
2391 
2392 	return gcm_decrypt(req);
2393 }
2394 
2395 static int aead_decrypt(struct aead_request *req)
2396 {
2397 	struct aead_edesc *edesc;
2398 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2399 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2400 	struct device *jrdev = ctx->jrdev;
2401 	bool all_contig;
2402 	u32 *desc;
2403 	int ret = 0;
2404 
2405 	/* allocate extended descriptor */
2406 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2407 				 &all_contig, false);
2408 	if (IS_ERR(edesc))
2409 		return PTR_ERR(edesc);
2410 
2411 #ifdef DEBUG
2412 	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2413 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2414 		       req->assoclen + req->cryptlen, 1);
2415 #endif
2416 
2417 	/* Create and submit job descriptor*/
2418 	init_authenc_job(req, edesc, all_contig, false);
2419 #ifdef DEBUG
2420 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2421 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2422 		       desc_bytes(edesc->hw_desc), 1);
2423 #endif
2424 
2425 	desc = edesc->hw_desc;
2426 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2427 	if (!ret) {
2428 		ret = -EINPROGRESS;
2429 	} else {
2430 		aead_unmap(jrdev, edesc, req);
2431 		kfree(edesc);
2432 	}
2433 
2434 	return ret;
2435 }
2436 
2437 static int aead_givdecrypt(struct aead_request *req)
2438 {
2439 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2440 	unsigned int ivsize = crypto_aead_ivsize(aead);
2441 
2442 	if (req->cryptlen < ivsize)
2443 		return -EINVAL;
2444 
2445 	req->cryptlen -= ivsize;
2446 	req->assoclen += ivsize;
2447 
2448 	return aead_decrypt(req);
2449 }
2450 
2451 /*
2452  * allocate and map the ablkcipher extended descriptor for ablkcipher
2453  */
2454 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2455 						       *req, int desc_bytes,
2456 						       bool *iv_contig_out)
2457 {
2458 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2459 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2460 	struct device *jrdev = ctx->jrdev;
2461 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2462 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2463 		       GFP_KERNEL : GFP_ATOMIC;
2464 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2465 	struct ablkcipher_edesc *edesc;
2466 	dma_addr_t iv_dma = 0;
2467 	bool iv_contig = false;
2468 	int sgc;
2469 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2470 	bool src_chained = false, dst_chained = false;
2471 	int sec4_sg_index;
2472 
2473 	src_nents = sg_count(req->src, req->nbytes, &src_chained);
2474 
2475 	if (req->dst != req->src)
2476 		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
2477 
2478 	if (likely(req->src == req->dst)) {
2479 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2480 					 DMA_BIDIRECTIONAL, src_chained);
2481 	} else {
2482 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2483 					 DMA_TO_DEVICE, src_chained);
2484 		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2485 					 DMA_FROM_DEVICE, dst_chained);
2486 	}
2487 
2488 	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2489 	if (dma_mapping_error(jrdev, iv_dma)) {
2490 		dev_err(jrdev, "unable to map IV\n");
2491 		return ERR_PTR(-ENOMEM);
2492 	}
2493 
2494 	/*
2495 	 * Check if iv can be contiguous with source and destination.
2496 	 * If so, include it. If not, create scatterlist.
2497 	 */
2498 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2499 		iv_contig = true;
2500 	else
2501 		src_nents = src_nents ? : 1;
2502 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2503 			sizeof(struct sec4_sg_entry);
2504 
2505 	/* allocate space for base edesc and hw desc commands, link tables */
2506 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2507 			GFP_DMA | flags);
2508 	if (!edesc) {
2509 		dev_err(jrdev, "could not allocate extended descriptor\n");
2510 		return ERR_PTR(-ENOMEM);
2511 	}
2512 
2513 	edesc->src_nents = src_nents;
2514 	edesc->src_chained = src_chained;
2515 	edesc->dst_nents = dst_nents;
2516 	edesc->dst_chained = dst_chained;
2517 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2518 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2519 			 desc_bytes;
2520 
2521 	sec4_sg_index = 0;
2522 	if (!iv_contig) {
2523 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2524 		sg_to_sec4_sg_last(req->src, src_nents,
2525 				   edesc->sec4_sg + 1, 0);
2526 		sec4_sg_index += 1 + src_nents;
2527 	}
2528 
2529 	if (dst_nents) {
2530 		sg_to_sec4_sg_last(req->dst, dst_nents,
2531 			edesc->sec4_sg + sec4_sg_index, 0);
2532 	}
2533 
2534 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2535 					    sec4_sg_bytes, DMA_TO_DEVICE);
2536 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2537 		dev_err(jrdev, "unable to map S/G table\n");
2538 		return ERR_PTR(-ENOMEM);
2539 	}
2540 
2541 	edesc->iv_dma = iv_dma;
2542 
2543 #ifdef DEBUG
2544 	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
2545 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2546 		       sec4_sg_bytes, 1);
2547 #endif
2548 
2549 	*iv_contig_out = iv_contig;
2550 	return edesc;
2551 }
2552 
2553 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2554 {
2555 	struct ablkcipher_edesc *edesc;
2556 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2557 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2558 	struct device *jrdev = ctx->jrdev;
2559 	bool iv_contig;
2560 	u32 *desc;
2561 	int ret = 0;
2562 
2563 	/* allocate extended descriptor */
2564 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2565 				       CAAM_CMD_SZ, &iv_contig);
2566 	if (IS_ERR(edesc))
2567 		return PTR_ERR(edesc);
2568 
2569 	/* Create and submit job descriptor*/
2570 	init_ablkcipher_job(ctx->sh_desc_enc,
2571 		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2572 #ifdef DEBUG
2573 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2574 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2575 		       desc_bytes(edesc->hw_desc), 1);
2576 #endif
2577 	desc = edesc->hw_desc;
2578 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2579 
2580 	if (!ret) {
2581 		ret = -EINPROGRESS;
2582 	} else {
2583 		ablkcipher_unmap(jrdev, edesc, req);
2584 		kfree(edesc);
2585 	}
2586 
2587 	return ret;
2588 }
2589 
2590 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2591 {
2592 	struct ablkcipher_edesc *edesc;
2593 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2594 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2595 	struct device *jrdev = ctx->jrdev;
2596 	bool iv_contig;
2597 	u32 *desc;
2598 	int ret = 0;
2599 
2600 	/* allocate extended descriptor */
2601 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2602 				       CAAM_CMD_SZ, &iv_contig);
2603 	if (IS_ERR(edesc))
2604 		return PTR_ERR(edesc);
2605 
2606 	/* Create and submit job descriptor*/
2607 	init_ablkcipher_job(ctx->sh_desc_dec,
2608 		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2609 	desc = edesc->hw_desc;
2610 #ifdef DEBUG
2611 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2612 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2613 		       desc_bytes(edesc->hw_desc), 1);
2614 #endif
2615 
2616 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2617 	if (!ret) {
2618 		ret = -EINPROGRESS;
2619 	} else {
2620 		ablkcipher_unmap(jrdev, edesc, req);
2621 		kfree(edesc);
2622 	}
2623 
2624 	return ret;
2625 }
2626 
2627 /*
2628  * allocate and map the ablkcipher extended descriptor
2629  * for ablkcipher givencrypt
2630  */
2631 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2632 				struct skcipher_givcrypt_request *greq,
2633 				int desc_bytes,
2634 				bool *iv_contig_out)
2635 {
2636 	struct ablkcipher_request *req = &greq->creq;
2637 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2638 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2639 	struct device *jrdev = ctx->jrdev;
2640 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2641 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2642 		       GFP_KERNEL : GFP_ATOMIC;
2643 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2644 	struct ablkcipher_edesc *edesc;
2645 	dma_addr_t iv_dma = 0;
2646 	bool iv_contig = false;
2647 	int sgc;
2648 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2649 	bool src_chained = false, dst_chained = false;
2650 	int sec4_sg_index;
2651 
2652 	src_nents = sg_count(req->src, req->nbytes, &src_chained);
2653 
2654 	if (unlikely(req->dst != req->src))
2655 		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
2656 
2657 	if (likely(req->src == req->dst)) {
2658 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2659 					 DMA_BIDIRECTIONAL, src_chained);
2660 	} else {
2661 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2662 					 DMA_TO_DEVICE, src_chained);
2663 		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2664 					 DMA_FROM_DEVICE, dst_chained);
2665 	}
2666 
2667 	/*
2668 	 * Check if iv can be contiguous with source and destination.
2669 	 * If so, include it. If not, create scatterlist.
2670 	 */
2671 	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2672 	if (dma_mapping_error(jrdev, iv_dma)) {
2673 		dev_err(jrdev, "unable to map IV\n");
2674 		return ERR_PTR(-ENOMEM);
2675 	}
2676 
2677 	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2678 		iv_contig = true;
2679 	else
2680 		dst_nents = dst_nents ? : 1;
2681 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2682 			sizeof(struct sec4_sg_entry);
2683 
2684 	/* allocate space for base edesc and hw desc commands, link tables */
2685 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2686 			GFP_DMA | flags);
2687 	if (!edesc) {
2688 		dev_err(jrdev, "could not allocate extended descriptor\n");
2689 		return ERR_PTR(-ENOMEM);
2690 	}
2691 
2692 	edesc->src_nents = src_nents;
2693 	edesc->src_chained = src_chained;
2694 	edesc->dst_nents = dst_nents;
2695 	edesc->dst_chained = dst_chained;
2696 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2697 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2698 			 desc_bytes;
2699 
2700 	sec4_sg_index = 0;
2701 	if (src_nents) {
2702 		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2703 		sec4_sg_index += src_nents;
2704 	}
2705 
2706 	if (!iv_contig) {
2707 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2708 				   iv_dma, ivsize, 0);
2709 		sec4_sg_index += 1;
2710 		sg_to_sec4_sg_last(req->dst, dst_nents,
2711 				   edesc->sec4_sg + sec4_sg_index, 0);
2712 	}
2713 
2714 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2715 					    sec4_sg_bytes, DMA_TO_DEVICE);
2716 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2717 		dev_err(jrdev, "unable to map S/G table\n");
2718 		return ERR_PTR(-ENOMEM);
2719 	}
2720 	edesc->iv_dma = iv_dma;
2721 
2722 #ifdef DEBUG
2723 	print_hex_dump(KERN_ERR,
2724 		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2725 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2726 		       sec4_sg_bytes, 1);
2727 #endif
2728 
2729 	*iv_contig_out = iv_contig;
2730 	return edesc;
2731 }
2732 
2733 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2734 {
2735 	struct ablkcipher_request *req = &creq->creq;
2736 	struct ablkcipher_edesc *edesc;
2737 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2738 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2739 	struct device *jrdev = ctx->jrdev;
2740 	bool iv_contig;
2741 	u32 *desc;
2742 	int ret = 0;
2743 
2744 	/* allocate extended descriptor */
2745 	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2746 				       CAAM_CMD_SZ, &iv_contig);
2747 	if (IS_ERR(edesc))
2748 		return PTR_ERR(edesc);
2749 
2750 	/* Create and submit job descriptor*/
2751 	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2752 				edesc, req, iv_contig);
2753 #ifdef DEBUG
2754 	print_hex_dump(KERN_ERR,
2755 		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2756 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2757 		       desc_bytes(edesc->hw_desc), 1);
2758 #endif
2759 	desc = edesc->hw_desc;
2760 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2761 
2762 	if (!ret) {
2763 		ret = -EINPROGRESS;
2764 	} else {
2765 		ablkcipher_unmap(jrdev, edesc, req);
2766 		kfree(edesc);
2767 	}
2768 
2769 	return ret;
2770 }
2771 
2772 #define template_aead		template_u.aead
2773 #define template_ablkcipher	template_u.ablkcipher
2774 struct caam_alg_template {
2775 	char name[CRYPTO_MAX_ALG_NAME];
2776 	char driver_name[CRYPTO_MAX_ALG_NAME];
2777 	unsigned int blocksize;
2778 	u32 type;
2779 	union {
2780 		struct ablkcipher_alg ablkcipher;
2781 	} template_u;
2782 	u32 class1_alg_type;
2783 	u32 class2_alg_type;
2784 	u32 alg_op;
2785 };
2786 
2787 static struct caam_alg_template driver_algs[] = {
2788 	/* ablkcipher descriptor */
2789 	{
2790 		.name = "cbc(aes)",
2791 		.driver_name = "cbc-aes-caam",
2792 		.blocksize = AES_BLOCK_SIZE,
2793 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2794 		.template_ablkcipher = {
2795 			.setkey = ablkcipher_setkey,
2796 			.encrypt = ablkcipher_encrypt,
2797 			.decrypt = ablkcipher_decrypt,
2798 			.givencrypt = ablkcipher_givencrypt,
2799 			.geniv = "<built-in>",
2800 			.min_keysize = AES_MIN_KEY_SIZE,
2801 			.max_keysize = AES_MAX_KEY_SIZE,
2802 			.ivsize = AES_BLOCK_SIZE,
2803 			},
2804 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2805 	},
2806 	{
2807 		.name = "cbc(des3_ede)",
2808 		.driver_name = "cbc-3des-caam",
2809 		.blocksize = DES3_EDE_BLOCK_SIZE,
2810 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2811 		.template_ablkcipher = {
2812 			.setkey = ablkcipher_setkey,
2813 			.encrypt = ablkcipher_encrypt,
2814 			.decrypt = ablkcipher_decrypt,
2815 			.givencrypt = ablkcipher_givencrypt,
2816 			.geniv = "<built-in>",
2817 			.min_keysize = DES3_EDE_KEY_SIZE,
2818 			.max_keysize = DES3_EDE_KEY_SIZE,
2819 			.ivsize = DES3_EDE_BLOCK_SIZE,
2820 			},
2821 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2822 	},
2823 	{
2824 		.name = "cbc(des)",
2825 		.driver_name = "cbc-des-caam",
2826 		.blocksize = DES_BLOCK_SIZE,
2827 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2828 		.template_ablkcipher = {
2829 			.setkey = ablkcipher_setkey,
2830 			.encrypt = ablkcipher_encrypt,
2831 			.decrypt = ablkcipher_decrypt,
2832 			.givencrypt = ablkcipher_givencrypt,
2833 			.geniv = "<built-in>",
2834 			.min_keysize = DES_KEY_SIZE,
2835 			.max_keysize = DES_KEY_SIZE,
2836 			.ivsize = DES_BLOCK_SIZE,
2837 			},
2838 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2839 	},
2840 	{
2841 		.name = "ctr(aes)",
2842 		.driver_name = "ctr-aes-caam",
2843 		.blocksize = 1,
2844 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2845 		.template_ablkcipher = {
2846 			.setkey = ablkcipher_setkey,
2847 			.encrypt = ablkcipher_encrypt,
2848 			.decrypt = ablkcipher_decrypt,
2849 			.geniv = "chainiv",
2850 			.min_keysize = AES_MIN_KEY_SIZE,
2851 			.max_keysize = AES_MAX_KEY_SIZE,
2852 			.ivsize = AES_BLOCK_SIZE,
2853 			},
2854 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2855 	},
2856 	{
2857 		.name = "rfc3686(ctr(aes))",
2858 		.driver_name = "rfc3686-ctr-aes-caam",
2859 		.blocksize = 1,
2860 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2861 		.template_ablkcipher = {
2862 			.setkey = ablkcipher_setkey,
2863 			.encrypt = ablkcipher_encrypt,
2864 			.decrypt = ablkcipher_decrypt,
2865 			.givencrypt = ablkcipher_givencrypt,
2866 			.geniv = "<built-in>",
2867 			.min_keysize = AES_MIN_KEY_SIZE +
2868 				       CTR_RFC3686_NONCE_SIZE,
2869 			.max_keysize = AES_MAX_KEY_SIZE +
2870 				       CTR_RFC3686_NONCE_SIZE,
2871 			.ivsize = CTR_RFC3686_IV_SIZE,
2872 			},
2873 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2874 	}
2875 };
2876 
2877 static struct caam_aead_alg driver_aeads[] = {
2878 	{
2879 		.aead = {
2880 			.base = {
2881 				.cra_name = "rfc4106(gcm(aes))",
2882 				.cra_driver_name = "rfc4106-gcm-aes-caam",
2883 				.cra_blocksize = 1,
2884 			},
2885 			.setkey = rfc4106_setkey,
2886 			.setauthsize = rfc4106_setauthsize,
2887 			.encrypt = ipsec_gcm_encrypt,
2888 			.decrypt = ipsec_gcm_decrypt,
2889 			.ivsize = 8,
2890 			.maxauthsize = AES_BLOCK_SIZE,
2891 		},
2892 		.caam = {
2893 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2894 		},
2895 	},
2896 	{
2897 		.aead = {
2898 			.base = {
2899 				.cra_name = "rfc4543(gcm(aes))",
2900 				.cra_driver_name = "rfc4543-gcm-aes-caam",
2901 				.cra_blocksize = 1,
2902 			},
2903 			.setkey = rfc4543_setkey,
2904 			.setauthsize = rfc4543_setauthsize,
2905 			.encrypt = ipsec_gcm_encrypt,
2906 			.decrypt = ipsec_gcm_decrypt,
2907 			.ivsize = 8,
2908 			.maxauthsize = AES_BLOCK_SIZE,
2909 		},
2910 		.caam = {
2911 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2912 		},
2913 	},
2914 	/* Galois Counter Mode */
2915 	{
2916 		.aead = {
2917 			.base = {
2918 				.cra_name = "gcm(aes)",
2919 				.cra_driver_name = "gcm-aes-caam",
2920 				.cra_blocksize = 1,
2921 			},
2922 			.setkey = gcm_setkey,
2923 			.setauthsize = gcm_setauthsize,
2924 			.encrypt = gcm_encrypt,
2925 			.decrypt = gcm_decrypt,
2926 			.ivsize = 12,
2927 			.maxauthsize = AES_BLOCK_SIZE,
2928 		},
2929 		.caam = {
2930 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2931 		},
2932 	},
2933 	/* single-pass ipsec_esp descriptor */
2934 	{
2935 		.aead = {
2936 			.base = {
2937 				.cra_name = "authenc(hmac(md5),"
2938 					    "ecb(cipher_null))",
2939 				.cra_driver_name = "authenc-hmac-md5-"
2940 						   "ecb-cipher_null-caam",
2941 				.cra_blocksize = NULL_BLOCK_SIZE,
2942 			},
2943 			.setkey = aead_setkey,
2944 			.setauthsize = aead_setauthsize,
2945 			.encrypt = aead_encrypt,
2946 			.decrypt = aead_decrypt,
2947 			.ivsize = NULL_IV_SIZE,
2948 			.maxauthsize = MD5_DIGEST_SIZE,
2949 		},
2950 		.caam = {
2951 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2952 					   OP_ALG_AAI_HMAC_PRECOMP,
2953 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2954 		},
2955 	},
2956 	{
2957 		.aead = {
2958 			.base = {
2959 				.cra_name = "authenc(hmac(sha1),"
2960 					    "ecb(cipher_null))",
2961 				.cra_driver_name = "authenc-hmac-sha1-"
2962 						   "ecb-cipher_null-caam",
2963 				.cra_blocksize = NULL_BLOCK_SIZE,
2964 			},
2965 			.setkey = aead_setkey,
2966 			.setauthsize = aead_setauthsize,
2967 			.encrypt = aead_encrypt,
2968 			.decrypt = aead_decrypt,
2969 			.ivsize = NULL_IV_SIZE,
2970 			.maxauthsize = SHA1_DIGEST_SIZE,
2971 		},
2972 		.caam = {
2973 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2974 					   OP_ALG_AAI_HMAC_PRECOMP,
2975 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2976 		},
2977 	},
2978 	{
2979 		.aead = {
2980 			.base = {
2981 				.cra_name = "authenc(hmac(sha224),"
2982 					    "ecb(cipher_null))",
2983 				.cra_driver_name = "authenc-hmac-sha224-"
2984 						   "ecb-cipher_null-caam",
2985 				.cra_blocksize = NULL_BLOCK_SIZE,
2986 			},
2987 			.setkey = aead_setkey,
2988 			.setauthsize = aead_setauthsize,
2989 			.encrypt = aead_encrypt,
2990 			.decrypt = aead_decrypt,
2991 			.ivsize = NULL_IV_SIZE,
2992 			.maxauthsize = SHA224_DIGEST_SIZE,
2993 		},
2994 		.caam = {
2995 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2996 					   OP_ALG_AAI_HMAC_PRECOMP,
2997 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2998 		},
2999 	},
3000 	{
3001 		.aead = {
3002 			.base = {
3003 				.cra_name = "authenc(hmac(sha256),"
3004 					    "ecb(cipher_null))",
3005 				.cra_driver_name = "authenc-hmac-sha256-"
3006 						   "ecb-cipher_null-caam",
3007 				.cra_blocksize = NULL_BLOCK_SIZE,
3008 			},
3009 			.setkey = aead_setkey,
3010 			.setauthsize = aead_setauthsize,
3011 			.encrypt = aead_encrypt,
3012 			.decrypt = aead_decrypt,
3013 			.ivsize = NULL_IV_SIZE,
3014 			.maxauthsize = SHA256_DIGEST_SIZE,
3015 		},
3016 		.caam = {
3017 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3018 					   OP_ALG_AAI_HMAC_PRECOMP,
3019 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3020 		},
3021 	},
3022 	{
3023 		.aead = {
3024 			.base = {
3025 				.cra_name = "authenc(hmac(sha384),"
3026 					    "ecb(cipher_null))",
3027 				.cra_driver_name = "authenc-hmac-sha384-"
3028 						   "ecb-cipher_null-caam",
3029 				.cra_blocksize = NULL_BLOCK_SIZE,
3030 			},
3031 			.setkey = aead_setkey,
3032 			.setauthsize = aead_setauthsize,
3033 			.encrypt = aead_encrypt,
3034 			.decrypt = aead_decrypt,
3035 			.ivsize = NULL_IV_SIZE,
3036 			.maxauthsize = SHA384_DIGEST_SIZE,
3037 		},
3038 		.caam = {
3039 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3040 					   OP_ALG_AAI_HMAC_PRECOMP,
3041 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3042 		},
3043 	},
3044 	{
3045 		.aead = {
3046 			.base = {
3047 				.cra_name = "authenc(hmac(sha512),"
3048 					    "ecb(cipher_null))",
3049 				.cra_driver_name = "authenc-hmac-sha512-"
3050 						   "ecb-cipher_null-caam",
3051 				.cra_blocksize = NULL_BLOCK_SIZE,
3052 			},
3053 			.setkey = aead_setkey,
3054 			.setauthsize = aead_setauthsize,
3055 			.encrypt = aead_encrypt,
3056 			.decrypt = aead_decrypt,
3057 			.ivsize = NULL_IV_SIZE,
3058 			.maxauthsize = SHA512_DIGEST_SIZE,
3059 		},
3060 		.caam = {
3061 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3062 					   OP_ALG_AAI_HMAC_PRECOMP,
3063 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3064 		},
3065 	},
3066 	{
3067 		.aead = {
3068 			.base = {
3069 				.cra_name = "authenc(hmac(md5),cbc(aes))",
3070 				.cra_driver_name = "authenc-hmac-md5-"
3071 						   "cbc-aes-caam",
3072 				.cra_blocksize = AES_BLOCK_SIZE,
3073 			},
3074 			.setkey = aead_setkey,
3075 			.setauthsize = aead_setauthsize,
3076 			.encrypt = aead_encrypt,
3077 			.decrypt = aead_decrypt,
3078 			.ivsize = AES_BLOCK_SIZE,
3079 			.maxauthsize = MD5_DIGEST_SIZE,
3080 		},
3081 		.caam = {
3082 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3083 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3084 					   OP_ALG_AAI_HMAC_PRECOMP,
3085 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3086 		},
3087 	},
3088 	{
3089 		.aead = {
3090 			.base = {
3091 				.cra_name = "echainiv(authenc(hmac(md5),"
3092 					    "cbc(aes)))",
3093 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3094 						   "cbc-aes-caam",
3095 				.cra_blocksize = AES_BLOCK_SIZE,
3096 			},
3097 			.setkey = aead_setkey,
3098 			.setauthsize = aead_setauthsize,
3099 			.encrypt = aead_encrypt,
3100 			.decrypt = aead_givdecrypt,
3101 			.ivsize = AES_BLOCK_SIZE,
3102 			.maxauthsize = MD5_DIGEST_SIZE,
3103 		},
3104 		.caam = {
3105 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3106 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3107 					   OP_ALG_AAI_HMAC_PRECOMP,
3108 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3109 			.geniv = true,
3110 		},
3111 	},
3112 	{
3113 		.aead = {
3114 			.base = {
3115 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3116 				.cra_driver_name = "authenc-hmac-sha1-"
3117 						   "cbc-aes-caam",
3118 				.cra_blocksize = AES_BLOCK_SIZE,
3119 			},
3120 			.setkey = aead_setkey,
3121 			.setauthsize = aead_setauthsize,
3122 			.encrypt = aead_encrypt,
3123 			.decrypt = aead_decrypt,
3124 			.ivsize = AES_BLOCK_SIZE,
3125 			.maxauthsize = SHA1_DIGEST_SIZE,
3126 		},
3127 		.caam = {
3128 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3129 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3130 					   OP_ALG_AAI_HMAC_PRECOMP,
3131 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3132 		},
3133 	},
3134 	{
3135 		.aead = {
3136 			.base = {
3137 				.cra_name = "echainiv(authenc(hmac(sha1),"
3138 					    "cbc(aes)))",
3139 				.cra_driver_name = "echainiv-authenc-"
3140 						   "hmac-sha1-cbc-aes-caam",
3141 				.cra_blocksize = AES_BLOCK_SIZE,
3142 			},
3143 			.setkey = aead_setkey,
3144 			.setauthsize = aead_setauthsize,
3145 			.encrypt = aead_encrypt,
3146 			.decrypt = aead_givdecrypt,
3147 			.ivsize = AES_BLOCK_SIZE,
3148 			.maxauthsize = SHA1_DIGEST_SIZE,
3149 		},
3150 		.caam = {
3151 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3152 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3153 					   OP_ALG_AAI_HMAC_PRECOMP,
3154 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3155 			.geniv = true,
3156 		},
3157 	},
3158 	{
3159 		.aead = {
3160 			.base = {
3161 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
3162 				.cra_driver_name = "authenc-hmac-sha224-"
3163 						   "cbc-aes-caam",
3164 				.cra_blocksize = AES_BLOCK_SIZE,
3165 			},
3166 			.setkey = aead_setkey,
3167 			.setauthsize = aead_setauthsize,
3168 			.encrypt = aead_encrypt,
3169 			.decrypt = aead_decrypt,
3170 			.ivsize = AES_BLOCK_SIZE,
3171 			.maxauthsize = SHA224_DIGEST_SIZE,
3172 		},
3173 		.caam = {
3174 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3175 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3176 					   OP_ALG_AAI_HMAC_PRECOMP,
3177 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3178 		},
3179 	},
3180 	{
3181 		.aead = {
3182 			.base = {
3183 				.cra_name = "echainiv(authenc(hmac(sha224),"
3184 					    "cbc(aes)))",
3185 				.cra_driver_name = "echainiv-authenc-"
3186 						   "hmac-sha224-cbc-aes-caam",
3187 				.cra_blocksize = AES_BLOCK_SIZE,
3188 			},
3189 			.setkey = aead_setkey,
3190 			.setauthsize = aead_setauthsize,
3191 			.encrypt = aead_encrypt,
3192 			.decrypt = aead_givdecrypt,
3193 			.ivsize = AES_BLOCK_SIZE,
3194 			.maxauthsize = SHA224_DIGEST_SIZE,
3195 		},
3196 		.caam = {
3197 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3198 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3199 					   OP_ALG_AAI_HMAC_PRECOMP,
3200 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3201 			.geniv = true,
3202 		},
3203 	},
3204 	{
3205 		.aead = {
3206 			.base = {
3207 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
3208 				.cra_driver_name = "authenc-hmac-sha256-"
3209 						   "cbc-aes-caam",
3210 				.cra_blocksize = AES_BLOCK_SIZE,
3211 			},
3212 			.setkey = aead_setkey,
3213 			.setauthsize = aead_setauthsize,
3214 			.encrypt = aead_encrypt,
3215 			.decrypt = aead_decrypt,
3216 			.ivsize = AES_BLOCK_SIZE,
3217 			.maxauthsize = SHA256_DIGEST_SIZE,
3218 		},
3219 		.caam = {
3220 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3221 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3222 					   OP_ALG_AAI_HMAC_PRECOMP,
3223 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3224 		},
3225 	},
3226 	{
3227 		.aead = {
3228 			.base = {
3229 				.cra_name = "echainiv(authenc(hmac(sha256),"
3230 					    "cbc(aes)))",
3231 				.cra_driver_name = "echainiv-authenc-"
3232 						   "hmac-sha256-cbc-aes-caam",
3233 				.cra_blocksize = AES_BLOCK_SIZE,
3234 			},
3235 			.setkey = aead_setkey,
3236 			.setauthsize = aead_setauthsize,
3237 			.encrypt = aead_encrypt,
3238 			.decrypt = aead_givdecrypt,
3239 			.ivsize = AES_BLOCK_SIZE,
3240 			.maxauthsize = SHA256_DIGEST_SIZE,
3241 		},
3242 		.caam = {
3243 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3244 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3245 					   OP_ALG_AAI_HMAC_PRECOMP,
3246 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3247 			.geniv = true,
3248 		},
3249 	},
3250 	{
3251 		.aead = {
3252 			.base = {
3253 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
3254 				.cra_driver_name = "authenc-hmac-sha384-"
3255 						   "cbc-aes-caam",
3256 				.cra_blocksize = AES_BLOCK_SIZE,
3257 			},
3258 			.setkey = aead_setkey,
3259 			.setauthsize = aead_setauthsize,
3260 			.encrypt = aead_encrypt,
3261 			.decrypt = aead_decrypt,
3262 			.ivsize = AES_BLOCK_SIZE,
3263 			.maxauthsize = SHA384_DIGEST_SIZE,
3264 		},
3265 		.caam = {
3266 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3267 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3268 					   OP_ALG_AAI_HMAC_PRECOMP,
3269 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3270 		},
3271 	},
3272 	{
3273 		.aead = {
3274 			.base = {
3275 				.cra_name = "echainiv(authenc(hmac(sha384),"
3276 					    "cbc(aes)))",
3277 				.cra_driver_name = "echainiv-authenc-"
3278 						   "hmac-sha384-cbc-aes-caam",
3279 				.cra_blocksize = AES_BLOCK_SIZE,
3280 			},
3281 			.setkey = aead_setkey,
3282 			.setauthsize = aead_setauthsize,
3283 			.encrypt = aead_encrypt,
3284 			.decrypt = aead_givdecrypt,
3285 			.ivsize = AES_BLOCK_SIZE,
3286 			.maxauthsize = SHA384_DIGEST_SIZE,
3287 		},
3288 		.caam = {
3289 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3290 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3291 					   OP_ALG_AAI_HMAC_PRECOMP,
3292 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3293 			.geniv = true,
3294 		},
3295 	},
3296 	{
3297 		.aead = {
3298 			.base = {
3299 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
3300 				.cra_driver_name = "authenc-hmac-sha512-"
3301 						   "cbc-aes-caam",
3302 				.cra_blocksize = AES_BLOCK_SIZE,
3303 			},
3304 			.setkey = aead_setkey,
3305 			.setauthsize = aead_setauthsize,
3306 			.encrypt = aead_encrypt,
3307 			.decrypt = aead_decrypt,
3308 			.ivsize = AES_BLOCK_SIZE,
3309 			.maxauthsize = SHA512_DIGEST_SIZE,
3310 		},
3311 		.caam = {
3312 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3313 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3314 					   OP_ALG_AAI_HMAC_PRECOMP,
3315 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3316 		},
3317 	},
3318 	{
3319 		.aead = {
3320 			.base = {
3321 				.cra_name = "echainiv(authenc(hmac(sha512),"
3322 					    "cbc(aes)))",
3323 				.cra_driver_name = "echainiv-authenc-"
3324 						   "hmac-sha512-cbc-aes-caam",
3325 				.cra_blocksize = AES_BLOCK_SIZE,
3326 			},
3327 			.setkey = aead_setkey,
3328 			.setauthsize = aead_setauthsize,
3329 			.encrypt = aead_encrypt,
3330 			.decrypt = aead_givdecrypt,
3331 			.ivsize = AES_BLOCK_SIZE,
3332 			.maxauthsize = SHA512_DIGEST_SIZE,
3333 		},
3334 		.caam = {
3335 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3336 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3337 					   OP_ALG_AAI_HMAC_PRECOMP,
3338 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3339 			.geniv = true,
3340 		},
3341 	},
3342 	{
3343 		.aead = {
3344 			.base = {
3345 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3346 				.cra_driver_name = "authenc-hmac-md5-"
3347 						   "cbc-des3_ede-caam",
3348 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3349 			},
3350 			.setkey = aead_setkey,
3351 			.setauthsize = aead_setauthsize,
3352 			.encrypt = aead_encrypt,
3353 			.decrypt = aead_decrypt,
3354 			.ivsize = DES3_EDE_BLOCK_SIZE,
3355 			.maxauthsize = MD5_DIGEST_SIZE,
3356 		},
3357 		.caam = {
3358 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3359 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3360 					   OP_ALG_AAI_HMAC_PRECOMP,
3361 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3362 		}
3363 	},
3364 	{
3365 		.aead = {
3366 			.base = {
3367 				.cra_name = "echainiv(authenc(hmac(md5),"
3368 					    "cbc(des3_ede)))",
3369 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3370 						   "cbc-des3_ede-caam",
3371 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3372 			},
3373 			.setkey = aead_setkey,
3374 			.setauthsize = aead_setauthsize,
3375 			.encrypt = aead_encrypt,
3376 			.decrypt = aead_givdecrypt,
3377 			.ivsize = DES3_EDE_BLOCK_SIZE,
3378 			.maxauthsize = MD5_DIGEST_SIZE,
3379 		},
3380 		.caam = {
3381 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3382 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3383 					   OP_ALG_AAI_HMAC_PRECOMP,
3384 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3385 			.geniv = true,
3386 		}
3387 	},
3388 	{
3389 		.aead = {
3390 			.base = {
3391 				.cra_name = "authenc(hmac(sha1),"
3392 					    "cbc(des3_ede))",
3393 				.cra_driver_name = "authenc-hmac-sha1-"
3394 						   "cbc-des3_ede-caam",
3395 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3396 			},
3397 			.setkey = aead_setkey,
3398 			.setauthsize = aead_setauthsize,
3399 			.encrypt = aead_encrypt,
3400 			.decrypt = aead_decrypt,
3401 			.ivsize = DES3_EDE_BLOCK_SIZE,
3402 			.maxauthsize = SHA1_DIGEST_SIZE,
3403 		},
3404 		.caam = {
3405 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3406 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3407 					   OP_ALG_AAI_HMAC_PRECOMP,
3408 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3409 		},
3410 	},
3411 	{
3412 		.aead = {
3413 			.base = {
3414 				.cra_name = "echainiv(authenc(hmac(sha1),"
3415 					    "cbc(des3_ede)))",
3416 				.cra_driver_name = "echainiv-authenc-"
3417 						   "hmac-sha1-"
3418 						   "cbc-des3_ede-caam",
3419 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3420 			},
3421 			.setkey = aead_setkey,
3422 			.setauthsize = aead_setauthsize,
3423 			.encrypt = aead_encrypt,
3424 			.decrypt = aead_givdecrypt,
3425 			.ivsize = DES3_EDE_BLOCK_SIZE,
3426 			.maxauthsize = SHA1_DIGEST_SIZE,
3427 		},
3428 		.caam = {
3429 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3430 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3431 					   OP_ALG_AAI_HMAC_PRECOMP,
3432 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3433 			.geniv = true,
3434 		},
3435 	},
3436 	{
3437 		.aead = {
3438 			.base = {
3439 				.cra_name = "authenc(hmac(sha224),"
3440 					    "cbc(des3_ede))",
3441 				.cra_driver_name = "authenc-hmac-sha224-"
3442 						   "cbc-des3_ede-caam",
3443 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3444 			},
3445 			.setkey = aead_setkey,
3446 			.setauthsize = aead_setauthsize,
3447 			.encrypt = aead_encrypt,
3448 			.decrypt = aead_decrypt,
3449 			.ivsize = DES3_EDE_BLOCK_SIZE,
3450 			.maxauthsize = SHA224_DIGEST_SIZE,
3451 		},
3452 		.caam = {
3453 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3454 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3455 					   OP_ALG_AAI_HMAC_PRECOMP,
3456 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3457 		},
3458 	},
3459 	{
3460 		.aead = {
3461 			.base = {
3462 				.cra_name = "echainiv(authenc(hmac(sha224),"
3463 					    "cbc(des3_ede)))",
3464 				.cra_driver_name = "echainiv-authenc-"
3465 						   "hmac-sha224-"
3466 						   "cbc-des3_ede-caam",
3467 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3468 			},
3469 			.setkey = aead_setkey,
3470 			.setauthsize = aead_setauthsize,
3471 			.encrypt = aead_encrypt,
3472 			.decrypt = aead_givdecrypt,
3473 			.ivsize = DES3_EDE_BLOCK_SIZE,
3474 			.maxauthsize = SHA224_DIGEST_SIZE,
3475 		},
3476 		.caam = {
3477 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3478 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3479 					   OP_ALG_AAI_HMAC_PRECOMP,
3480 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3481 			.geniv = true,
3482 		},
3483 	},
3484 	{
3485 		.aead = {
3486 			.base = {
3487 				.cra_name = "authenc(hmac(sha256),"
3488 					    "cbc(des3_ede))",
3489 				.cra_driver_name = "authenc-hmac-sha256-"
3490 						   "cbc-des3_ede-caam",
3491 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3492 			},
3493 			.setkey = aead_setkey,
3494 			.setauthsize = aead_setauthsize,
3495 			.encrypt = aead_encrypt,
3496 			.decrypt = aead_decrypt,
3497 			.ivsize = DES3_EDE_BLOCK_SIZE,
3498 			.maxauthsize = SHA256_DIGEST_SIZE,
3499 		},
3500 		.caam = {
3501 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3502 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3503 					   OP_ALG_AAI_HMAC_PRECOMP,
3504 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3505 		},
3506 	},
3507 	{
3508 		.aead = {
3509 			.base = {
3510 				.cra_name = "echainiv(authenc(hmac(sha256),"
3511 					    "cbc(des3_ede)))",
3512 				.cra_driver_name = "echainiv-authenc-"
3513 						   "hmac-sha256-"
3514 						   "cbc-des3_ede-caam",
3515 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3516 			},
3517 			.setkey = aead_setkey,
3518 			.setauthsize = aead_setauthsize,
3519 			.encrypt = aead_encrypt,
3520 			.decrypt = aead_givdecrypt,
3521 			.ivsize = DES3_EDE_BLOCK_SIZE,
3522 			.maxauthsize = SHA256_DIGEST_SIZE,
3523 		},
3524 		.caam = {
3525 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3526 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3527 					   OP_ALG_AAI_HMAC_PRECOMP,
3528 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3529 			.geniv = true,
3530 		},
3531 	},
3532 	{
3533 		.aead = {
3534 			.base = {
3535 				.cra_name = "authenc(hmac(sha384),"
3536 					    "cbc(des3_ede))",
3537 				.cra_driver_name = "authenc-hmac-sha384-"
3538 						   "cbc-des3_ede-caam",
3539 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3540 			},
3541 			.setkey = aead_setkey,
3542 			.setauthsize = aead_setauthsize,
3543 			.encrypt = aead_encrypt,
3544 			.decrypt = aead_decrypt,
3545 			.ivsize = DES3_EDE_BLOCK_SIZE,
3546 			.maxauthsize = SHA384_DIGEST_SIZE,
3547 		},
3548 		.caam = {
3549 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3550 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3551 					   OP_ALG_AAI_HMAC_PRECOMP,
3552 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3553 		},
3554 	},
3555 	{
3556 		.aead = {
3557 			.base = {
3558 				.cra_name = "echainiv(authenc(hmac(sha384),"
3559 					    "cbc(des3_ede)))",
3560 				.cra_driver_name = "echainiv-authenc-"
3561 						   "hmac-sha384-"
3562 						   "cbc-des3_ede-caam",
3563 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3564 			},
3565 			.setkey = aead_setkey,
3566 			.setauthsize = aead_setauthsize,
3567 			.encrypt = aead_encrypt,
3568 			.decrypt = aead_givdecrypt,
3569 			.ivsize = DES3_EDE_BLOCK_SIZE,
3570 			.maxauthsize = SHA384_DIGEST_SIZE,
3571 		},
3572 		.caam = {
3573 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3574 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3575 					   OP_ALG_AAI_HMAC_PRECOMP,
3576 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3577 			.geniv = true,
3578 		},
3579 	},
3580 	{
3581 		.aead = {
3582 			.base = {
3583 				.cra_name = "authenc(hmac(sha512),"
3584 					    "cbc(des3_ede))",
3585 				.cra_driver_name = "authenc-hmac-sha512-"
3586 						   "cbc-des3_ede-caam",
3587 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3588 			},
3589 			.setkey = aead_setkey,
3590 			.setauthsize = aead_setauthsize,
3591 			.encrypt = aead_encrypt,
3592 			.decrypt = aead_decrypt,
3593 			.ivsize = DES3_EDE_BLOCK_SIZE,
3594 			.maxauthsize = SHA512_DIGEST_SIZE,
3595 		},
3596 		.caam = {
3597 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3598 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3599 					   OP_ALG_AAI_HMAC_PRECOMP,
3600 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3601 		},
3602 	},
3603 	{
3604 		.aead = {
3605 			.base = {
3606 				.cra_name = "echainiv(authenc(hmac(sha512),"
3607 					    "cbc(des3_ede)))",
3608 				.cra_driver_name = "echainiv-authenc-"
3609 						   "hmac-sha512-"
3610 						   "cbc-des3_ede-caam",
3611 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3612 			},
3613 			.setkey = aead_setkey,
3614 			.setauthsize = aead_setauthsize,
3615 			.encrypt = aead_encrypt,
3616 			.decrypt = aead_givdecrypt,
3617 			.ivsize = DES3_EDE_BLOCK_SIZE,
3618 			.maxauthsize = SHA512_DIGEST_SIZE,
3619 		},
3620 		.caam = {
3621 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3622 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3623 					   OP_ALG_AAI_HMAC_PRECOMP,
3624 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3625 			.geniv = true,
3626 		},
3627 	},
3628 	{
3629 		.aead = {
3630 			.base = {
3631 				.cra_name = "authenc(hmac(md5),cbc(des))",
3632 				.cra_driver_name = "authenc-hmac-md5-"
3633 						   "cbc-des-caam",
3634 				.cra_blocksize = DES_BLOCK_SIZE,
3635 			},
3636 			.setkey = aead_setkey,
3637 			.setauthsize = aead_setauthsize,
3638 			.encrypt = aead_encrypt,
3639 			.decrypt = aead_decrypt,
3640 			.ivsize = DES_BLOCK_SIZE,
3641 			.maxauthsize = MD5_DIGEST_SIZE,
3642 		},
3643 		.caam = {
3644 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3645 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3646 					   OP_ALG_AAI_HMAC_PRECOMP,
3647 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3648 		},
3649 	},
3650 	{
3651 		.aead = {
3652 			.base = {
3653 				.cra_name = "echainiv(authenc(hmac(md5),"
3654 					    "cbc(des)))",
3655 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3656 						   "cbc-des-caam",
3657 				.cra_blocksize = DES_BLOCK_SIZE,
3658 			},
3659 			.setkey = aead_setkey,
3660 			.setauthsize = aead_setauthsize,
3661 			.encrypt = aead_encrypt,
3662 			.decrypt = aead_givdecrypt,
3663 			.ivsize = DES_BLOCK_SIZE,
3664 			.maxauthsize = MD5_DIGEST_SIZE,
3665 		},
3666 		.caam = {
3667 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3668 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3669 					   OP_ALG_AAI_HMAC_PRECOMP,
3670 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3671 			.geniv = true,
3672 		},
3673 	},
3674 	{
3675 		.aead = {
3676 			.base = {
3677 				.cra_name = "authenc(hmac(sha1),cbc(des))",
3678 				.cra_driver_name = "authenc-hmac-sha1-"
3679 						   "cbc-des-caam",
3680 				.cra_blocksize = DES_BLOCK_SIZE,
3681 			},
3682 			.setkey = aead_setkey,
3683 			.setauthsize = aead_setauthsize,
3684 			.encrypt = aead_encrypt,
3685 			.decrypt = aead_decrypt,
3686 			.ivsize = DES_BLOCK_SIZE,
3687 			.maxauthsize = SHA1_DIGEST_SIZE,
3688 		},
3689 		.caam = {
3690 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3691 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3692 					   OP_ALG_AAI_HMAC_PRECOMP,
3693 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3694 		},
3695 	},
3696 	{
3697 		.aead = {
3698 			.base = {
3699 				.cra_name = "echainiv(authenc(hmac(sha1),"
3700 					    "cbc(des)))",
3701 				.cra_driver_name = "echainiv-authenc-"
3702 						   "hmac-sha1-cbc-des-caam",
3703 				.cra_blocksize = DES_BLOCK_SIZE,
3704 			},
3705 			.setkey = aead_setkey,
3706 			.setauthsize = aead_setauthsize,
3707 			.encrypt = aead_encrypt,
3708 			.decrypt = aead_givdecrypt,
3709 			.ivsize = DES_BLOCK_SIZE,
3710 			.maxauthsize = SHA1_DIGEST_SIZE,
3711 		},
3712 		.caam = {
3713 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3714 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3715 					   OP_ALG_AAI_HMAC_PRECOMP,
3716 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3717 			.geniv = true,
3718 		},
3719 	},
3720 	{
3721 		.aead = {
3722 			.base = {
3723 				.cra_name = "authenc(hmac(sha224),cbc(des))",
3724 				.cra_driver_name = "authenc-hmac-sha224-"
3725 						   "cbc-des-caam",
3726 				.cra_blocksize = DES_BLOCK_SIZE,
3727 			},
3728 			.setkey = aead_setkey,
3729 			.setauthsize = aead_setauthsize,
3730 			.encrypt = aead_encrypt,
3731 			.decrypt = aead_decrypt,
3732 			.ivsize = DES_BLOCK_SIZE,
3733 			.maxauthsize = SHA224_DIGEST_SIZE,
3734 		},
3735 		.caam = {
3736 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3737 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3738 					   OP_ALG_AAI_HMAC_PRECOMP,
3739 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3740 		},
3741 	},
3742 	{
3743 		.aead = {
3744 			.base = {
3745 				.cra_name = "echainiv(authenc(hmac(sha224),"
3746 					    "cbc(des)))",
3747 				.cra_driver_name = "echainiv-authenc-"
3748 						   "hmac-sha224-cbc-des-caam",
3749 				.cra_blocksize = DES_BLOCK_SIZE,
3750 			},
3751 			.setkey = aead_setkey,
3752 			.setauthsize = aead_setauthsize,
3753 			.encrypt = aead_encrypt,
3754 			.decrypt = aead_givdecrypt,
3755 			.ivsize = DES_BLOCK_SIZE,
3756 			.maxauthsize = SHA224_DIGEST_SIZE,
3757 		},
3758 		.caam = {
3759 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3760 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3761 					   OP_ALG_AAI_HMAC_PRECOMP,
3762 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3763 			.geniv = true,
3764 		},
3765 	},
3766 	{
3767 		.aead = {
3768 			.base = {
3769 				.cra_name = "authenc(hmac(sha256),cbc(des))",
3770 				.cra_driver_name = "authenc-hmac-sha256-"
3771 						   "cbc-des-caam",
3772 				.cra_blocksize = DES_BLOCK_SIZE,
3773 			},
3774 			.setkey = aead_setkey,
3775 			.setauthsize = aead_setauthsize,
3776 			.encrypt = aead_encrypt,
3777 			.decrypt = aead_decrypt,
3778 			.ivsize = DES_BLOCK_SIZE,
3779 			.maxauthsize = SHA256_DIGEST_SIZE,
3780 		},
3781 		.caam = {
3782 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3783 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3784 					   OP_ALG_AAI_HMAC_PRECOMP,
3785 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3786 		},
3787 	},
3788 	{
3789 		.aead = {
3790 			.base = {
3791 				.cra_name = "echainiv(authenc(hmac(sha256),"
3792 					    "cbc(des)))",
3793 				.cra_driver_name = "echainiv-authenc-"
3794 						   "hmac-sha256-cbc-des-caam",
3795 				.cra_blocksize = DES_BLOCK_SIZE,
3796 			},
3797 			.setkey = aead_setkey,
3798 			.setauthsize = aead_setauthsize,
3799 			.encrypt = aead_encrypt,
3800 			.decrypt = aead_givdecrypt,
3801 			.ivsize = DES_BLOCK_SIZE,
3802 			.maxauthsize = SHA256_DIGEST_SIZE,
3803 		},
3804 		.caam = {
3805 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3806 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3807 					   OP_ALG_AAI_HMAC_PRECOMP,
3808 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3809 			.geniv = true,
3810 		},
3811 	},
3812 	{
3813 		.aead = {
3814 			.base = {
3815 				.cra_name = "authenc(hmac(sha384),cbc(des))",
3816 				.cra_driver_name = "authenc-hmac-sha384-"
3817 						   "cbc-des-caam",
3818 				.cra_blocksize = DES_BLOCK_SIZE,
3819 			},
3820 			.setkey = aead_setkey,
3821 			.setauthsize = aead_setauthsize,
3822 			.encrypt = aead_encrypt,
3823 			.decrypt = aead_decrypt,
3824 			.ivsize = DES_BLOCK_SIZE,
3825 			.maxauthsize = SHA384_DIGEST_SIZE,
3826 		},
3827 		.caam = {
3828 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3829 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3830 					   OP_ALG_AAI_HMAC_PRECOMP,
3831 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3832 		},
3833 	},
3834 	{
3835 		.aead = {
3836 			.base = {
3837 				.cra_name = "echainiv(authenc(hmac(sha384),"
3838 					    "cbc(des)))",
3839 				.cra_driver_name = "echainiv-authenc-"
3840 						   "hmac-sha384-cbc-des-caam",
3841 				.cra_blocksize = DES_BLOCK_SIZE,
3842 			},
3843 			.setkey = aead_setkey,
3844 			.setauthsize = aead_setauthsize,
3845 			.encrypt = aead_encrypt,
3846 			.decrypt = aead_givdecrypt,
3847 			.ivsize = DES_BLOCK_SIZE,
3848 			.maxauthsize = SHA384_DIGEST_SIZE,
3849 		},
3850 		.caam = {
3851 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3852 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3853 					   OP_ALG_AAI_HMAC_PRECOMP,
3854 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3855 			.geniv = true,
3856 		},
3857 	},
3858 	{
3859 		.aead = {
3860 			.base = {
3861 				.cra_name = "authenc(hmac(sha512),cbc(des))",
3862 				.cra_driver_name = "authenc-hmac-sha512-"
3863 						   "cbc-des-caam",
3864 				.cra_blocksize = DES_BLOCK_SIZE,
3865 			},
3866 			.setkey = aead_setkey,
3867 			.setauthsize = aead_setauthsize,
3868 			.encrypt = aead_encrypt,
3869 			.decrypt = aead_decrypt,
3870 			.ivsize = DES_BLOCK_SIZE,
3871 			.maxauthsize = SHA512_DIGEST_SIZE,
3872 		},
3873 		.caam = {
3874 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3875 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3876 					   OP_ALG_AAI_HMAC_PRECOMP,
3877 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3878 		},
3879 	},
3880 	{
3881 		.aead = {
3882 			.base = {
3883 				.cra_name = "echainiv(authenc(hmac(sha512),"
3884 					    "cbc(des)))",
3885 				.cra_driver_name = "echainiv-authenc-"
3886 						   "hmac-sha512-cbc-des-caam",
3887 				.cra_blocksize = DES_BLOCK_SIZE,
3888 			},
3889 			.setkey = aead_setkey,
3890 			.setauthsize = aead_setauthsize,
3891 			.encrypt = aead_encrypt,
3892 			.decrypt = aead_givdecrypt,
3893 			.ivsize = DES_BLOCK_SIZE,
3894 			.maxauthsize = SHA512_DIGEST_SIZE,
3895 		},
3896 		.caam = {
3897 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3898 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3899 					   OP_ALG_AAI_HMAC_PRECOMP,
3900 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3901 			.geniv = true,
3902 		},
3903 	},
3904 	{
3905 		.aead = {
3906 			.base = {
3907 				.cra_name = "authenc(hmac(md5),"
3908 					    "rfc3686(ctr(aes)))",
3909 				.cra_driver_name = "authenc-hmac-md5-"
3910 						   "rfc3686-ctr-aes-caam",
3911 				.cra_blocksize = 1,
3912 			},
3913 			.setkey = aead_setkey,
3914 			.setauthsize = aead_setauthsize,
3915 			.encrypt = aead_encrypt,
3916 			.decrypt = aead_decrypt,
3917 			.ivsize = CTR_RFC3686_IV_SIZE,
3918 			.maxauthsize = MD5_DIGEST_SIZE,
3919 		},
3920 		.caam = {
3921 			.class1_alg_type = OP_ALG_ALGSEL_AES |
3922 					   OP_ALG_AAI_CTR_MOD128,
3923 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3924 					   OP_ALG_AAI_HMAC_PRECOMP,
3925 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3926 			.rfc3686 = true,
3927 		},
3928 	},
3929 	{
3930 		.aead = {
3931 			.base = {
3932 				.cra_name = "seqiv(authenc("
3933 					    "hmac(md5),rfc3686(ctr(aes))))",
3934 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
3935 						   "rfc3686-ctr-aes-caam",
3936 				.cra_blocksize = 1,
3937 			},
3938 			.setkey = aead_setkey,
3939 			.setauthsize = aead_setauthsize,
3940 			.encrypt = aead_encrypt,
3941 			.decrypt = aead_givdecrypt,
3942 			.ivsize = CTR_RFC3686_IV_SIZE,
3943 			.maxauthsize = MD5_DIGEST_SIZE,
3944 		},
3945 		.caam = {
3946 			.class1_alg_type = OP_ALG_ALGSEL_AES |
3947 					   OP_ALG_AAI_CTR_MOD128,
3948 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3949 					   OP_ALG_AAI_HMAC_PRECOMP,
3950 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3951 			.rfc3686 = true,
3952 			.geniv = true,
3953 		},
3954 	},
3955 	{
3956 		.aead = {
3957 			.base = {
3958 				.cra_name = "authenc(hmac(sha1),"
3959 					    "rfc3686(ctr(aes)))",
3960 				.cra_driver_name = "authenc-hmac-sha1-"
3961 						   "rfc3686-ctr-aes-caam",
3962 				.cra_blocksize = 1,
3963 			},
3964 			.setkey = aead_setkey,
3965 			.setauthsize = aead_setauthsize,
3966 			.encrypt = aead_encrypt,
3967 			.decrypt = aead_decrypt,
3968 			.ivsize = CTR_RFC3686_IV_SIZE,
3969 			.maxauthsize = SHA1_DIGEST_SIZE,
3970 		},
3971 		.caam = {
3972 			.class1_alg_type = OP_ALG_ALGSEL_AES |
3973 					   OP_ALG_AAI_CTR_MOD128,
3974 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3975 					   OP_ALG_AAI_HMAC_PRECOMP,
3976 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3977 			.rfc3686 = true,
3978 		},
3979 	},
3980 	{
3981 		.aead = {
3982 			.base = {
3983 				.cra_name = "seqiv(authenc("
3984 					    "hmac(sha1),rfc3686(ctr(aes))))",
3985 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
3986 						   "rfc3686-ctr-aes-caam",
3987 				.cra_blocksize = 1,
3988 			},
3989 			.setkey = aead_setkey,
3990 			.setauthsize = aead_setauthsize,
3991 			.encrypt = aead_encrypt,
3992 			.decrypt = aead_givdecrypt,
3993 			.ivsize = CTR_RFC3686_IV_SIZE,
3994 			.maxauthsize = SHA1_DIGEST_SIZE,
3995 		},
3996 		.caam = {
3997 			.class1_alg_type = OP_ALG_ALGSEL_AES |
3998 					   OP_ALG_AAI_CTR_MOD128,
3999 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4000 					   OP_ALG_AAI_HMAC_PRECOMP,
4001 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4002 			.rfc3686 = true,
4003 			.geniv = true,
4004 		},
4005 	},
4006 	{
4007 		.aead = {
4008 			.base = {
4009 				.cra_name = "authenc(hmac(sha224),"
4010 					    "rfc3686(ctr(aes)))",
4011 				.cra_driver_name = "authenc-hmac-sha224-"
4012 						   "rfc3686-ctr-aes-caam",
4013 				.cra_blocksize = 1,
4014 			},
4015 			.setkey = aead_setkey,
4016 			.setauthsize = aead_setauthsize,
4017 			.encrypt = aead_encrypt,
4018 			.decrypt = aead_decrypt,
4019 			.ivsize = CTR_RFC3686_IV_SIZE,
4020 			.maxauthsize = SHA224_DIGEST_SIZE,
4021 		},
4022 		.caam = {
4023 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4024 					   OP_ALG_AAI_CTR_MOD128,
4025 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4026 					   OP_ALG_AAI_HMAC_PRECOMP,
4027 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4028 			.rfc3686 = true,
4029 		},
4030 	},
4031 	{
4032 		.aead = {
4033 			.base = {
4034 				.cra_name = "seqiv(authenc("
4035 					    "hmac(sha224),rfc3686(ctr(aes))))",
4036 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
4037 						   "rfc3686-ctr-aes-caam",
4038 				.cra_blocksize = 1,
4039 			},
4040 			.setkey = aead_setkey,
4041 			.setauthsize = aead_setauthsize,
4042 			.encrypt = aead_encrypt,
4043 			.decrypt = aead_givdecrypt,
4044 			.ivsize = CTR_RFC3686_IV_SIZE,
4045 			.maxauthsize = SHA224_DIGEST_SIZE,
4046 		},
4047 		.caam = {
4048 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4049 					   OP_ALG_AAI_CTR_MOD128,
4050 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4051 					   OP_ALG_AAI_HMAC_PRECOMP,
4052 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4053 			.rfc3686 = true,
4054 			.geniv = true,
4055 		},
4056 	},
4057 	{
4058 		.aead = {
4059 			.base = {
4060 				.cra_name = "authenc(hmac(sha256),"
4061 					    "rfc3686(ctr(aes)))",
4062 				.cra_driver_name = "authenc-hmac-sha256-"
4063 						   "rfc3686-ctr-aes-caam",
4064 				.cra_blocksize = 1,
4065 			},
4066 			.setkey = aead_setkey,
4067 			.setauthsize = aead_setauthsize,
4068 			.encrypt = aead_encrypt,
4069 			.decrypt = aead_decrypt,
4070 			.ivsize = CTR_RFC3686_IV_SIZE,
4071 			.maxauthsize = SHA256_DIGEST_SIZE,
4072 		},
4073 		.caam = {
4074 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4075 					   OP_ALG_AAI_CTR_MOD128,
4076 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4077 					   OP_ALG_AAI_HMAC_PRECOMP,
4078 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4079 			.rfc3686 = true,
4080 		},
4081 	},
4082 	{
4083 		.aead = {
4084 			.base = {
4085 				.cra_name = "seqiv(authenc(hmac(sha256),"
4086 					    "rfc3686(ctr(aes))))",
4087 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
4088 						   "rfc3686-ctr-aes-caam",
4089 				.cra_blocksize = 1,
4090 			},
4091 			.setkey = aead_setkey,
4092 			.setauthsize = aead_setauthsize,
4093 			.encrypt = aead_encrypt,
4094 			.decrypt = aead_givdecrypt,
4095 			.ivsize = CTR_RFC3686_IV_SIZE,
4096 			.maxauthsize = SHA256_DIGEST_SIZE,
4097 		},
4098 		.caam = {
4099 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4100 					   OP_ALG_AAI_CTR_MOD128,
4101 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4102 					   OP_ALG_AAI_HMAC_PRECOMP,
4103 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4104 			.rfc3686 = true,
4105 			.geniv = true,
4106 		},
4107 	},
4108 	{
4109 		.aead = {
4110 			.base = {
4111 				.cra_name = "authenc(hmac(sha384),"
4112 					    "rfc3686(ctr(aes)))",
4113 				.cra_driver_name = "authenc-hmac-sha384-"
4114 						   "rfc3686-ctr-aes-caam",
4115 				.cra_blocksize = 1,
4116 			},
4117 			.setkey = aead_setkey,
4118 			.setauthsize = aead_setauthsize,
4119 			.encrypt = aead_encrypt,
4120 			.decrypt = aead_decrypt,
4121 			.ivsize = CTR_RFC3686_IV_SIZE,
4122 			.maxauthsize = SHA384_DIGEST_SIZE,
4123 		},
4124 		.caam = {
4125 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4126 					   OP_ALG_AAI_CTR_MOD128,
4127 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4128 					   OP_ALG_AAI_HMAC_PRECOMP,
4129 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4130 			.rfc3686 = true,
4131 		},
4132 	},
4133 	{
4134 		.aead = {
4135 			.base = {
4136 				.cra_name = "seqiv(authenc(hmac(sha384),"
4137 					    "rfc3686(ctr(aes))))",
4138 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
4139 						   "rfc3686-ctr-aes-caam",
4140 				.cra_blocksize = 1,
4141 			},
4142 			.setkey = aead_setkey,
4143 			.setauthsize = aead_setauthsize,
4144 			.encrypt = aead_encrypt,
4145 			.decrypt = aead_givdecrypt,
4146 			.ivsize = CTR_RFC3686_IV_SIZE,
4147 			.maxauthsize = SHA384_DIGEST_SIZE,
4148 		},
4149 		.caam = {
4150 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4151 					   OP_ALG_AAI_CTR_MOD128,
4152 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4153 					   OP_ALG_AAI_HMAC_PRECOMP,
4154 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4155 			.rfc3686 = true,
4156 			.geniv = true,
4157 		},
4158 	},
4159 	{
4160 		.aead = {
4161 			.base = {
4162 				.cra_name = "authenc(hmac(sha512),"
4163 					    "rfc3686(ctr(aes)))",
4164 				.cra_driver_name = "authenc-hmac-sha512-"
4165 						   "rfc3686-ctr-aes-caam",
4166 				.cra_blocksize = 1,
4167 			},
4168 			.setkey = aead_setkey,
4169 			.setauthsize = aead_setauthsize,
4170 			.encrypt = aead_encrypt,
4171 			.decrypt = aead_decrypt,
4172 			.ivsize = CTR_RFC3686_IV_SIZE,
4173 			.maxauthsize = SHA512_DIGEST_SIZE,
4174 		},
4175 		.caam = {
4176 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4177 					   OP_ALG_AAI_CTR_MOD128,
4178 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4179 					   OP_ALG_AAI_HMAC_PRECOMP,
4180 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4181 			.rfc3686 = true,
4182 		},
4183 	},
4184 	{
4185 		.aead = {
4186 			.base = {
4187 				.cra_name = "seqiv(authenc(hmac(sha512),"
4188 					    "rfc3686(ctr(aes))))",
4189 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
4190 						   "rfc3686-ctr-aes-caam",
4191 				.cra_blocksize = 1,
4192 			},
4193 			.setkey = aead_setkey,
4194 			.setauthsize = aead_setauthsize,
4195 			.encrypt = aead_encrypt,
4196 			.decrypt = aead_givdecrypt,
4197 			.ivsize = CTR_RFC3686_IV_SIZE,
4198 			.maxauthsize = SHA512_DIGEST_SIZE,
4199 		},
4200 		.caam = {
4201 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4202 					   OP_ALG_AAI_CTR_MOD128,
4203 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4204 					   OP_ALG_AAI_HMAC_PRECOMP,
4205 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4206 			.rfc3686 = true,
4207 			.geniv = true,
4208 		},
4209 	},
4210 };
4211 
4212 struct caam_crypto_alg {
4213 	struct crypto_alg crypto_alg;
4214 	struct list_head entry;
4215 	struct caam_alg_entry caam;
4216 };
4217 
4218 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4219 {
4220 	ctx->jrdev = caam_jr_alloc();
4221 	if (IS_ERR(ctx->jrdev)) {
4222 		pr_err("Job Ring Device allocation for transform failed\n");
4223 		return PTR_ERR(ctx->jrdev);
4224 	}
4225 
4226 	/* copy descriptor header template value */
4227 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4228 	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4229 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
4230 
4231 	return 0;
4232 }
4233 
4234 static int caam_cra_init(struct crypto_tfm *tfm)
4235 {
4236 	struct crypto_alg *alg = tfm->__crt_alg;
4237 	struct caam_crypto_alg *caam_alg =
4238 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
4239 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4240 
4241 	return caam_init_common(ctx, &caam_alg->caam);
4242 }
4243 
4244 static int caam_aead_init(struct crypto_aead *tfm)
4245 {
4246 	struct aead_alg *alg = crypto_aead_alg(tfm);
4247 	struct caam_aead_alg *caam_alg =
4248 		 container_of(alg, struct caam_aead_alg, aead);
4249 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4250 
4251 	return caam_init_common(ctx, &caam_alg->caam);
4252 }
4253 
4254 static void caam_exit_common(struct caam_ctx *ctx)
4255 {
4256 	if (ctx->sh_desc_enc_dma &&
4257 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
4258 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
4259 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
4260 	if (ctx->sh_desc_dec_dma &&
4261 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
4262 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
4263 				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
4264 	if (ctx->sh_desc_givenc_dma &&
4265 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
4266 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
4267 				 desc_bytes(ctx->sh_desc_givenc),
4268 				 DMA_TO_DEVICE);
4269 	if (ctx->key_dma &&
4270 	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
4271 		dma_unmap_single(ctx->jrdev, ctx->key_dma,
4272 				 ctx->enckeylen + ctx->split_key_pad_len,
4273 				 DMA_TO_DEVICE);
4274 
4275 	caam_jr_free(ctx->jrdev);
4276 }
4277 
4278 static void caam_cra_exit(struct crypto_tfm *tfm)
4279 {
4280 	caam_exit_common(crypto_tfm_ctx(tfm));
4281 }
4282 
4283 static void caam_aead_exit(struct crypto_aead *tfm)
4284 {
4285 	caam_exit_common(crypto_aead_ctx(tfm));
4286 }
4287 
4288 static void __exit caam_algapi_exit(void)
4289 {
4290 
4291 	struct caam_crypto_alg *t_alg, *n;
4292 	int i;
4293 
4294 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4295 		struct caam_aead_alg *t_alg = driver_aeads + i;
4296 
4297 		if (t_alg->registered)
4298 			crypto_unregister_aead(&t_alg->aead);
4299 	}
4300 
4301 	if (!alg_list.next)
4302 		return;
4303 
4304 	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
4305 		crypto_unregister_alg(&t_alg->crypto_alg);
4306 		list_del(&t_alg->entry);
4307 		kfree(t_alg);
4308 	}
4309 }
4310 
4311 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
4312 					      *template)
4313 {
4314 	struct caam_crypto_alg *t_alg;
4315 	struct crypto_alg *alg;
4316 
4317 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4318 	if (!t_alg) {
4319 		pr_err("failed to allocate t_alg\n");
4320 		return ERR_PTR(-ENOMEM);
4321 	}
4322 
4323 	alg = &t_alg->crypto_alg;
4324 
4325 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
4326 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4327 		 template->driver_name);
4328 	alg->cra_module = THIS_MODULE;
4329 	alg->cra_init = caam_cra_init;
4330 	alg->cra_exit = caam_cra_exit;
4331 	alg->cra_priority = CAAM_CRA_PRIORITY;
4332 	alg->cra_blocksize = template->blocksize;
4333 	alg->cra_alignmask = 0;
4334 	alg->cra_ctxsize = sizeof(struct caam_ctx);
4335 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
4336 			 template->type;
4337 	switch (template->type) {
4338 	case CRYPTO_ALG_TYPE_GIVCIPHER:
4339 		alg->cra_type = &crypto_givcipher_type;
4340 		alg->cra_ablkcipher = template->template_ablkcipher;
4341 		break;
4342 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
4343 		alg->cra_type = &crypto_ablkcipher_type;
4344 		alg->cra_ablkcipher = template->template_ablkcipher;
4345 		break;
4346 	}
4347 
4348 	t_alg->caam.class1_alg_type = template->class1_alg_type;
4349 	t_alg->caam.class2_alg_type = template->class2_alg_type;
4350 	t_alg->caam.alg_op = template->alg_op;
4351 
4352 	return t_alg;
4353 }
4354 
4355 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
4356 {
4357 	struct aead_alg *alg = &t_alg->aead;
4358 
4359 	alg->base.cra_module = THIS_MODULE;
4360 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
4361 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
4362 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
4363 
4364 	alg->init = caam_aead_init;
4365 	alg->exit = caam_aead_exit;
4366 }
4367 
4368 static int __init caam_algapi_init(void)
4369 {
4370 	struct device_node *dev_node;
4371 	struct platform_device *pdev;
4372 	struct device *ctrldev;
4373 	struct caam_drv_private *priv;
4374 	int i = 0, err = 0;
4375 	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4376 	unsigned int md_limit = SHA512_DIGEST_SIZE;
4377 	bool registered = false;
4378 
4379 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4380 	if (!dev_node) {
4381 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4382 		if (!dev_node)
4383 			return -ENODEV;
4384 	}
4385 
4386 	pdev = of_find_device_by_node(dev_node);
4387 	if (!pdev) {
4388 		of_node_put(dev_node);
4389 		return -ENODEV;
4390 	}
4391 
4392 	ctrldev = &pdev->dev;
4393 	priv = dev_get_drvdata(ctrldev);
4394 	of_node_put(dev_node);
4395 
4396 	/*
4397 	 * If priv is NULL, it's probably because the caam driver wasn't
4398 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4399 	 */
4400 	if (!priv)
4401 		return -ENODEV;
4402 
4403 
4404 	INIT_LIST_HEAD(&alg_list);
4405 
4406 	/*
4407 	 * Register crypto algorithms the device supports.
4408 	 * First, detect presence and attributes of DES, AES, and MD blocks.
4409 	 */
4410 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4411 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4412 	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4413 	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4414 	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4415 
4416 	/* If MD is present, limit digest size based on LP256 */
4417 	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4418 		md_limit = SHA256_DIGEST_SIZE;
4419 
4420 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4421 		struct caam_crypto_alg *t_alg;
4422 		struct caam_alg_template *alg = driver_algs + i;
4423 		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
4424 
4425 		/* Skip DES algorithms if not supported by device */
4426 		if (!des_inst &&
4427 		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
4428 		     (alg_sel == OP_ALG_ALGSEL_DES)))
4429 				continue;
4430 
4431 		/* Skip AES algorithms if not supported by device */
4432 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
4433 				continue;
4434 
4435 		t_alg = caam_alg_alloc(alg);
4436 		if (IS_ERR(t_alg)) {
4437 			err = PTR_ERR(t_alg);
4438 			pr_warn("%s alg allocation failed\n", alg->driver_name);
4439 			continue;
4440 		}
4441 
4442 		err = crypto_register_alg(&t_alg->crypto_alg);
4443 		if (err) {
4444 			pr_warn("%s alg registration failed\n",
4445 				t_alg->crypto_alg.cra_driver_name);
4446 			kfree(t_alg);
4447 			continue;
4448 		}
4449 
4450 		list_add_tail(&t_alg->entry, &alg_list);
4451 		registered = true;
4452 	}
4453 
4454 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4455 		struct caam_aead_alg *t_alg = driver_aeads + i;
4456 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
4457 				 OP_ALG_ALGSEL_MASK;
4458 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
4459 				 OP_ALG_ALGSEL_MASK;
4460 		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4461 
4462 		/* Skip DES algorithms if not supported by device */
4463 		if (!des_inst &&
4464 		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
4465 		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
4466 				continue;
4467 
4468 		/* Skip AES algorithms if not supported by device */
4469 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
4470 				continue;
4471 
4472 		/*
4473 		 * Check support for AES algorithms not available
4474 		 * on LP devices.
4475 		 */
4476 		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
4477 			if (alg_aai == OP_ALG_AAI_GCM)
4478 				continue;
4479 
4480 		/*
4481 		 * Skip algorithms requiring message digests
4482 		 * if MD or MD size is not supported by device.
4483 		 */
4484 		if (c2_alg_sel &&
4485 		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
4486 				continue;
4487 
4488 		caam_aead_alg_init(t_alg);
4489 
4490 		err = crypto_register_aead(&t_alg->aead);
4491 		if (err) {
4492 			pr_warn("%s alg registration failed\n",
4493 				t_alg->aead.base.cra_driver_name);
4494 			continue;
4495 		}
4496 
4497 		t_alg->registered = true;
4498 		registered = true;
4499 	}
4500 
4501 	if (registered)
4502 		pr_info("caam algorithms registered in /proc/crypto\n");
4503 
4504 	return err;
4505 }
4506 
4507 module_init(caam_algapi_init);
4508 module_exit(caam_algapi_exit);
4509 
4510 MODULE_LICENSE("GPL");
4511 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
4512 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
4513