/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

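/*
 * Illustrative sketch, not part of the original driver: a job descriptor
 * matching the layout in the header comment above would be built roughly
 * like this with the desc_constr.h helpers. The function and parameter
 * names are hypothetical; only init_job_desc_shared(),
 * append_seq_out_ptr() and append_seq_in_ptr() are existing helpers.
 */
static void __maybe_unused example_build_job_desc(u32 *desc,
						  dma_addr_t sh_desc_dma,
						  int sh_desc_len,
						  dma_addr_t src_dma,
						  u32 src_len,
						  dma_addr_t dst_dma,
						  u32 dst_len)
{
	/* Header plus pointer to the reusable shared descriptor */
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, src_len, 0);
}
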
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
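/*
 * Worked example (assuming 32-bit DMA addresses, i.e. CAAM_PTR_SZ = 4 and
 * DESC_JOB_IO_LEN = 5 * CAAM_CMD_SZ + 3 * CAAM_PTR_SZ = 32 bytes): the
 * 64-word / 256-byte descriptor buffer then leaves 224 bytes, i.e. 56
 * commands, for the shared descriptor.
 */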

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, ##arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
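	/*
	 * Set VARSEQINLEN/VARSEQOUTLEN to the full remaining SEQ length;
	 * REG0 serves as a zero operand here (an assumption implied by the
	 * driver's own "= seqinlen" comments elsewhere).
	 */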
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
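
/*
 * Illustrative note (not used by the driver itself): for authenc-style
 * algorithms the single ctx->key buffer is laid out as
 *
 *	| split auth key (split_key_pad_len) | enc key (enckeylen) | [nonce] |
 *
 * so the encryption key always starts at split_key_pad_len. The helper
 * below only documents that layout; its name is hypothetical.
 */
static inline u8 *caam_ctx_enc_key(struct caam_ctx *ctx)
{
	return ctx->key + ctx->split_key_pad_len;
}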

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
			       enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
				      FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	if (alg->caam.geniv)
		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
	else
		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	if (alg->caam.geniv) {
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV from class 1 context to OFIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload the IV */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
						 JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
						 JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump over the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
						 JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			       ctx->split_key_pad_len, key_in, authkeylen,
			       ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
			       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
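	/*
	 * Worked example: for SHA-1 (mdpadlen[1] = 20) this yields a
	 * 2 * 20 = 40-byte split key, padded to 48 bytes; for SHA-256
	 * (mdpadlen[3] = 32) a 64-byte split key, already 16-byte aligned.
	 */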

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;
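	/*
	 * Worked example: a 20-byte rfc4106(gcm(aes)) key is a 16-byte
	 * AES-128 key followed by the 4-byte salt, so enckeylen becomes 16.
	 */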

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u32 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
1496 	 * CONTEXT1[255:128] = IV
1497 	 */
1498 	if (ctr_mode)
1499 		ctx1_iv_off = 16;
1500 
1501 	/*
1502 	 * RFC3686 specific:
1503 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1504 	 *	| *key = {KEY, NONCE}
1505 	 */
1506 	if (is_rfc3686) {
1507 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1508 		keylen -= CTR_RFC3686_NONCE_SIZE;
1509 	}
1510 
1511 	memcpy(ctx->key, key, keylen);
1512 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1513 				      DMA_TO_DEVICE);
1514 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1515 		dev_err(jrdev, "unable to map key i/o memory\n");
1516 		return -ENOMEM;
1517 	}
1518 	ctx->enckeylen = keylen;
1519 
1520 	/* ablkcipher_encrypt shared descriptor */
1521 	desc = ctx->sh_desc_enc;
1522 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1523 	/* Skip if already shared */
1524 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1525 				   JUMP_COND_SHRD);
1526 
1527 	/* Load class1 key only */
1528 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1529 			  ctx->enckeylen, CLASS_1 |
1530 			  KEY_DEST_CLASS_REG);
1531 
1532 	/* Load nonce into CONTEXT1 reg */
1533 	if (is_rfc3686) {
1534 		nonce = (u32 *)(key + keylen);
1535 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1536 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1537 		append_move(desc, MOVE_WAITCOMP |
1538 			    MOVE_SRC_OUTFIFO |
1539 			    MOVE_DEST_CLASS1CTX |
1540 			    (16 << MOVE_OFFSET_SHIFT) |
1541 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1542 	}
1543 
1544 	set_jump_tgt_here(desc, key_jump_cmd);
1545 
1546 	/* Load iv */
1547 	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1548 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1549 
1550 	/* Load counter into CONTEXT1 reg */
1551 	if (is_rfc3686)
1552 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1553 				    LDST_CLASS_1_CCB |
1554 				    LDST_SRCDST_BYTE_CONTEXT |
1555 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1556 				     LDST_OFFSET_SHIFT));
1557 
1558 	/* Load operation */
1559 	append_operation(desc, ctx->class1_alg_type |
1560 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1561 
1562 	/* Perform operation */
1563 	ablkcipher_append_src_dst(desc);
1564 
1565 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1566 					      desc_bytes(desc),
1567 					      DMA_TO_DEVICE);
1568 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1569 		dev_err(jrdev, "unable to map shared descriptor\n");
1570 		return -ENOMEM;
1571 	}
1572 #ifdef DEBUG
1573 	print_hex_dump(KERN_ERR,
1574 		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
1575 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1576 		       desc_bytes(desc), 1);
1577 #endif
1578 	/* ablkcipher_decrypt shared descriptor */
1579 	desc = ctx->sh_desc_dec;
1580 
1581 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1582 	/* Skip if already shared */
1583 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1584 				   JUMP_COND_SHRD);
1585 
1586 	/* Load class1 key only */
1587 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1588 			  ctx->enckeylen, CLASS_1 |
1589 			  KEY_DEST_CLASS_REG);
1590 
1591 	/* Load nonce into CONTEXT1 reg */
1592 	if (is_rfc3686) {
1593 		nonce = (u32 *)(key + keylen);
1594 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1595 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1596 		append_move(desc, MOVE_WAITCOMP |
1597 			    MOVE_SRC_OUTFIFO |
1598 			    MOVE_DEST_CLASS1CTX |
1599 			    (16 << MOVE_OFFSET_SHIFT) |
1600 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1601 	}
1602 
1603 	set_jump_tgt_here(desc, key_jump_cmd);
1604 
1605 	/* load IV */
1606 	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1607 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1608 
1609 	/* Load counter into CONTEXT1 reg */
1610 	if (is_rfc3686)
1611 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1612 				    LDST_CLASS_1_CCB |
1613 				    LDST_SRCDST_BYTE_CONTEXT |
1614 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1615 				     LDST_OFFSET_SHIFT));
1616 
1617 	/* Choose operation */
1618 	if (ctr_mode)
1619 		append_operation(desc, ctx->class1_alg_type |
1620 				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1621 	else
1622 		append_dec_op1(desc, ctx->class1_alg_type);
1623 
1624 	/* Perform operation */
1625 	ablkcipher_append_src_dst(desc);
1626 
1627 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1628 					      desc_bytes(desc),
1629 					      DMA_TO_DEVICE);
1630 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1631 		dev_err(jrdev, "unable to map shared descriptor\n");
1632 		return -ENOMEM;
1633 	}
1634 
1635 #ifdef DEBUG
1636 	print_hex_dump(KERN_ERR,
1637 		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
1638 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1639 		       desc_bytes(desc), 1);
1640 #endif
1641 	/* ablkcipher_givencrypt shared descriptor */
1642 	desc = ctx->sh_desc_givenc;
1643 
1644 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1645 	/* Skip if already shared */
1646 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1647 				   JUMP_COND_SHRD);
1648 
1649 	/* Load class1 key only */
1650 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1651 			  ctx->enckeylen, CLASS_1 |
1652 			  KEY_DEST_CLASS_REG);
1653 
	/* Load nonce into CONTEXT1 reg */
1655 	if (is_rfc3686) {
1656 		nonce = (u32 *)(key + keylen);
1657 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1658 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1659 		append_move(desc, MOVE_WAITCOMP |
1660 			    MOVE_SRC_OUTFIFO |
1661 			    MOVE_DEST_CLASS1CTX |
1662 			    (16 << MOVE_OFFSET_SHIFT) |
1663 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1664 	}
1665 	set_jump_tgt_here(desc, key_jump_cmd);
1666 
1667 	/* Generate IV */
1668 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1669 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1670 		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1671 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1672 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1673 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1674 	append_move(desc, MOVE_WAITCOMP |
1675 		    MOVE_SRC_INFIFO |
1676 		    MOVE_DEST_CLASS1CTX |
1677 		    (crt->ivsize << MOVE_LEN_SHIFT) |
1678 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1679 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
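	/*
	 * A sketch of what the five commands above do, as implied by their
	 * flags: the NFIFO entry programs the pad engine to produce
	 * crt->ivsize random bytes (NFIFOENTRY_PTYPE_RND) destined for the
	 * DECO; automatic info FIFO entries are suspended so that the MOVE
	 * can copy those bytes from the info FIFO into CONTEXT1 at
	 * ctx1_iv_off, after which automatic operation is re-enabled.
	 */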
1680 
1681 	/* Copy generated IV to memory */
1682 	append_seq_store(desc, crt->ivsize,
1683 			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1684 			 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1685 
	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1689 				    LDST_CLASS_1_CCB |
1690 				    LDST_SRCDST_BYTE_CONTEXT |
1691 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1692 				     LDST_OFFSET_SHIFT));
1693 
1694 	if (ctx1_iv_off)
1695 		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1696 			    (1 << JUMP_OFFSET_SHIFT));
1697 
1698 	/* Load operation */
1699 	append_operation(desc, ctx->class1_alg_type |
1700 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1701 
1702 	/* Perform operation */
1703 	ablkcipher_append_src_dst(desc);
1704 
1705 	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1706 						 desc_bytes(desc),
1707 						 DMA_TO_DEVICE);
1708 	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1709 		dev_err(jrdev, "unable to map shared descriptor\n");
1710 		return -ENOMEM;
1711 	}
1712 #ifdef DEBUG
1713 	print_hex_dump(KERN_ERR,
1714 		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1715 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1716 		       desc_bytes(desc), 1);
1717 #endif
1718 
1719 	return ret;
1720 }
1721 
1722 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1723 				 const u8 *key, unsigned int keylen)
1724 {
1725 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1726 	struct device *jrdev = ctx->jrdev;
1727 	u32 *key_jump_cmd, *desc;
1728 	__be64 sector_size = cpu_to_be64(512);
1729 
	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1731 		crypto_ablkcipher_set_flags(ablkcipher,
1732 					    CRYPTO_TFM_RES_BAD_KEY_LEN);
1733 		dev_err(jrdev, "key size mismatch\n");
1734 		return -EINVAL;
1735 	}
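	/*
	 * Background for the check above: an XTS key is the concatenation
	 * of the data-encryption key and the tweak key, so the only lengths
	 * accepted are 32 bytes (2 x AES-128) and 64 bytes (2 x AES-256);
	 * XTS with AES-192 is not offered by this driver.
	 */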
1736 
1737 	memcpy(ctx->key, key, keylen);
1738 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
1739 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1740 		dev_err(jrdev, "unable to map key i/o memory\n");
1741 		return -ENOMEM;
1742 	}
1743 	ctx->enckeylen = keylen;
1744 
1745 	/* xts_ablkcipher_encrypt shared descriptor */
1746 	desc = ctx->sh_desc_enc;
1747 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1748 	/* Skip if already shared */
1749 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1750 				   JUMP_COND_SHRD);
1751 
1752 	/* Load class1 keys only */
1753 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1754 			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1755 
1756 	/* Load sector size with index 40 bytes (0x28) */
1757 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1758 		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1759 	append_data(desc, (void *)&sector_size, 8);
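	/*
	 * CONTEXT1 byte layout for XTS, as implied by the 0x20/0x28 offsets
	 * used here and below (a sketch):
	 *   0x20..0x27: sector index (upper 8 bytes of the IV)
	 *   0x28..0x2f: sector size, 512 as a big-endian 64-bit value
	 */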
1760 
1761 	set_jump_tgt_here(desc, key_jump_cmd);
1762 
1763 	/*
1764 	 * create sequence for loading the sector index
1765 	 * Upper 8B of IV - will be used as sector index
1766 	 * Lower 8B of IV - will be discarded
1767 	 */
1768 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1769 		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1770 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1771 
1772 	/* Load operation */
1773 	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
1774 			 OP_ALG_ENCRYPT);
1775 
1776 	/* Perform operation */
1777 	ablkcipher_append_src_dst(desc);
1778 
1779 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1780 					      DMA_TO_DEVICE);
1781 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1782 		dev_err(jrdev, "unable to map shared descriptor\n");
1783 		return -ENOMEM;
1784 	}
1785 #ifdef DEBUG
1786 	print_hex_dump(KERN_ERR,
1787 		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1788 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1789 #endif
1790 
1791 	/* xts_ablkcipher_decrypt shared descriptor */
1792 	desc = ctx->sh_desc_dec;
1793 
1794 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1795 	/* Skip if already shared */
1796 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1797 				   JUMP_COND_SHRD);
1798 
	/* Load class1 keys only */
1800 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1801 			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1802 
1803 	/* Load sector size with index 40 bytes (0x28) */
1804 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1805 		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1806 	append_data(desc, (void *)&sector_size, 8);
1807 
1808 	set_jump_tgt_here(desc, key_jump_cmd);
1809 
1810 	/*
1811 	 * create sequence for loading the sector index
1812 	 * Upper 8B of IV - will be used as sector index
1813 	 * Lower 8B of IV - will be discarded
1814 	 */
1815 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1816 		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1817 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1818 
1819 	/* Load operation */
1820 	append_dec_op1(desc, ctx->class1_alg_type);
1821 
1822 	/* Perform operation */
1823 	ablkcipher_append_src_dst(desc);
1824 
1825 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1826 					      DMA_TO_DEVICE);
1827 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1828 		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
1829 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
1830 		dev_err(jrdev, "unable to map shared descriptor\n");
1831 		return -ENOMEM;
1832 	}
1833 #ifdef DEBUG
1834 	print_hex_dump(KERN_ERR,
1835 		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1836 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1837 #endif
1838 
1839 	return 0;
1840 }
1841 
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *	     variable length, must not exceed MAX_CAAM_DESCSIZE
 */
1853 struct aead_edesc {
1854 	int assoc_nents;
1855 	int src_nents;
1856 	int dst_nents;
1857 	dma_addr_t iv_dma;
1858 	int sec4_sg_bytes;
1859 	dma_addr_t sec4_sg_dma;
1860 	struct sec4_sg_entry *sec4_sg;
1861 	u32 hw_desc[];
1862 };
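/*
 * A sketch of the single allocation backing an extended descriptor; the
 * "desc_bytes" and "sec4_sg_bytes" labels are the sizes passed to kzalloc()
 * in the *_edesc_alloc() routines below:
 *
 *	+----------------------+ <- edesc
 *	| struct *_edesc       |
 *	+----------------------+ <- edesc->hw_desc
 *	| h/w job descriptor   | (desc_bytes)
 *	+----------------------+ <- edesc->sec4_sg
 *	| sec4 h/w link table  | (sec4_sg_bytes, mapped to sec4_sg_dma)
 *	+----------------------+
 */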
1863 
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *	     variable length, must not exceed MAX_CAAM_DESCSIZE
 */
1874 struct ablkcipher_edesc {
1875 	int src_nents;
1876 	int dst_nents;
1877 	dma_addr_t iv_dma;
1878 	int sec4_sg_bytes;
1879 	dma_addr_t sec4_sg_dma;
1880 	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
1882 };
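/*
 * Link table ordering used for ablkcipher, following the index arithmetic
 * in ablkcipher_edesc_alloc() and ablkcipher_giv_edesc_alloc() below:
 *
 *	encrypt/decrypt: [ IV entry | src entries ... | dst entries ... ]
 *	givencrypt:      [ src entries ... | IV entry | dst entries ... ]
 *
 * In each case the IV entry sits next to the sequence it extends: the IV
 * precedes the input for a regular operation (SEQ IN = IV || src), and the
 * generated IV precedes the output for givencrypt (SEQ OUT = IV || dst).
 */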
1883 
1884 static void caam_unmap(struct device *dev, struct scatterlist *src,
1885 		       struct scatterlist *dst, int src_nents,
1886 		       int dst_nents,
1887 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1888 		       int sec4_sg_bytes)
1889 {
1890 	if (dst != src) {
1891 		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
1892 		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
1893 	} else {
1894 		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
1895 	}
1896 
1897 	if (iv_dma)
1898 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1899 	if (sec4_sg_bytes)
1900 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
1901 				 DMA_TO_DEVICE);
1902 }
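/*
 * Note on the "nents ? : 1" idiom above: a segment count of zero is used
 * throughout this file to mean "single contiguous buffer", which was DMA
 * mapped as one scatterlist entry and must be unmapped the same way.
 */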
1903 
1904 static void aead_unmap(struct device *dev,
1905 		       struct aead_edesc *edesc,
1906 		       struct aead_request *req)
1907 {
1908 	caam_unmap(dev, req->src, req->dst,
1909 		   edesc->src_nents, edesc->dst_nents, 0, 0,
1910 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1911 }
1912 
1913 static void ablkcipher_unmap(struct device *dev,
1914 			     struct ablkcipher_edesc *edesc,
1915 			     struct ablkcipher_request *req)
1916 {
1917 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1918 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1919 
1920 	caam_unmap(dev, req->src, req->dst,
1921 		   edesc->src_nents, edesc->dst_nents,
1922 		   edesc->iv_dma, ivsize,
1923 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1924 }
1925 
1926 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1927 				   void *context)
1928 {
1929 	struct aead_request *req = context;
1930 	struct aead_edesc *edesc;
1931 
1932 #ifdef DEBUG
1933 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1934 #endif
1935 
1936 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1937 
1938 	if (err)
1939 		caam_jr_strstatus(jrdev, err);
1940 
1941 	aead_unmap(jrdev, edesc, req);
1942 
1943 	kfree(edesc);
1944 
1945 	aead_request_complete(req, err);
1946 }
1947 
1948 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1949 				   void *context)
1950 {
1951 	struct aead_request *req = context;
1952 	struct aead_edesc *edesc;
1953 
1954 #ifdef DEBUG
1955 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1956 #endif
1957 
1958 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1959 
1960 	if (err)
1961 		caam_jr_strstatus(jrdev, err);
1962 
1963 	aead_unmap(jrdev, edesc, req);
1964 
	/*
	 * If the h/w ICV (integrity) check failed, report -EBADMSG so the
	 * caller sees an authentication failure rather than a driver error.
	 */
1968 	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1969 		err = -EBADMSG;
1970 
1971 	kfree(edesc);
1972 
1973 	aead_request_complete(req, err);
1974 }
1975 
1976 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1977 				   void *context)
1978 {
1979 	struct ablkcipher_request *req = context;
1980 	struct ablkcipher_edesc *edesc;
1981 #ifdef DEBUG
1982 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1983 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1984 
1985 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1986 #endif
1987 
1988 	edesc = (struct ablkcipher_edesc *)((char *)desc -
1989 		 offsetof(struct ablkcipher_edesc, hw_desc));
1990 
1991 	if (err)
1992 		caam_jr_strstatus(jrdev, err);
1993 
1994 #ifdef DEBUG
1995 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
1996 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1997 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
1998 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
1999 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2000 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2001 #endif
2002 
2003 	ablkcipher_unmap(jrdev, edesc, req);
2004 	kfree(edesc);
2005 
2006 	ablkcipher_request_complete(req, err);
2007 }
2008 
2009 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2010 				    void *context)
2011 {
2012 	struct ablkcipher_request *req = context;
2013 	struct ablkcipher_edesc *edesc;
2014 #ifdef DEBUG
2015 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2016 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2017 
2018 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2019 #endif
2020 
2021 	edesc = (struct ablkcipher_edesc *)((char *)desc -
2022 		 offsetof(struct ablkcipher_edesc, hw_desc));
2023 	if (err)
2024 		caam_jr_strstatus(jrdev, err);
2025 
2026 #ifdef DEBUG
2027 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2028 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2029 		       ivsize, 1);
2030 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2031 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2032 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2033 #endif
2034 
2035 	ablkcipher_unmap(jrdev, edesc, req);
2036 	kfree(edesc);
2037 
2038 	ablkcipher_request_complete(req, err);
2039 }
2040 
2041 /*
2042  * Fill in aead job descriptor
2043  */
2044 static void init_aead_job(struct aead_request *req,
2045 			  struct aead_edesc *edesc,
2046 			  bool all_contig, bool encrypt)
2047 {
2048 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2049 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2050 	int authsize = ctx->authsize;
2051 	u32 *desc = edesc->hw_desc;
2052 	u32 out_options, in_options;
2053 	dma_addr_t dst_dma, src_dma;
2054 	int len, sec4_sg_index = 0;
2055 	dma_addr_t ptr;
2056 	u32 *sh_desc;
2057 
2058 	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2059 	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2060 
2061 	len = desc_len(sh_desc);
2062 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2063 
2064 	if (all_contig) {
2065 		src_dma = sg_dma_address(req->src);
2066 		in_options = 0;
2067 	} else {
2068 		src_dma = edesc->sec4_sg_dma;
2069 		sec4_sg_index += edesc->src_nents;
2070 		in_options = LDST_SGF;
2071 	}
2072 
2073 	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2074 			  in_options);
2075 
2076 	dst_dma = src_dma;
2077 	out_options = in_options;
2078 
2079 	if (unlikely(req->src != req->dst)) {
2080 		if (!edesc->dst_nents) {
2081 			dst_dma = sg_dma_address(req->dst);
2082 		} else {
2083 			dst_dma = edesc->sec4_sg_dma +
2084 				  sec4_sg_index *
2085 				  sizeof(struct sec4_sg_entry);
2086 			out_options = LDST_SGF;
2087 		}
2088 	}
2089 
2090 	if (encrypt)
2091 		append_seq_out_ptr(desc, dst_dma,
2092 				   req->assoclen + req->cryptlen + authsize,
2093 				   out_options);
2094 	else
2095 		append_seq_out_ptr(desc, dst_dma,
2096 				   req->assoclen + req->cryptlen - authsize,
2097 				   out_options);
2098 
2099 	/* REG3 = assoclen */
2100 	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2101 }
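/*
 * Shape of the job descriptor built by init_aead_job(), as implied by the
 * append calls above (a sketch):
 *
 *	HEADER      (run shared descriptor: deferred, reversed order)
 *	SEQ IN PTR  -> assoc || payload (assoclen + cryptlen bytes)
 *	SEQ OUT PTR -> assoclen + cryptlen + authsize bytes on encrypt,
 *	               assoclen + cryptlen - authsize bytes on decrypt
 *	MATH        REG3 = assoclen (consumed by the shared descriptor)
 */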
2102 
2103 static void init_gcm_job(struct aead_request *req,
2104 			 struct aead_edesc *edesc,
2105 			 bool all_contig, bool encrypt)
2106 {
2107 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2108 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2109 	unsigned int ivsize = crypto_aead_ivsize(aead);
2110 	u32 *desc = edesc->hw_desc;
2111 	bool generic_gcm = (ivsize == 12);
2112 	unsigned int last;
2113 
2114 	init_aead_job(req, edesc, all_contig, encrypt);
2115 
2116 	/* BUG This should not be specific to generic GCM. */
2117 	last = 0;
2118 	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2119 		last = FIFOLD_TYPE_LAST1;
2120 
2121 	/* Read GCM IV */
2122 	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2123 			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2124 	/* Append Salt */
2125 	if (!generic_gcm)
2126 		append_data(desc, ctx->key + ctx->enckeylen, 4);
2127 	/* Append IV */
2128 	append_data(desc, req->iv, ivsize);
2129 	/* End of blank commands */
2130 }
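/*
 * The FIFO LOAD appended above always feeds 12 IV bytes to class 1: for
 * generic GCM these are req->iv itself (ivsize == 12), while for RFC4106
 * they are the 4-byte salt stored past the key (ctx->key + ctx->enckeylen)
 * followed by the 8-byte per-request IV, i.e. salt || IV.
 */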
2131 
2132 static void init_authenc_job(struct aead_request *req,
2133 			     struct aead_edesc *edesc,
2134 			     bool all_contig, bool encrypt)
2135 {
2136 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2137 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
2138 						 struct caam_aead_alg, aead);
2139 	unsigned int ivsize = crypto_aead_ivsize(aead);
2140 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2141 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
2142 			       OP_ALG_AAI_CTR_MOD128);
2143 	const bool is_rfc3686 = alg->caam.rfc3686;
2144 	u32 *desc = edesc->hw_desc;
2145 	u32 ivoffset = 0;
2146 
2147 	/*
2148 	 * AES-CTR needs to load IV in CONTEXT1 reg
2149 	 * at an offset of 128bits (16bytes)
2150 	 * CONTEXT1[255:128] = IV
2151 	 */
2152 	if (ctr_mode)
2153 		ivoffset = 16;
2154 
2155 	/*
2156 	 * RFC3686 specific:
2157 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
2158 	 */
2159 	if (is_rfc3686)
2160 		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
2161 
2162 	init_aead_job(req, edesc, all_contig, encrypt);
2163 
2164 	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2165 		append_load_as_imm(desc, req->iv, ivsize,
2166 				   LDST_CLASS_1_CCB |
2167 				   LDST_SRCDST_BYTE_CONTEXT |
2168 				   (ivoffset << LDST_OFFSET_SHIFT));
2169 }
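/*
 * CONTEXT1 byte offsets implied by the ivoffset logic above, sketched for
 * the RFC3686 case (CONTEXT1[255:128] = {NONCE, IV, COUNTER}):
 *
 *	bytes 16..19: nonce   (CTR_RFC3686_NONCE_SIZE bytes)
 *	bytes 20..27: IV      (loaded here at ivoffset == 20)
 *	bytes 28..31: counter (initialized to 1 by the shared descriptor)
 */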
2170 
2171 /*
2172  * Fill in ablkcipher job descriptor
2173  */
2174 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2175 				struct ablkcipher_edesc *edesc,
2176 				struct ablkcipher_request *req,
2177 				bool iv_contig)
2178 {
2179 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2180 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2181 	u32 *desc = edesc->hw_desc;
2182 	u32 out_options = 0, in_options;
2183 	dma_addr_t dst_dma, src_dma;
2184 	int len, sec4_sg_index = 0;
2185 
2186 #ifdef DEBUG
2187 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2188 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2189 		       ivsize, 1);
2190 	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2191 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2192 		       edesc->src_nents ? 100 : req->nbytes, 1);
2193 #endif
2194 
2195 	len = desc_len(sh_desc);
2196 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2197 
2198 	if (iv_contig) {
2199 		src_dma = edesc->iv_dma;
2200 		in_options = 0;
2201 	} else {
2202 		src_dma = edesc->sec4_sg_dma;
2203 		sec4_sg_index += edesc->src_nents + 1;
2204 		in_options = LDST_SGF;
2205 	}
2206 	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2207 
2208 	if (likely(req->src == req->dst)) {
2209 		if (!edesc->src_nents && iv_contig) {
2210 			dst_dma = sg_dma_address(req->src);
2211 		} else {
2212 			dst_dma = edesc->sec4_sg_dma +
2213 				sizeof(struct sec4_sg_entry);
2214 			out_options = LDST_SGF;
2215 		}
2216 	} else {
2217 		if (!edesc->dst_nents) {
2218 			dst_dma = sg_dma_address(req->dst);
2219 		} else {
2220 			dst_dma = edesc->sec4_sg_dma +
2221 				sec4_sg_index * sizeof(struct sec4_sg_entry);
2222 			out_options = LDST_SGF;
2223 		}
2224 	}
2225 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2226 }
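/*
 * Note the asymmetric sequence lengths above: SEQ IN covers IV || payload
 * (req->nbytes + ivsize) because the shared descriptor SEQ-loads the IV
 * from the input sequence, while SEQ OUT covers only the req->nbytes of
 * cipher output.
 */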
2227 
2228 /*
2229  * Fill in ablkcipher givencrypt job descriptor
2230  */
2231 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2232 				    struct ablkcipher_edesc *edesc,
2233 				    struct ablkcipher_request *req,
2234 				    bool iv_contig)
2235 {
2236 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2237 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2238 	u32 *desc = edesc->hw_desc;
2239 	u32 out_options, in_options;
2240 	dma_addr_t dst_dma, src_dma;
2241 	int len, sec4_sg_index = 0;
2242 
2243 #ifdef DEBUG
2244 	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2245 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2246 		       ivsize, 1);
2247 	print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
2248 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2249 		       edesc->src_nents ? 100 : req->nbytes, 1);
2250 #endif
2251 
2252 	len = desc_len(sh_desc);
2253 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2254 
2255 	if (!edesc->src_nents) {
2256 		src_dma = sg_dma_address(req->src);
2257 		in_options = 0;
2258 	} else {
2259 		src_dma = edesc->sec4_sg_dma;
2260 		sec4_sg_index += edesc->src_nents;
2261 		in_options = LDST_SGF;
2262 	}
2263 	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2264 
2265 	if (iv_contig) {
2266 		dst_dma = edesc->iv_dma;
2267 		out_options = 0;
2268 	} else {
2269 		dst_dma = edesc->sec4_sg_dma +
2270 			  sec4_sg_index * sizeof(struct sec4_sg_entry);
2271 		out_options = LDST_SGF;
2272 	}
2273 	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2274 }
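/*
 * Mirror image of init_ablkcipher_job(): SEQ IN covers only the req->nbytes
 * of input, while SEQ OUT covers IV || payload (req->nbytes + ivsize) so
 * that the h/w-generated IV is stored in front of the ciphertext.
 */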
2275 
2276 /*
2277  * allocate and map the aead extended descriptor
2278  */
2279 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2280 					   int desc_bytes, bool *all_contig_ptr,
2281 					   bool encrypt)
2282 {
2283 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2284 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2285 	struct device *jrdev = ctx->jrdev;
2286 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2287 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2288 	int src_nents, dst_nents = 0;
2289 	struct aead_edesc *edesc;
2290 	int sgc;
2291 	bool all_contig = true;
2292 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2293 	unsigned int authsize = ctx->authsize;
2294 
2295 	if (unlikely(req->dst != req->src)) {
2296 		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
2297 		dst_nents = sg_count(req->dst,
2298 				     req->assoclen + req->cryptlen +
2299 					(encrypt ? authsize : (-authsize)));
2300 	} else {
2301 		src_nents = sg_count(req->src,
2302 				     req->assoclen + req->cryptlen +
2303 					(encrypt ? authsize : 0));
2304 	}
2305 
2306 	/* Check if data are contiguous. */
2307 	all_contig = !src_nents;
2308 	if (!all_contig) {
2309 		src_nents = src_nents ? : 1;
2310 		sec4_sg_len = src_nents;
2311 	}
2312 
2313 	sec4_sg_len += dst_nents;
2314 
2315 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2316 
2317 	/* allocate space for base edesc and hw desc commands, link tables */
2318 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2319 			GFP_DMA | flags);
2320 	if (!edesc) {
2321 		dev_err(jrdev, "could not allocate extended descriptor\n");
2322 		return ERR_PTR(-ENOMEM);
2323 	}
2324 
2325 	if (likely(req->src == req->dst)) {
2326 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2327 				 DMA_BIDIRECTIONAL);
2328 		if (unlikely(!sgc)) {
2329 			dev_err(jrdev, "unable to map source\n");
2330 			kfree(edesc);
2331 			return ERR_PTR(-ENOMEM);
2332 		}
2333 	} else {
2334 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2335 				 DMA_TO_DEVICE);
2336 		if (unlikely(!sgc)) {
2337 			dev_err(jrdev, "unable to map source\n");
2338 			kfree(edesc);
2339 			return ERR_PTR(-ENOMEM);
2340 		}
2341 
2342 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2343 				 DMA_FROM_DEVICE);
2344 		if (unlikely(!sgc)) {
2345 			dev_err(jrdev, "unable to map destination\n");
2346 			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
2347 				     DMA_TO_DEVICE);
2348 			kfree(edesc);
2349 			return ERR_PTR(-ENOMEM);
2350 		}
2351 	}
2352 
2353 	edesc->src_nents = src_nents;
2354 	edesc->dst_nents = dst_nents;
2355 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2356 			 desc_bytes;
2357 	*all_contig_ptr = all_contig;
2358 
2359 	sec4_sg_index = 0;
2360 	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
2363 		sec4_sg_index += src_nents;
2364 	}
2365 	if (dst_nents) {
2366 		sg_to_sec4_sg_last(req->dst, dst_nents,
2367 				   edesc->sec4_sg + sec4_sg_index, 0);
2368 	}
2369 
2370 	if (!sec4_sg_bytes)
2371 		return edesc;
2372 
2373 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2374 					    sec4_sg_bytes, DMA_TO_DEVICE);
2375 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2376 		dev_err(jrdev, "unable to map S/G table\n");
2377 		aead_unmap(jrdev, edesc, req);
2378 		kfree(edesc);
2379 		return ERR_PTR(-ENOMEM);
2380 	}
2381 
2382 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2383 
2384 	return edesc;
2385 }
2386 
2387 static int gcm_encrypt(struct aead_request *req)
2388 {
2389 	struct aead_edesc *edesc;
2390 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2391 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2392 	struct device *jrdev = ctx->jrdev;
2393 	bool all_contig;
2394 	u32 *desc;
2395 	int ret = 0;
2396 
2397 	/* allocate extended descriptor */
2398 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
2399 	if (IS_ERR(edesc))
2400 		return PTR_ERR(edesc);
2401 
2402 	/* Create and submit job descriptor */
2403 	init_gcm_job(req, edesc, all_contig, true);
2404 #ifdef DEBUG
2405 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2406 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2407 		       desc_bytes(edesc->hw_desc), 1);
2408 #endif
2409 
2410 	desc = edesc->hw_desc;
2411 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2412 	if (!ret) {
2413 		ret = -EINPROGRESS;
2414 	} else {
2415 		aead_unmap(jrdev, edesc, req);
2416 		kfree(edesc);
2417 	}
2418 
2419 	return ret;
2420 }
2421 
2422 static int ipsec_gcm_encrypt(struct aead_request *req)
2423 {
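	/*
	 * IPsec ESP associated data starts with the SPI (4 bytes) and
	 * sequence number (at least 4 bytes), hence the 8-byte minimum.
	 */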
2424 	if (req->assoclen < 8)
2425 		return -EINVAL;
2426 
2427 	return gcm_encrypt(req);
2428 }
2429 
2430 static int aead_encrypt(struct aead_request *req)
2431 {
2432 	struct aead_edesc *edesc;
2433 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2434 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2435 	struct device *jrdev = ctx->jrdev;
2436 	bool all_contig;
2437 	u32 *desc;
2438 	int ret = 0;
2439 
2440 	/* allocate extended descriptor */
2441 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2442 				 &all_contig, true);
2443 	if (IS_ERR(edesc))
2444 		return PTR_ERR(edesc);
2445 
2446 	/* Create and submit job descriptor */
2447 	init_authenc_job(req, edesc, all_contig, true);
2448 #ifdef DEBUG
2449 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2450 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2451 		       desc_bytes(edesc->hw_desc), 1);
2452 #endif
2453 
2454 	desc = edesc->hw_desc;
2455 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2456 	if (!ret) {
2457 		ret = -EINPROGRESS;
2458 	} else {
2459 		aead_unmap(jrdev, edesc, req);
2460 		kfree(edesc);
2461 	}
2462 
2463 	return ret;
2464 }
2465 
2466 static int gcm_decrypt(struct aead_request *req)
2467 {
2468 	struct aead_edesc *edesc;
2469 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2470 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2471 	struct device *jrdev = ctx->jrdev;
2472 	bool all_contig;
2473 	u32 *desc;
2474 	int ret = 0;
2475 
2476 	/* allocate extended descriptor */
2477 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2478 	if (IS_ERR(edesc))
2479 		return PTR_ERR(edesc);
2480 
	/* Create and submit job descriptor */
2482 	init_gcm_job(req, edesc, all_contig, false);
2483 #ifdef DEBUG
2484 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2485 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2486 		       desc_bytes(edesc->hw_desc), 1);
2487 #endif
2488 
2489 	desc = edesc->hw_desc;
2490 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2491 	if (!ret) {
2492 		ret = -EINPROGRESS;
2493 	} else {
2494 		aead_unmap(jrdev, edesc, req);
2495 		kfree(edesc);
2496 	}
2497 
2498 	return ret;
2499 }
2500 
2501 static int ipsec_gcm_decrypt(struct aead_request *req)
2502 {
2503 	if (req->assoclen < 8)
2504 		return -EINVAL;
2505 
2506 	return gcm_decrypt(req);
2507 }
2508 
2509 static int aead_decrypt(struct aead_request *req)
2510 {
2511 	struct aead_edesc *edesc;
2512 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2513 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2514 	struct device *jrdev = ctx->jrdev;
2515 	bool all_contig;
2516 	u32 *desc;
2517 	int ret = 0;
2518 
2519 	/* allocate extended descriptor */
2520 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2521 				 &all_contig, false);
2522 	if (IS_ERR(edesc))
2523 		return PTR_ERR(edesc);
2524 
2525 #ifdef DEBUG
2526 	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2527 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2528 		       req->assoclen + req->cryptlen, 1);
2529 #endif
2530 
	/* Create and submit job descriptor */
2532 	init_authenc_job(req, edesc, all_contig, false);
2533 #ifdef DEBUG
2534 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2535 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2536 		       desc_bytes(edesc->hw_desc), 1);
2537 #endif
2538 
2539 	desc = edesc->hw_desc;
2540 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2541 	if (!ret) {
2542 		ret = -EINPROGRESS;
2543 	} else {
2544 		aead_unmap(jrdev, edesc, req);
2545 		kfree(edesc);
2546 	}
2547 
2548 	return ret;
2549 }
2550 
/*
 * allocate and map the ablkcipher extended descriptor
 */
2554 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2555 						       *req, int desc_bytes,
2556 						       bool *iv_contig_out)
2557 {
2558 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2559 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2560 	struct device *jrdev = ctx->jrdev;
2561 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2562 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2563 		       GFP_KERNEL : GFP_ATOMIC;
2564 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2565 	struct ablkcipher_edesc *edesc;
2566 	dma_addr_t iv_dma = 0;
2567 	bool iv_contig = false;
2568 	int sgc;
2569 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2570 	int sec4_sg_index;
2571 
2572 	src_nents = sg_count(req->src, req->nbytes);
2573 
2574 	if (req->dst != req->src)
2575 		dst_nents = sg_count(req->dst, req->nbytes);
2576 
2577 	if (likely(req->src == req->dst)) {
2578 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2579 				 DMA_BIDIRECTIONAL);
2580 	} else {
2581 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2582 				 DMA_TO_DEVICE);
2583 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2584 				 DMA_FROM_DEVICE);
2585 	}
2586 
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
2592 
2593 	/*
2594 	 * Check if iv can be contiguous with source and destination.
2595 	 * If so, include it. If not, create scatterlist.
2596 	 */
2597 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2598 		iv_contig = true;
2599 	else
2600 		src_nents = src_nents ? : 1;
2601 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2602 			sizeof(struct sec4_sg_entry);
2603 
	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
2611 
2612 	edesc->src_nents = src_nents;
2613 	edesc->dst_nents = dst_nents;
2614 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2615 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2616 			 desc_bytes;
2617 
2618 	sec4_sg_index = 0;
2619 	if (!iv_contig) {
2620 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2621 		sg_to_sec4_sg_last(req->src, src_nents,
2622 				   edesc->sec4_sg + 1, 0);
2623 		sec4_sg_index += 1 + src_nents;
2624 	}
2625 
2626 	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
2629 	}
2630 
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
2637 
2638 	edesc->iv_dma = iv_dma;
2639 
2640 #ifdef DEBUG
2641 	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
2642 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2643 		       sec4_sg_bytes, 1);
2644 #endif
2645 
2646 	*iv_contig_out = iv_contig;
2647 	return edesc;
2648 }
2649 
2650 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2651 {
2652 	struct ablkcipher_edesc *edesc;
2653 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2654 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2655 	struct device *jrdev = ctx->jrdev;
2656 	bool iv_contig;
2657 	u32 *desc;
2658 	int ret = 0;
2659 
2660 	/* allocate extended descriptor */
2661 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2662 				       CAAM_CMD_SZ, &iv_contig);
2663 	if (IS_ERR(edesc))
2664 		return PTR_ERR(edesc);
2665 
	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma,
			    edesc, req, iv_contig);
2669 #ifdef DEBUG
2670 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2671 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2672 		       desc_bytes(edesc->hw_desc), 1);
2673 #endif
2674 	desc = edesc->hw_desc;
2675 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2676 
2677 	if (!ret) {
2678 		ret = -EINPROGRESS;
2679 	} else {
2680 		ablkcipher_unmap(jrdev, edesc, req);
2681 		kfree(edesc);
2682 	}
2683 
2684 	return ret;
2685 }
2686 
2687 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2688 {
2689 	struct ablkcipher_edesc *edesc;
2690 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2691 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2692 	struct device *jrdev = ctx->jrdev;
2693 	bool iv_contig;
2694 	u32 *desc;
2695 	int ret = 0;
2696 
2697 	/* allocate extended descriptor */
2698 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2699 				       CAAM_CMD_SZ, &iv_contig);
2700 	if (IS_ERR(edesc))
2701 		return PTR_ERR(edesc);
2702 
	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma,
			    edesc, req, iv_contig);
2706 	desc = edesc->hw_desc;
2707 #ifdef DEBUG
2708 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2709 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2710 		       desc_bytes(edesc->hw_desc), 1);
2711 #endif
2712 
2713 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2714 	if (!ret) {
2715 		ret = -EINPROGRESS;
2716 	} else {
2717 		ablkcipher_unmap(jrdev, edesc, req);
2718 		kfree(edesc);
2719 	}
2720 
2721 	return ret;
2722 }
2723 
2724 /*
2725  * allocate and map the ablkcipher extended descriptor
2726  * for ablkcipher givencrypt
2727  */
2728 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2729 				struct skcipher_givcrypt_request *greq,
2730 				int desc_bytes,
2731 				bool *iv_contig_out)
2732 {
2733 	struct ablkcipher_request *req = &greq->creq;
2734 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2735 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2736 	struct device *jrdev = ctx->jrdev;
2737 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2738 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2739 		       GFP_KERNEL : GFP_ATOMIC;
2740 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2741 	struct ablkcipher_edesc *edesc;
2742 	dma_addr_t iv_dma = 0;
2743 	bool iv_contig = false;
2744 	int sgc;
2745 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2746 	int sec4_sg_index;
2747 
2748 	src_nents = sg_count(req->src, req->nbytes);
2749 
2750 	if (unlikely(req->dst != req->src))
2751 		dst_nents = sg_count(req->dst, req->nbytes);
2752 
2753 	if (likely(req->src == req->dst)) {
2754 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2755 				 DMA_BIDIRECTIONAL);
2756 	} else {
2757 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2758 				 DMA_TO_DEVICE);
2759 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2760 				 DMA_FROM_DEVICE);
2761 	}
2762 
2763 	/*
2764 	 * Check if iv can be contiguous with source and destination.
2765 	 * If so, include it. If not, create scatterlist.
2766 	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
2772 
2773 	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2774 		iv_contig = true;
2775 	else
2776 		dst_nents = dst_nents ? : 1;
2777 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2778 			sizeof(struct sec4_sg_entry);
2779 
	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
2787 
2788 	edesc->src_nents = src_nents;
2789 	edesc->dst_nents = dst_nents;
2790 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2791 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2792 			 desc_bytes;
2793 
2794 	sec4_sg_index = 0;
2795 	if (src_nents) {
2796 		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2797 		sec4_sg_index += src_nents;
2798 	}
2799 
2800 	if (!iv_contig) {
2801 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2802 				   iv_dma, ivsize, 0);
2803 		sec4_sg_index += 1;
2804 		sg_to_sec4_sg_last(req->dst, dst_nents,
2805 				   edesc->sec4_sg + sec4_sg_index, 0);
2806 	}
2807 
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
2814 	edesc->iv_dma = iv_dma;
2815 
2816 #ifdef DEBUG
2817 	print_hex_dump(KERN_ERR,
2818 		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2819 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2820 		       sec4_sg_bytes, 1);
2821 #endif
2822 
2823 	*iv_contig_out = iv_contig;
2824 	return edesc;
2825 }
2826 
2827 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2828 {
2829 	struct ablkcipher_request *req = &creq->creq;
2830 	struct ablkcipher_edesc *edesc;
2831 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2832 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2833 	struct device *jrdev = ctx->jrdev;
2834 	bool iv_contig;
2835 	u32 *desc;
2836 	int ret = 0;
2837 
2838 	/* allocate extended descriptor */
2839 	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2840 				       CAAM_CMD_SZ, &iv_contig);
2841 	if (IS_ERR(edesc))
2842 		return PTR_ERR(edesc);
2843 
	/* Create and submit job descriptor */
2845 	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2846 				edesc, req, iv_contig);
2847 #ifdef DEBUG
2848 	print_hex_dump(KERN_ERR,
2849 		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2850 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2851 		       desc_bytes(edesc->hw_desc), 1);
2852 #endif
2853 	desc = edesc->hw_desc;
2854 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2855 
2856 	if (!ret) {
2857 		ret = -EINPROGRESS;
2858 	} else {
2859 		ablkcipher_unmap(jrdev, edesc, req);
2860 		kfree(edesc);
2861 	}
2862 
2863 	return ret;
2864 }
2865 
2866 #define template_aead		template_u.aead
2867 #define template_ablkcipher	template_u.ablkcipher
2868 struct caam_alg_template {
2869 	char name[CRYPTO_MAX_ALG_NAME];
2870 	char driver_name[CRYPTO_MAX_ALG_NAME];
2871 	unsigned int blocksize;
2872 	u32 type;
2873 	union {
2874 		struct ablkcipher_alg ablkcipher;
2875 	} template_u;
2876 	u32 class1_alg_type;
2877 	u32 class2_alg_type;
2878 	u32 alg_op;
2879 };
2880 
2881 static struct caam_alg_template driver_algs[] = {
2882 	/* ablkcipher descriptor */
2883 	{
2884 		.name = "cbc(aes)",
2885 		.driver_name = "cbc-aes-caam",
2886 		.blocksize = AES_BLOCK_SIZE,
2887 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2888 		.template_ablkcipher = {
2889 			.setkey = ablkcipher_setkey,
2890 			.encrypt = ablkcipher_encrypt,
2891 			.decrypt = ablkcipher_decrypt,
2892 			.givencrypt = ablkcipher_givencrypt,
2893 			.geniv = "<built-in>",
2894 			.min_keysize = AES_MIN_KEY_SIZE,
2895 			.max_keysize = AES_MAX_KEY_SIZE,
2896 			.ivsize = AES_BLOCK_SIZE,
2897 			},
2898 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2899 	},
2900 	{
2901 		.name = "cbc(des3_ede)",
2902 		.driver_name = "cbc-3des-caam",
2903 		.blocksize = DES3_EDE_BLOCK_SIZE,
2904 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2905 		.template_ablkcipher = {
2906 			.setkey = ablkcipher_setkey,
2907 			.encrypt = ablkcipher_encrypt,
2908 			.decrypt = ablkcipher_decrypt,
2909 			.givencrypt = ablkcipher_givencrypt,
2910 			.geniv = "<built-in>",
2911 			.min_keysize = DES3_EDE_KEY_SIZE,
2912 			.max_keysize = DES3_EDE_KEY_SIZE,
2913 			.ivsize = DES3_EDE_BLOCK_SIZE,
2914 			},
2915 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2916 	},
2917 	{
2918 		.name = "cbc(des)",
2919 		.driver_name = "cbc-des-caam",
2920 		.blocksize = DES_BLOCK_SIZE,
2921 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2922 		.template_ablkcipher = {
2923 			.setkey = ablkcipher_setkey,
2924 			.encrypt = ablkcipher_encrypt,
2925 			.decrypt = ablkcipher_decrypt,
2926 			.givencrypt = ablkcipher_givencrypt,
2927 			.geniv = "<built-in>",
2928 			.min_keysize = DES_KEY_SIZE,
2929 			.max_keysize = DES_KEY_SIZE,
2930 			.ivsize = DES_BLOCK_SIZE,
2931 			},
2932 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2933 	},
2934 	{
2935 		.name = "ctr(aes)",
2936 		.driver_name = "ctr-aes-caam",
2937 		.blocksize = 1,
2938 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2939 		.template_ablkcipher = {
2940 			.setkey = ablkcipher_setkey,
2941 			.encrypt = ablkcipher_encrypt,
2942 			.decrypt = ablkcipher_decrypt,
2943 			.geniv = "chainiv",
2944 			.min_keysize = AES_MIN_KEY_SIZE,
2945 			.max_keysize = AES_MAX_KEY_SIZE,
2946 			.ivsize = AES_BLOCK_SIZE,
2947 			},
2948 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2949 	},
2950 	{
2951 		.name = "rfc3686(ctr(aes))",
2952 		.driver_name = "rfc3686-ctr-aes-caam",
2953 		.blocksize = 1,
2954 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2955 		.template_ablkcipher = {
2956 			.setkey = ablkcipher_setkey,
2957 			.encrypt = ablkcipher_encrypt,
2958 			.decrypt = ablkcipher_decrypt,
2959 			.givencrypt = ablkcipher_givencrypt,
2960 			.geniv = "<built-in>",
2961 			.min_keysize = AES_MIN_KEY_SIZE +
2962 				       CTR_RFC3686_NONCE_SIZE,
2963 			.max_keysize = AES_MAX_KEY_SIZE +
2964 				       CTR_RFC3686_NONCE_SIZE,
2965 			.ivsize = CTR_RFC3686_IV_SIZE,
2966 			},
2967 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2968 	},
2969 	{
2970 		.name = "xts(aes)",
2971 		.driver_name = "xts-aes-caam",
2972 		.blocksize = AES_BLOCK_SIZE,
2973 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2974 		.template_ablkcipher = {
2975 			.setkey = xts_ablkcipher_setkey,
2976 			.encrypt = ablkcipher_encrypt,
2977 			.decrypt = ablkcipher_decrypt,
2978 			.geniv = "eseqiv",
2979 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
2980 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
2981 			.ivsize = AES_BLOCK_SIZE,
2982 			},
2983 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2984 	},
2985 };
2986 
2987 static struct caam_aead_alg driver_aeads[] = {
2988 	{
2989 		.aead = {
2990 			.base = {
2991 				.cra_name = "rfc4106(gcm(aes))",
2992 				.cra_driver_name = "rfc4106-gcm-aes-caam",
2993 				.cra_blocksize = 1,
2994 			},
2995 			.setkey = rfc4106_setkey,
2996 			.setauthsize = rfc4106_setauthsize,
2997 			.encrypt = ipsec_gcm_encrypt,
2998 			.decrypt = ipsec_gcm_decrypt,
2999 			.ivsize = 8,
3000 			.maxauthsize = AES_BLOCK_SIZE,
3001 		},
3002 		.caam = {
3003 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3004 		},
3005 	},
3006 	{
3007 		.aead = {
3008 			.base = {
3009 				.cra_name = "rfc4543(gcm(aes))",
3010 				.cra_driver_name = "rfc4543-gcm-aes-caam",
3011 				.cra_blocksize = 1,
3012 			},
3013 			.setkey = rfc4543_setkey,
3014 			.setauthsize = rfc4543_setauthsize,
3015 			.encrypt = ipsec_gcm_encrypt,
3016 			.decrypt = ipsec_gcm_decrypt,
3017 			.ivsize = 8,
3018 			.maxauthsize = AES_BLOCK_SIZE,
3019 		},
3020 		.caam = {
3021 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3022 		},
3023 	},
3024 	/* Galois Counter Mode */
3025 	{
3026 		.aead = {
3027 			.base = {
3028 				.cra_name = "gcm(aes)",
3029 				.cra_driver_name = "gcm-aes-caam",
3030 				.cra_blocksize = 1,
3031 			},
3032 			.setkey = gcm_setkey,
3033 			.setauthsize = gcm_setauthsize,
3034 			.encrypt = gcm_encrypt,
3035 			.decrypt = gcm_decrypt,
3036 			.ivsize = 12,
3037 			.maxauthsize = AES_BLOCK_SIZE,
3038 		},
3039 		.caam = {
3040 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3041 		},
3042 	},
3043 	/* single-pass ipsec_esp descriptor */
3044 	{
3045 		.aead = {
3046 			.base = {
3047 				.cra_name = "authenc(hmac(md5),"
3048 					    "ecb(cipher_null))",
3049 				.cra_driver_name = "authenc-hmac-md5-"
3050 						   "ecb-cipher_null-caam",
3051 				.cra_blocksize = NULL_BLOCK_SIZE,
3052 			},
3053 			.setkey = aead_setkey,
3054 			.setauthsize = aead_setauthsize,
3055 			.encrypt = aead_encrypt,
3056 			.decrypt = aead_decrypt,
3057 			.ivsize = NULL_IV_SIZE,
3058 			.maxauthsize = MD5_DIGEST_SIZE,
3059 		},
3060 		.caam = {
3061 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3062 					   OP_ALG_AAI_HMAC_PRECOMP,
3063 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3064 		},
3065 	},
3066 	{
3067 		.aead = {
3068 			.base = {
3069 				.cra_name = "authenc(hmac(sha1),"
3070 					    "ecb(cipher_null))",
3071 				.cra_driver_name = "authenc-hmac-sha1-"
3072 						   "ecb-cipher_null-caam",
3073 				.cra_blocksize = NULL_BLOCK_SIZE,
3074 			},
3075 			.setkey = aead_setkey,
3076 			.setauthsize = aead_setauthsize,
3077 			.encrypt = aead_encrypt,
3078 			.decrypt = aead_decrypt,
3079 			.ivsize = NULL_IV_SIZE,
3080 			.maxauthsize = SHA1_DIGEST_SIZE,
3081 		},
3082 		.caam = {
3083 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3084 					   OP_ALG_AAI_HMAC_PRECOMP,
3085 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3086 		},
3087 	},
3088 	{
3089 		.aead = {
3090 			.base = {
3091 				.cra_name = "authenc(hmac(sha224),"
3092 					    "ecb(cipher_null))",
3093 				.cra_driver_name = "authenc-hmac-sha224-"
3094 						   "ecb-cipher_null-caam",
3095 				.cra_blocksize = NULL_BLOCK_SIZE,
3096 			},
3097 			.setkey = aead_setkey,
3098 			.setauthsize = aead_setauthsize,
3099 			.encrypt = aead_encrypt,
3100 			.decrypt = aead_decrypt,
3101 			.ivsize = NULL_IV_SIZE,
3102 			.maxauthsize = SHA224_DIGEST_SIZE,
3103 		},
3104 		.caam = {
3105 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3106 					   OP_ALG_AAI_HMAC_PRECOMP,
3107 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3108 		},
3109 	},
3110 	{
3111 		.aead = {
3112 			.base = {
3113 				.cra_name = "authenc(hmac(sha256),"
3114 					    "ecb(cipher_null))",
3115 				.cra_driver_name = "authenc-hmac-sha256-"
3116 						   "ecb-cipher_null-caam",
3117 				.cra_blocksize = NULL_BLOCK_SIZE,
3118 			},
3119 			.setkey = aead_setkey,
3120 			.setauthsize = aead_setauthsize,
3121 			.encrypt = aead_encrypt,
3122 			.decrypt = aead_decrypt,
3123 			.ivsize = NULL_IV_SIZE,
3124 			.maxauthsize = SHA256_DIGEST_SIZE,
3125 		},
3126 		.caam = {
3127 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3128 					   OP_ALG_AAI_HMAC_PRECOMP,
3129 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3130 		},
3131 	},
3132 	{
3133 		.aead = {
3134 			.base = {
3135 				.cra_name = "authenc(hmac(sha384),"
3136 					    "ecb(cipher_null))",
3137 				.cra_driver_name = "authenc-hmac-sha384-"
3138 						   "ecb-cipher_null-caam",
3139 				.cra_blocksize = NULL_BLOCK_SIZE,
3140 			},
3141 			.setkey = aead_setkey,
3142 			.setauthsize = aead_setauthsize,
3143 			.encrypt = aead_encrypt,
3144 			.decrypt = aead_decrypt,
3145 			.ivsize = NULL_IV_SIZE,
3146 			.maxauthsize = SHA384_DIGEST_SIZE,
3147 		},
3148 		.caam = {
3149 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3150 					   OP_ALG_AAI_HMAC_PRECOMP,
3151 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3152 		},
3153 	},
3154 	{
3155 		.aead = {
3156 			.base = {
3157 				.cra_name = "authenc(hmac(sha512),"
3158 					    "ecb(cipher_null))",
3159 				.cra_driver_name = "authenc-hmac-sha512-"
3160 						   "ecb-cipher_null-caam",
3161 				.cra_blocksize = NULL_BLOCK_SIZE,
3162 			},
3163 			.setkey = aead_setkey,
3164 			.setauthsize = aead_setauthsize,
3165 			.encrypt = aead_encrypt,
3166 			.decrypt = aead_decrypt,
3167 			.ivsize = NULL_IV_SIZE,
3168 			.maxauthsize = SHA512_DIGEST_SIZE,
3169 		},
3170 		.caam = {
3171 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3172 					   OP_ALG_AAI_HMAC_PRECOMP,
3173 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3174 		},
3175 	},
3176 	{
3177 		.aead = {
3178 			.base = {
3179 				.cra_name = "authenc(hmac(md5),cbc(aes))",
3180 				.cra_driver_name = "authenc-hmac-md5-"
3181 						   "cbc-aes-caam",
3182 				.cra_blocksize = AES_BLOCK_SIZE,
3183 			},
3184 			.setkey = aead_setkey,
3185 			.setauthsize = aead_setauthsize,
3186 			.encrypt = aead_encrypt,
3187 			.decrypt = aead_decrypt,
3188 			.ivsize = AES_BLOCK_SIZE,
3189 			.maxauthsize = MD5_DIGEST_SIZE,
3190 		},
3191 		.caam = {
3192 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3193 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3194 					   OP_ALG_AAI_HMAC_PRECOMP,
3195 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3196 		},
3197 	},
3198 	{
3199 		.aead = {
3200 			.base = {
3201 				.cra_name = "echainiv(authenc(hmac(md5),"
3202 					    "cbc(aes)))",
3203 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3204 						   "cbc-aes-caam",
3205 				.cra_blocksize = AES_BLOCK_SIZE,
3206 			},
3207 			.setkey = aead_setkey,
3208 			.setauthsize = aead_setauthsize,
3209 			.encrypt = aead_encrypt,
3210 			.decrypt = aead_decrypt,
3211 			.ivsize = AES_BLOCK_SIZE,
3212 			.maxauthsize = MD5_DIGEST_SIZE,
3213 		},
3214 		.caam = {
3215 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3216 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3217 					   OP_ALG_AAI_HMAC_PRECOMP,
3218 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3219 			.geniv = true,
3220 		},
3221 	},
3222 	{
3223 		.aead = {
3224 			.base = {
3225 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3226 				.cra_driver_name = "authenc-hmac-sha1-"
3227 						   "cbc-aes-caam",
3228 				.cra_blocksize = AES_BLOCK_SIZE,
3229 			},
3230 			.setkey = aead_setkey,
3231 			.setauthsize = aead_setauthsize,
3232 			.encrypt = aead_encrypt,
3233 			.decrypt = aead_decrypt,
3234 			.ivsize = AES_BLOCK_SIZE,
3235 			.maxauthsize = SHA1_DIGEST_SIZE,
3236 		},
3237 		.caam = {
3238 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3239 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3240 					   OP_ALG_AAI_HMAC_PRECOMP,
3241 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3242 		},
3243 	},
3244 	{
3245 		.aead = {
3246 			.base = {
3247 				.cra_name = "echainiv(authenc(hmac(sha1),"
3248 					    "cbc(aes)))",
3249 				.cra_driver_name = "echainiv-authenc-"
3250 						   "hmac-sha1-cbc-aes-caam",
3251 				.cra_blocksize = AES_BLOCK_SIZE,
3252 			},
3253 			.setkey = aead_setkey,
3254 			.setauthsize = aead_setauthsize,
3255 			.encrypt = aead_encrypt,
3256 			.decrypt = aead_decrypt,
3257 			.ivsize = AES_BLOCK_SIZE,
3258 			.maxauthsize = SHA1_DIGEST_SIZE,
3259 		},
3260 		.caam = {
3261 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3262 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3263 					   OP_ALG_AAI_HMAC_PRECOMP,
3264 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3265 			.geniv = true,
3266 		},
3267 	},
3268 	{
3269 		.aead = {
3270 			.base = {
3271 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
3272 				.cra_driver_name = "authenc-hmac-sha224-"
3273 						   "cbc-aes-caam",
3274 				.cra_blocksize = AES_BLOCK_SIZE,
3275 			},
3276 			.setkey = aead_setkey,
3277 			.setauthsize = aead_setauthsize,
3278 			.encrypt = aead_encrypt,
3279 			.decrypt = aead_decrypt,
3280 			.ivsize = AES_BLOCK_SIZE,
3281 			.maxauthsize = SHA224_DIGEST_SIZE,
3282 		},
3283 		.caam = {
3284 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3285 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3286 					   OP_ALG_AAI_HMAC_PRECOMP,
3287 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3288 		},
3289 	},
3290 	{
3291 		.aead = {
3292 			.base = {
3293 				.cra_name = "echainiv(authenc(hmac(sha224),"
3294 					    "cbc(aes)))",
3295 				.cra_driver_name = "echainiv-authenc-"
3296 						   "hmac-sha224-cbc-aes-caam",
3297 				.cra_blocksize = AES_BLOCK_SIZE,
3298 			},
3299 			.setkey = aead_setkey,
3300 			.setauthsize = aead_setauthsize,
3301 			.encrypt = aead_encrypt,
3302 			.decrypt = aead_decrypt,
3303 			.ivsize = AES_BLOCK_SIZE,
3304 			.maxauthsize = SHA224_DIGEST_SIZE,
3305 		},
3306 		.caam = {
3307 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3308 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3309 					   OP_ALG_AAI_HMAC_PRECOMP,
3310 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3311 			.geniv = true,
3312 		},
3313 	},
3314 	{
3315 		.aead = {
3316 			.base = {
3317 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
3318 				.cra_driver_name = "authenc-hmac-sha256-"
3319 						   "cbc-aes-caam",
3320 				.cra_blocksize = AES_BLOCK_SIZE,
3321 			},
3322 			.setkey = aead_setkey,
3323 			.setauthsize = aead_setauthsize,
3324 			.encrypt = aead_encrypt,
3325 			.decrypt = aead_decrypt,
3326 			.ivsize = AES_BLOCK_SIZE,
3327 			.maxauthsize = SHA256_DIGEST_SIZE,
3328 		},
3329 		.caam = {
3330 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3331 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3332 					   OP_ALG_AAI_HMAC_PRECOMP,
3333 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3334 		},
3335 	},
3336 	{
3337 		.aead = {
3338 			.base = {
3339 				.cra_name = "echainiv(authenc(hmac(sha256),"
3340 					    "cbc(aes)))",
3341 				.cra_driver_name = "echainiv-authenc-"
3342 						   "hmac-sha256-cbc-aes-caam",
3343 				.cra_blocksize = AES_BLOCK_SIZE,
3344 			},
3345 			.setkey = aead_setkey,
3346 			.setauthsize = aead_setauthsize,
3347 			.encrypt = aead_encrypt,
3348 			.decrypt = aead_decrypt,
3349 			.ivsize = AES_BLOCK_SIZE,
3350 			.maxauthsize = SHA256_DIGEST_SIZE,
3351 		},
3352 		.caam = {
3353 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3354 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3355 					   OP_ALG_AAI_HMAC_PRECOMP,
3356 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3357 			.geniv = true,
3358 		},
3359 	},
3360 	{
3361 		.aead = {
3362 			.base = {
3363 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
3364 				.cra_driver_name = "authenc-hmac-sha384-"
3365 						   "cbc-aes-caam",
3366 				.cra_blocksize = AES_BLOCK_SIZE,
3367 			},
3368 			.setkey = aead_setkey,
3369 			.setauthsize = aead_setauthsize,
3370 			.encrypt = aead_encrypt,
3371 			.decrypt = aead_decrypt,
3372 			.ivsize = AES_BLOCK_SIZE,
3373 			.maxauthsize = SHA384_DIGEST_SIZE,
3374 		},
3375 		.caam = {
3376 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3377 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3378 					   OP_ALG_AAI_HMAC_PRECOMP,
3379 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3380 		},
3381 	},
3382 	{
3383 		.aead = {
3384 			.base = {
3385 				.cra_name = "echainiv(authenc(hmac(sha384),"
3386 					    "cbc(aes)))",
3387 				.cra_driver_name = "echainiv-authenc-"
3388 						   "hmac-sha384-cbc-aes-caam",
3389 				.cra_blocksize = AES_BLOCK_SIZE,
3390 			},
3391 			.setkey = aead_setkey,
3392 			.setauthsize = aead_setauthsize,
3393 			.encrypt = aead_encrypt,
3394 			.decrypt = aead_decrypt,
3395 			.ivsize = AES_BLOCK_SIZE,
3396 			.maxauthsize = SHA384_DIGEST_SIZE,
3397 		},
3398 		.caam = {
3399 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3400 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3401 					   OP_ALG_AAI_HMAC_PRECOMP,
3402 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3403 			.geniv = true,
3404 		},
3405 	},
3406 	{
3407 		.aead = {
3408 			.base = {
3409 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
3410 				.cra_driver_name = "authenc-hmac-sha512-"
3411 						   "cbc-aes-caam",
3412 				.cra_blocksize = AES_BLOCK_SIZE,
3413 			},
3414 			.setkey = aead_setkey,
3415 			.setauthsize = aead_setauthsize,
3416 			.encrypt = aead_encrypt,
3417 			.decrypt = aead_decrypt,
3418 			.ivsize = AES_BLOCK_SIZE,
3419 			.maxauthsize = SHA512_DIGEST_SIZE,
3420 		},
3421 		.caam = {
3422 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3423 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3424 					   OP_ALG_AAI_HMAC_PRECOMP,
3425 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3426 		},
3427 	},
3428 	{
3429 		.aead = {
3430 			.base = {
3431 				.cra_name = "echainiv(authenc(hmac(sha512),"
3432 					    "cbc(aes)))",
3433 				.cra_driver_name = "echainiv-authenc-"
3434 						   "hmac-sha512-cbc-aes-caam",
3435 				.cra_blocksize = AES_BLOCK_SIZE,
3436 			},
3437 			.setkey = aead_setkey,
3438 			.setauthsize = aead_setauthsize,
3439 			.encrypt = aead_encrypt,
3440 			.decrypt = aead_decrypt,
3441 			.ivsize = AES_BLOCK_SIZE,
3442 			.maxauthsize = SHA512_DIGEST_SIZE,
3443 		},
3444 		.caam = {
3445 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3446 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3447 					   OP_ALG_AAI_HMAC_PRECOMP,
3448 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3449 			.geniv = true,
3450 		},
3451 	},
3452 	{
3453 		.aead = {
3454 			.base = {
3455 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3456 				.cra_driver_name = "authenc-hmac-md5-"
3457 						   "cbc-des3_ede-caam",
3458 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3459 			},
3460 			.setkey = aead_setkey,
3461 			.setauthsize = aead_setauthsize,
3462 			.encrypt = aead_encrypt,
3463 			.decrypt = aead_decrypt,
3464 			.ivsize = DES3_EDE_BLOCK_SIZE,
3465 			.maxauthsize = MD5_DIGEST_SIZE,
3466 		},
3467 		.caam = {
3468 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3469 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3470 					   OP_ALG_AAI_HMAC_PRECOMP,
3471 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3472 		},
3473 	},
3474 	{
3475 		.aead = {
3476 			.base = {
3477 				.cra_name = "echainiv(authenc(hmac(md5),"
3478 					    "cbc(des3_ede)))",
3479 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3480 						   "cbc-des3_ede-caam",
3481 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3482 			},
3483 			.setkey = aead_setkey,
3484 			.setauthsize = aead_setauthsize,
3485 			.encrypt = aead_encrypt,
3486 			.decrypt = aead_decrypt,
3487 			.ivsize = DES3_EDE_BLOCK_SIZE,
3488 			.maxauthsize = MD5_DIGEST_SIZE,
3489 		},
3490 		.caam = {
3491 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3492 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3493 					   OP_ALG_AAI_HMAC_PRECOMP,
3494 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3495 			.geniv = true,
3496 		},
3497 	},
3498 	{
3499 		.aead = {
3500 			.base = {
3501 				.cra_name = "authenc(hmac(sha1),"
3502 					    "cbc(des3_ede))",
3503 				.cra_driver_name = "authenc-hmac-sha1-"
3504 						   "cbc-des3_ede-caam",
3505 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3506 			},
3507 			.setkey = aead_setkey,
3508 			.setauthsize = aead_setauthsize,
3509 			.encrypt = aead_encrypt,
3510 			.decrypt = aead_decrypt,
3511 			.ivsize = DES3_EDE_BLOCK_SIZE,
3512 			.maxauthsize = SHA1_DIGEST_SIZE,
3513 		},
3514 		.caam = {
3515 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3516 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3517 					   OP_ALG_AAI_HMAC_PRECOMP,
3518 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3519 		},
3520 	},
3521 	{
3522 		.aead = {
3523 			.base = {
3524 				.cra_name = "echainiv(authenc(hmac(sha1),"
3525 					    "cbc(des3_ede)))",
3526 				.cra_driver_name = "echainiv-authenc-"
3527 						   "hmac-sha1-"
3528 						   "cbc-des3_ede-caam",
3529 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3530 			},
3531 			.setkey = aead_setkey,
3532 			.setauthsize = aead_setauthsize,
3533 			.encrypt = aead_encrypt,
3534 			.decrypt = aead_decrypt,
3535 			.ivsize = DES3_EDE_BLOCK_SIZE,
3536 			.maxauthsize = SHA1_DIGEST_SIZE,
3537 		},
3538 		.caam = {
3539 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3540 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3541 					   OP_ALG_AAI_HMAC_PRECOMP,
3542 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3543 			.geniv = true,
3544 		},
3545 	},
3546 	{
3547 		.aead = {
3548 			.base = {
3549 				.cra_name = "authenc(hmac(sha224),"
3550 					    "cbc(des3_ede))",
3551 				.cra_driver_name = "authenc-hmac-sha224-"
3552 						   "cbc-des3_ede-caam",
3553 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3554 			},
3555 			.setkey = aead_setkey,
3556 			.setauthsize = aead_setauthsize,
3557 			.encrypt = aead_encrypt,
3558 			.decrypt = aead_decrypt,
3559 			.ivsize = DES3_EDE_BLOCK_SIZE,
3560 			.maxauthsize = SHA224_DIGEST_SIZE,
3561 		},
3562 		.caam = {
3563 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3564 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3565 					   OP_ALG_AAI_HMAC_PRECOMP,
3566 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3567 		},
3568 	},
3569 	{
3570 		.aead = {
3571 			.base = {
3572 				.cra_name = "echainiv(authenc(hmac(sha224),"
3573 					    "cbc(des3_ede)))",
3574 				.cra_driver_name = "echainiv-authenc-"
3575 						   "hmac-sha224-"
3576 						   "cbc-des3_ede-caam",
3577 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3578 			},
3579 			.setkey = aead_setkey,
3580 			.setauthsize = aead_setauthsize,
3581 			.encrypt = aead_encrypt,
3582 			.decrypt = aead_decrypt,
3583 			.ivsize = DES3_EDE_BLOCK_SIZE,
3584 			.maxauthsize = SHA224_DIGEST_SIZE,
3585 		},
3586 		.caam = {
3587 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3588 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3589 					   OP_ALG_AAI_HMAC_PRECOMP,
3590 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3591 			.geniv = true,
3592 		},
3593 	},
3594 	{
3595 		.aead = {
3596 			.base = {
3597 				.cra_name = "authenc(hmac(sha256),"
3598 					    "cbc(des3_ede))",
3599 				.cra_driver_name = "authenc-hmac-sha256-"
3600 						   "cbc-des3_ede-caam",
3601 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3602 			},
3603 			.setkey = aead_setkey,
3604 			.setauthsize = aead_setauthsize,
3605 			.encrypt = aead_encrypt,
3606 			.decrypt = aead_decrypt,
3607 			.ivsize = DES3_EDE_BLOCK_SIZE,
3608 			.maxauthsize = SHA256_DIGEST_SIZE,
3609 		},
3610 		.caam = {
3611 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3612 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3613 					   OP_ALG_AAI_HMAC_PRECOMP,
3614 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3615 		},
3616 	},
3617 	{
3618 		.aead = {
3619 			.base = {
3620 				.cra_name = "echainiv(authenc(hmac(sha256),"
3621 					    "cbc(des3_ede)))",
3622 				.cra_driver_name = "echainiv-authenc-"
3623 						   "hmac-sha256-"
3624 						   "cbc-des3_ede-caam",
3625 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3626 			},
3627 			.setkey = aead_setkey,
3628 			.setauthsize = aead_setauthsize,
3629 			.encrypt = aead_encrypt,
3630 			.decrypt = aead_decrypt,
3631 			.ivsize = DES3_EDE_BLOCK_SIZE,
3632 			.maxauthsize = SHA256_DIGEST_SIZE,
3633 		},
3634 		.caam = {
3635 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3636 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3637 					   OP_ALG_AAI_HMAC_PRECOMP,
3638 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3639 			.geniv = true,
3640 		},
3641 	},
3642 	{
3643 		.aead = {
3644 			.base = {
3645 				.cra_name = "authenc(hmac(sha384),"
3646 					    "cbc(des3_ede))",
3647 				.cra_driver_name = "authenc-hmac-sha384-"
3648 						   "cbc-des3_ede-caam",
3649 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3650 			},
3651 			.setkey = aead_setkey,
3652 			.setauthsize = aead_setauthsize,
3653 			.encrypt = aead_encrypt,
3654 			.decrypt = aead_decrypt,
3655 			.ivsize = DES3_EDE_BLOCK_SIZE,
3656 			.maxauthsize = SHA384_DIGEST_SIZE,
3657 		},
3658 		.caam = {
3659 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3660 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3661 					   OP_ALG_AAI_HMAC_PRECOMP,
3662 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3663 		},
3664 	},
3665 	{
3666 		.aead = {
3667 			.base = {
3668 				.cra_name = "echainiv(authenc(hmac(sha384),"
3669 					    "cbc(des3_ede)))",
3670 				.cra_driver_name = "echainiv-authenc-"
3671 						   "hmac-sha384-"
3672 						   "cbc-des3_ede-caam",
3673 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3674 			},
3675 			.setkey = aead_setkey,
3676 			.setauthsize = aead_setauthsize,
3677 			.encrypt = aead_encrypt,
3678 			.decrypt = aead_decrypt,
3679 			.ivsize = DES3_EDE_BLOCK_SIZE,
3680 			.maxauthsize = SHA384_DIGEST_SIZE,
3681 		},
3682 		.caam = {
3683 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3684 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3685 					   OP_ALG_AAI_HMAC_PRECOMP,
3686 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3687 			.geniv = true,
3688 		},
3689 	},
3690 	{
3691 		.aead = {
3692 			.base = {
3693 				.cra_name = "authenc(hmac(sha512),"
3694 					    "cbc(des3_ede))",
3695 				.cra_driver_name = "authenc-hmac-sha512-"
3696 						   "cbc-des3_ede-caam",
3697 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3698 			},
3699 			.setkey = aead_setkey,
3700 			.setauthsize = aead_setauthsize,
3701 			.encrypt = aead_encrypt,
3702 			.decrypt = aead_decrypt,
3703 			.ivsize = DES3_EDE_BLOCK_SIZE,
3704 			.maxauthsize = SHA512_DIGEST_SIZE,
3705 		},
3706 		.caam = {
3707 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3708 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3709 					   OP_ALG_AAI_HMAC_PRECOMP,
3710 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3711 		},
3712 	},
3713 	{
3714 		.aead = {
3715 			.base = {
3716 				.cra_name = "echainiv(authenc(hmac(sha512),"
3717 					    "cbc(des3_ede)))",
3718 				.cra_driver_name = "echainiv-authenc-"
3719 						   "hmac-sha512-"
3720 						   "cbc-des3_ede-caam",
3721 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3722 			},
3723 			.setkey = aead_setkey,
3724 			.setauthsize = aead_setauthsize,
3725 			.encrypt = aead_encrypt,
3726 			.decrypt = aead_decrypt,
3727 			.ivsize = DES3_EDE_BLOCK_SIZE,
3728 			.maxauthsize = SHA512_DIGEST_SIZE,
3729 		},
3730 		.caam = {
3731 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3732 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3733 					   OP_ALG_AAI_HMAC_PRECOMP,
3734 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3735 			.geniv = true,
3736 		},
3737 	},
3738 	{
3739 		.aead = {
3740 			.base = {
3741 				.cra_name = "authenc(hmac(md5),cbc(des))",
3742 				.cra_driver_name = "authenc-hmac-md5-"
3743 						   "cbc-des-caam",
3744 				.cra_blocksize = DES_BLOCK_SIZE,
3745 			},
3746 			.setkey = aead_setkey,
3747 			.setauthsize = aead_setauthsize,
3748 			.encrypt = aead_encrypt,
3749 			.decrypt = aead_decrypt,
3750 			.ivsize = DES_BLOCK_SIZE,
3751 			.maxauthsize = MD5_DIGEST_SIZE,
3752 		},
3753 		.caam = {
3754 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3755 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3756 					   OP_ALG_AAI_HMAC_PRECOMP,
3757 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3758 		},
3759 	},
3760 	{
3761 		.aead = {
3762 			.base = {
3763 				.cra_name = "echainiv(authenc(hmac(md5),"
3764 					    "cbc(des)))",
3765 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3766 						   "cbc-des-caam",
3767 				.cra_blocksize = DES_BLOCK_SIZE,
3768 			},
3769 			.setkey = aead_setkey,
3770 			.setauthsize = aead_setauthsize,
3771 			.encrypt = aead_encrypt,
3772 			.decrypt = aead_decrypt,
3773 			.ivsize = DES_BLOCK_SIZE,
3774 			.maxauthsize = MD5_DIGEST_SIZE,
3775 		},
3776 		.caam = {
3777 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3778 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3779 					   OP_ALG_AAI_HMAC_PRECOMP,
3780 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3781 			.geniv = true,
3782 		},
3783 	},
3784 	{
3785 		.aead = {
3786 			.base = {
3787 				.cra_name = "authenc(hmac(sha1),cbc(des))",
3788 				.cra_driver_name = "authenc-hmac-sha1-"
3789 						   "cbc-des-caam",
3790 				.cra_blocksize = DES_BLOCK_SIZE,
3791 			},
3792 			.setkey = aead_setkey,
3793 			.setauthsize = aead_setauthsize,
3794 			.encrypt = aead_encrypt,
3795 			.decrypt = aead_decrypt,
3796 			.ivsize = DES_BLOCK_SIZE,
3797 			.maxauthsize = SHA1_DIGEST_SIZE,
3798 		},
3799 		.caam = {
3800 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3801 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3802 					   OP_ALG_AAI_HMAC_PRECOMP,
3803 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3804 		},
3805 	},
3806 	{
3807 		.aead = {
3808 			.base = {
3809 				.cra_name = "echainiv(authenc(hmac(sha1),"
3810 					    "cbc(des)))",
3811 				.cra_driver_name = "echainiv-authenc-"
3812 						   "hmac-sha1-cbc-des-caam",
3813 				.cra_blocksize = DES_BLOCK_SIZE,
3814 			},
3815 			.setkey = aead_setkey,
3816 			.setauthsize = aead_setauthsize,
3817 			.encrypt = aead_encrypt,
3818 			.decrypt = aead_decrypt,
3819 			.ivsize = DES_BLOCK_SIZE,
3820 			.maxauthsize = SHA1_DIGEST_SIZE,
3821 		},
3822 		.caam = {
3823 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3824 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3825 					   OP_ALG_AAI_HMAC_PRECOMP,
3826 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3827 			.geniv = true,
3828 		},
3829 	},
3830 	{
3831 		.aead = {
3832 			.base = {
3833 				.cra_name = "authenc(hmac(sha224),cbc(des))",
3834 				.cra_driver_name = "authenc-hmac-sha224-"
3835 						   "cbc-des-caam",
3836 				.cra_blocksize = DES_BLOCK_SIZE,
3837 			},
3838 			.setkey = aead_setkey,
3839 			.setauthsize = aead_setauthsize,
3840 			.encrypt = aead_encrypt,
3841 			.decrypt = aead_decrypt,
3842 			.ivsize = DES_BLOCK_SIZE,
3843 			.maxauthsize = SHA224_DIGEST_SIZE,
3844 		},
3845 		.caam = {
3846 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3847 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3848 					   OP_ALG_AAI_HMAC_PRECOMP,
3849 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3850 		},
3851 	},
3852 	{
3853 		.aead = {
3854 			.base = {
3855 				.cra_name = "echainiv(authenc(hmac(sha224),"
3856 					    "cbc(des)))",
3857 				.cra_driver_name = "echainiv-authenc-"
3858 						   "hmac-sha224-cbc-des-caam",
3859 				.cra_blocksize = DES_BLOCK_SIZE,
3860 			},
3861 			.setkey = aead_setkey,
3862 			.setauthsize = aead_setauthsize,
3863 			.encrypt = aead_encrypt,
3864 			.decrypt = aead_decrypt,
3865 			.ivsize = DES_BLOCK_SIZE,
3866 			.maxauthsize = SHA224_DIGEST_SIZE,
3867 		},
3868 		.caam = {
3869 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3870 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3871 					   OP_ALG_AAI_HMAC_PRECOMP,
3872 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3873 			.geniv = true,
3874 		},
3875 	},
3876 	{
3877 		.aead = {
3878 			.base = {
3879 				.cra_name = "authenc(hmac(sha256),cbc(des))",
3880 				.cra_driver_name = "authenc-hmac-sha256-"
3881 						   "cbc-des-caam",
3882 				.cra_blocksize = DES_BLOCK_SIZE,
3883 			},
3884 			.setkey = aead_setkey,
3885 			.setauthsize = aead_setauthsize,
3886 			.encrypt = aead_encrypt,
3887 			.decrypt = aead_decrypt,
3888 			.ivsize = DES_BLOCK_SIZE,
3889 			.maxauthsize = SHA256_DIGEST_SIZE,
3890 		},
3891 		.caam = {
3892 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3893 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3894 					   OP_ALG_AAI_HMAC_PRECOMP,
3895 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3896 		},
3897 	},
3898 	{
3899 		.aead = {
3900 			.base = {
3901 				.cra_name = "echainiv(authenc(hmac(sha256),"
3902 					    "cbc(des)))",
3903 				.cra_driver_name = "echainiv-authenc-"
3904 						   "hmac-sha256-cbc-des-caam",
3905 				.cra_blocksize = DES_BLOCK_SIZE,
3906 			},
3907 			.setkey = aead_setkey,
3908 			.setauthsize = aead_setauthsize,
3909 			.encrypt = aead_encrypt,
3910 			.decrypt = aead_decrypt,
3911 			.ivsize = DES_BLOCK_SIZE,
3912 			.maxauthsize = SHA256_DIGEST_SIZE,
3913 		},
3914 		.caam = {
3915 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3916 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3917 					   OP_ALG_AAI_HMAC_PRECOMP,
3918 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3919 			.geniv = true,
3920 		},
3921 	},
3922 	{
3923 		.aead = {
3924 			.base = {
3925 				.cra_name = "authenc(hmac(sha384),cbc(des))",
3926 				.cra_driver_name = "authenc-hmac-sha384-"
3927 						   "cbc-des-caam",
3928 				.cra_blocksize = DES_BLOCK_SIZE,
3929 			},
3930 			.setkey = aead_setkey,
3931 			.setauthsize = aead_setauthsize,
3932 			.encrypt = aead_encrypt,
3933 			.decrypt = aead_decrypt,
3934 			.ivsize = DES_BLOCK_SIZE,
3935 			.maxauthsize = SHA384_DIGEST_SIZE,
3936 		},
3937 		.caam = {
3938 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3939 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3940 					   OP_ALG_AAI_HMAC_PRECOMP,
3941 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3942 		},
3943 	},
3944 	{
3945 		.aead = {
3946 			.base = {
3947 				.cra_name = "echainiv(authenc(hmac(sha384),"
3948 					    "cbc(des)))",
3949 				.cra_driver_name = "echainiv-authenc-"
3950 						   "hmac-sha384-cbc-des-caam",
3951 				.cra_blocksize = DES_BLOCK_SIZE,
3952 			},
3953 			.setkey = aead_setkey,
3954 			.setauthsize = aead_setauthsize,
3955 			.encrypt = aead_encrypt,
3956 			.decrypt = aead_decrypt,
3957 			.ivsize = DES_BLOCK_SIZE,
3958 			.maxauthsize = SHA384_DIGEST_SIZE,
3959 		},
3960 		.caam = {
3961 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3962 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3963 					   OP_ALG_AAI_HMAC_PRECOMP,
3964 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3965 			.geniv = true,
3966 		},
3967 	},
3968 	{
3969 		.aead = {
3970 			.base = {
3971 				.cra_name = "authenc(hmac(sha512),cbc(des))",
3972 				.cra_driver_name = "authenc-hmac-sha512-"
3973 						   "cbc-des-caam",
3974 				.cra_blocksize = DES_BLOCK_SIZE,
3975 			},
3976 			.setkey = aead_setkey,
3977 			.setauthsize = aead_setauthsize,
3978 			.encrypt = aead_encrypt,
3979 			.decrypt = aead_decrypt,
3980 			.ivsize = DES_BLOCK_SIZE,
3981 			.maxauthsize = SHA512_DIGEST_SIZE,
3982 		},
3983 		.caam = {
3984 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3985 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3986 					   OP_ALG_AAI_HMAC_PRECOMP,
3987 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3988 		},
3989 	},
3990 	{
3991 		.aead = {
3992 			.base = {
3993 				.cra_name = "echainiv(authenc(hmac(sha512),"
3994 					    "cbc(des)))",
3995 				.cra_driver_name = "echainiv-authenc-"
3996 						   "hmac-sha512-cbc-des-caam",
3997 				.cra_blocksize = DES_BLOCK_SIZE,
3998 			},
3999 			.setkey = aead_setkey,
4000 			.setauthsize = aead_setauthsize,
4001 			.encrypt = aead_encrypt,
4002 			.decrypt = aead_decrypt,
4003 			.ivsize = DES_BLOCK_SIZE,
4004 			.maxauthsize = SHA512_DIGEST_SIZE,
4005 		},
4006 		.caam = {
4007 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4008 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4009 					   OP_ALG_AAI_HMAC_PRECOMP,
4010 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4011 			.geniv = true,
4012 		},
4013 	},
4014 	{
4015 		.aead = {
4016 			.base = {
4017 				.cra_name = "authenc(hmac(md5),"
4018 					    "rfc3686(ctr(aes)))",
4019 				.cra_driver_name = "authenc-hmac-md5-"
4020 						   "rfc3686-ctr-aes-caam",
4021 				.cra_blocksize = 1,
4022 			},
4023 			.setkey = aead_setkey,
4024 			.setauthsize = aead_setauthsize,
4025 			.encrypt = aead_encrypt,
4026 			.decrypt = aead_decrypt,
4027 			.ivsize = CTR_RFC3686_IV_SIZE,
4028 			.maxauthsize = MD5_DIGEST_SIZE,
4029 		},
4030 		.caam = {
4031 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4032 					   OP_ALG_AAI_CTR_MOD128,
4033 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
4034 					   OP_ALG_AAI_HMAC_PRECOMP,
4035 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4036 			.rfc3686 = true,
4037 		},
4038 	},
4039 	{
4040 		.aead = {
4041 			.base = {
4042 				.cra_name = "seqiv(authenc("
4043 					    "hmac(md5),rfc3686(ctr(aes))))",
4044 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
4045 						   "rfc3686-ctr-aes-caam",
4046 				.cra_blocksize = 1,
4047 			},
4048 			.setkey = aead_setkey,
4049 			.setauthsize = aead_setauthsize,
4050 			.encrypt = aead_encrypt,
4051 			.decrypt = aead_decrypt,
4052 			.ivsize = CTR_RFC3686_IV_SIZE,
4053 			.maxauthsize = MD5_DIGEST_SIZE,
4054 		},
4055 		.caam = {
4056 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4057 					   OP_ALG_AAI_CTR_MOD128,
4058 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
4059 					   OP_ALG_AAI_HMAC_PRECOMP,
4060 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4061 			.rfc3686 = true,
4062 			.geniv = true,
4063 		},
4064 	},
4065 	{
4066 		.aead = {
4067 			.base = {
4068 				.cra_name = "authenc(hmac(sha1),"
4069 					    "rfc3686(ctr(aes)))",
4070 				.cra_driver_name = "authenc-hmac-sha1-"
4071 						   "rfc3686-ctr-aes-caam",
4072 				.cra_blocksize = 1,
4073 			},
4074 			.setkey = aead_setkey,
4075 			.setauthsize = aead_setauthsize,
4076 			.encrypt = aead_encrypt,
4077 			.decrypt = aead_decrypt,
4078 			.ivsize = CTR_RFC3686_IV_SIZE,
4079 			.maxauthsize = SHA1_DIGEST_SIZE,
4080 		},
4081 		.caam = {
4082 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4083 					   OP_ALG_AAI_CTR_MOD128,
4084 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4085 					   OP_ALG_AAI_HMAC_PRECOMP,
4086 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4087 			.rfc3686 = true,
4088 		},
4089 	},
4090 	{
4091 		.aead = {
4092 			.base = {
4093 				.cra_name = "seqiv(authenc("
4094 					    "hmac(sha1),rfc3686(ctr(aes))))",
4095 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
4096 						   "rfc3686-ctr-aes-caam",
4097 				.cra_blocksize = 1,
4098 			},
4099 			.setkey = aead_setkey,
4100 			.setauthsize = aead_setauthsize,
4101 			.encrypt = aead_encrypt,
4102 			.decrypt = aead_decrypt,
4103 			.ivsize = CTR_RFC3686_IV_SIZE,
4104 			.maxauthsize = SHA1_DIGEST_SIZE,
4105 		},
4106 		.caam = {
4107 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4108 					   OP_ALG_AAI_CTR_MOD128,
4109 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4110 					   OP_ALG_AAI_HMAC_PRECOMP,
4111 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4112 			.rfc3686 = true,
4113 			.geniv = true,
4114 		},
4115 	},
4116 	{
4117 		.aead = {
4118 			.base = {
4119 				.cra_name = "authenc(hmac(sha224),"
4120 					    "rfc3686(ctr(aes)))",
4121 				.cra_driver_name = "authenc-hmac-sha224-"
4122 						   "rfc3686-ctr-aes-caam",
4123 				.cra_blocksize = 1,
4124 			},
4125 			.setkey = aead_setkey,
4126 			.setauthsize = aead_setauthsize,
4127 			.encrypt = aead_encrypt,
4128 			.decrypt = aead_decrypt,
4129 			.ivsize = CTR_RFC3686_IV_SIZE,
4130 			.maxauthsize = SHA224_DIGEST_SIZE,
4131 		},
4132 		.caam = {
4133 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4134 					   OP_ALG_AAI_CTR_MOD128,
4135 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4136 					   OP_ALG_AAI_HMAC_PRECOMP,
4137 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4138 			.rfc3686 = true,
4139 		},
4140 	},
4141 	{
4142 		.aead = {
4143 			.base = {
4144 				.cra_name = "seqiv(authenc("
4145 					    "hmac(sha224),rfc3686(ctr(aes))))",
4146 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
4147 						   "rfc3686-ctr-aes-caam",
4148 				.cra_blocksize = 1,
4149 			},
4150 			.setkey = aead_setkey,
4151 			.setauthsize = aead_setauthsize,
4152 			.encrypt = aead_encrypt,
4153 			.decrypt = aead_decrypt,
4154 			.ivsize = CTR_RFC3686_IV_SIZE,
4155 			.maxauthsize = SHA224_DIGEST_SIZE,
4156 		},
4157 		.caam = {
4158 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4159 					   OP_ALG_AAI_CTR_MOD128,
4160 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4161 					   OP_ALG_AAI_HMAC_PRECOMP,
4162 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4163 			.rfc3686 = true,
4164 			.geniv = true,
4165 		},
4166 	},
4167 	{
4168 		.aead = {
4169 			.base = {
4170 				.cra_name = "authenc(hmac(sha256),"
4171 					    "rfc3686(ctr(aes)))",
4172 				.cra_driver_name = "authenc-hmac-sha256-"
4173 						   "rfc3686-ctr-aes-caam",
4174 				.cra_blocksize = 1,
4175 			},
4176 			.setkey = aead_setkey,
4177 			.setauthsize = aead_setauthsize,
4178 			.encrypt = aead_encrypt,
4179 			.decrypt = aead_decrypt,
4180 			.ivsize = CTR_RFC3686_IV_SIZE,
4181 			.maxauthsize = SHA256_DIGEST_SIZE,
4182 		},
4183 		.caam = {
4184 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4185 					   OP_ALG_AAI_CTR_MOD128,
4186 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4187 					   OP_ALG_AAI_HMAC_PRECOMP,
4188 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4189 			.rfc3686 = true,
4190 		},
4191 	},
4192 	{
4193 		.aead = {
4194 			.base = {
4195 				.cra_name = "seqiv(authenc(hmac(sha256),"
4196 					    "rfc3686(ctr(aes))))",
4197 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
4198 						   "rfc3686-ctr-aes-caam",
4199 				.cra_blocksize = 1,
4200 			},
4201 			.setkey = aead_setkey,
4202 			.setauthsize = aead_setauthsize,
4203 			.encrypt = aead_encrypt,
4204 			.decrypt = aead_decrypt,
4205 			.ivsize = CTR_RFC3686_IV_SIZE,
4206 			.maxauthsize = SHA256_DIGEST_SIZE,
4207 		},
4208 		.caam = {
4209 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4210 					   OP_ALG_AAI_CTR_MOD128,
4211 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4212 					   OP_ALG_AAI_HMAC_PRECOMP,
4213 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4214 			.rfc3686 = true,
4215 			.geniv = true,
4216 		},
4217 	},
4218 	{
4219 		.aead = {
4220 			.base = {
4221 				.cra_name = "authenc(hmac(sha384),"
4222 					    "rfc3686(ctr(aes)))",
4223 				.cra_driver_name = "authenc-hmac-sha384-"
4224 						   "rfc3686-ctr-aes-caam",
4225 				.cra_blocksize = 1,
4226 			},
4227 			.setkey = aead_setkey,
4228 			.setauthsize = aead_setauthsize,
4229 			.encrypt = aead_encrypt,
4230 			.decrypt = aead_decrypt,
4231 			.ivsize = CTR_RFC3686_IV_SIZE,
4232 			.maxauthsize = SHA384_DIGEST_SIZE,
4233 		},
4234 		.caam = {
4235 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4236 					   OP_ALG_AAI_CTR_MOD128,
4237 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4238 					   OP_ALG_AAI_HMAC_PRECOMP,
4239 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4240 			.rfc3686 = true,
4241 		},
4242 	},
4243 	{
4244 		.aead = {
4245 			.base = {
4246 				.cra_name = "seqiv(authenc(hmac(sha384),"
4247 					    "rfc3686(ctr(aes))))",
4248 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
4249 						   "rfc3686-ctr-aes-caam",
4250 				.cra_blocksize = 1,
4251 			},
4252 			.setkey = aead_setkey,
4253 			.setauthsize = aead_setauthsize,
4254 			.encrypt = aead_encrypt,
4255 			.decrypt = aead_decrypt,
4256 			.ivsize = CTR_RFC3686_IV_SIZE,
4257 			.maxauthsize = SHA384_DIGEST_SIZE,
4258 		},
4259 		.caam = {
4260 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4261 					   OP_ALG_AAI_CTR_MOD128,
4262 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4263 					   OP_ALG_AAI_HMAC_PRECOMP,
4264 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4265 			.rfc3686 = true,
4266 			.geniv = true,
4267 		},
4268 	},
4269 	{
4270 		.aead = {
4271 			.base = {
4272 				.cra_name = "authenc(hmac(sha512),"
4273 					    "rfc3686(ctr(aes)))",
4274 				.cra_driver_name = "authenc-hmac-sha512-"
4275 						   "rfc3686-ctr-aes-caam",
4276 				.cra_blocksize = 1,
4277 			},
4278 			.setkey = aead_setkey,
4279 			.setauthsize = aead_setauthsize,
4280 			.encrypt = aead_encrypt,
4281 			.decrypt = aead_decrypt,
4282 			.ivsize = CTR_RFC3686_IV_SIZE,
4283 			.maxauthsize = SHA512_DIGEST_SIZE,
4284 		},
4285 		.caam = {
4286 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4287 					   OP_ALG_AAI_CTR_MOD128,
4288 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4289 					   OP_ALG_AAI_HMAC_PRECOMP,
4290 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4291 			.rfc3686 = true,
4292 		},
4293 	},
4294 	{
4295 		.aead = {
4296 			.base = {
4297 				.cra_name = "seqiv(authenc(hmac(sha512),"
4298 					    "rfc3686(ctr(aes))))",
4299 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
4300 						   "rfc3686-ctr-aes-caam",
4301 				.cra_blocksize = 1,
4302 			},
4303 			.setkey = aead_setkey,
4304 			.setauthsize = aead_setauthsize,
4305 			.encrypt = aead_encrypt,
4306 			.decrypt = aead_decrypt,
4307 			.ivsize = CTR_RFC3686_IV_SIZE,
4308 			.maxauthsize = SHA512_DIGEST_SIZE,
4309 		},
4310 		.caam = {
4311 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4312 					   OP_ALG_AAI_CTR_MOD128,
4313 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4314 					   OP_ALG_AAI_HMAC_PRECOMP,
4315 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4316 			.rfc3686 = true,
4317 			.geniv = true,
4318 		},
4319 	},
4320 };
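
/*
 * Usage sketch (illustration only; this helper is not part of the
 * driver and is never called): a kernel consumer reaches one of the
 * transforms in driver_aeads[] through the generic AEAD API from
 * <crypto/aead.h> (pulled in via compat.h), using the cra_name string
 * from the table.  For authenc() algorithms the key blob passed to
 * crypto_aead_setkey() is the usual rtattr-wrapped authentication +
 * encryption key pair.
 */
static int __maybe_unused caam_aead_usage_sketch(const u8 *key,
						 unsigned int keylen)
{
	struct crypto_aead *tfm;
	int ret;

	/*
	 * Resolves to the highest-priority provider of this name; the
	 * CAAM implementation wins over the software authenc() template
	 * by virtue of CAAM_CRA_PRIORITY.
	 */
	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		/* Truncate the HMAC-SHA256 tag to 16 bytes. */
		ret = crypto_aead_setauthsize(tfm, 16);

	crypto_free_aead(tfm);
	return ret;
}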
4321 
4322 struct caam_crypto_alg {
4323 	struct crypto_alg crypto_alg;
4324 	struct list_head entry;
4325 	struct caam_alg_entry caam;
4326 };
4327 
4328 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4329 {
4330 	ctx->jrdev = caam_jr_alloc();
4331 	if (IS_ERR(ctx->jrdev)) {
4332 		pr_err("Job Ring Device allocation for transform failed\n");
4333 		return PTR_ERR(ctx->jrdev);
4334 	}
4335 
4336 	/* copy descriptor header template values */
4337 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4338 	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4339 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
4340 
4341 	return 0;
4342 }
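
/*
 * Illustration (a sketch mirroring what the shared-descriptor
 * construction code earlier in this file does with the header
 * templates saved above): the per-class algorithm type is OR'd with
 * state and direction bits to form a complete OPERATION command, e.g.
 *
 *	append_operation(desc, ctx->class1_alg_type |
 *			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 */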
4343 
4344 static int caam_cra_init(struct crypto_tfm *tfm)
4345 {
4346 	struct crypto_alg *alg = tfm->__crt_alg;
4347 	struct caam_crypto_alg *caam_alg =
4348 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
4349 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4350 
4351 	return caam_init_common(ctx, &caam_alg->caam);
4352 }
4353 
4354 static int caam_aead_init(struct crypto_aead *tfm)
4355 {
4356 	struct aead_alg *alg = crypto_aead_alg(tfm);
4357 	struct caam_aead_alg *caam_alg =
4358 		 container_of(alg, struct caam_aead_alg, aead);
4359 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4360 
4361 	return caam_init_common(ctx, &caam_alg->caam);
4362 }
4363 
4364 static void caam_exit_common(struct caam_ctx *ctx)
4365 {
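	/*
	 * Undo the DMA mappings created on the setkey/set_sh_desc paths:
	 * each shared descriptor and the key material is unmapped only
	 * if it was actually mapped for this context (non-zero handle
	 * with no recorded mapping error).
	 */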
4366 	if (ctx->sh_desc_enc_dma &&
4367 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
4368 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
4369 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
4370 	if (ctx->sh_desc_dec_dma &&
4371 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
4372 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
4373 				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
4374 	if (ctx->sh_desc_givenc_dma &&
4375 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
4376 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
4377 				 desc_bytes(ctx->sh_desc_givenc),
4378 				 DMA_TO_DEVICE);
4379 	if (ctx->key_dma &&
4380 	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
4381 		dma_unmap_single(ctx->jrdev, ctx->key_dma,
4382 				 ctx->enckeylen + ctx->split_key_pad_len,
4383 				 DMA_TO_DEVICE);
4384 
4385 	caam_jr_free(ctx->jrdev);
4386 }
4387 
4388 static void caam_cra_exit(struct crypto_tfm *tfm)
4389 {
4390 	caam_exit_common(crypto_tfm_ctx(tfm));
4391 }
4392 
4393 static void caam_aead_exit(struct crypto_aead *tfm)
4394 {
4395 	caam_exit_common(crypto_aead_ctx(tfm));
4396 }
4397 
4398 static void __exit caam_algapi_exit(void)
4399 {
4401 	struct caam_crypto_alg *t_alg, *n;
4402 	int i;
4403 
4404 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4405 		struct caam_aead_alg *t_alg = driver_aeads + i;
4406 
4407 		if (t_alg->registered)
4408 			crypto_unregister_aead(&t_alg->aead);
4409 	}
4410 
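	/*
	 * alg_list is only initialized once caam_algapi_init() gets past
	 * device probing; if init bailed out early there are no
	 * ablkcipher algorithms to unregister.
	 */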
4411 	if (!alg_list.next)
4412 		return;
4413 
4414 	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
4415 		crypto_unregister_alg(&t_alg->crypto_alg);
4416 		list_del(&t_alg->entry);
4417 		kfree(t_alg);
4418 	}
4419 }
4420 
4421 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
4422 					      *template)
4423 {
4424 	struct caam_crypto_alg *t_alg;
4425 	struct crypto_alg *alg;
4426 
4427 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4428 	if (!t_alg) {
4429 		pr_err("failed to allocate t_alg\n");
4430 		return ERR_PTR(-ENOMEM);
4431 	}
4432 
4433 	alg = &t_alg->crypto_alg;
4434 
4435 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
4436 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4437 		 template->driver_name);
4438 	alg->cra_module = THIS_MODULE;
4439 	alg->cra_init = caam_cra_init;
4440 	alg->cra_exit = caam_cra_exit;
4441 	alg->cra_priority = CAAM_CRA_PRIORITY;
4442 	alg->cra_blocksize = template->blocksize;
4443 	alg->cra_alignmask = 0;
4444 	alg->cra_ctxsize = sizeof(struct caam_ctx);
4445 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
4446 			 template->type;
4447 	switch (template->type) {
4448 	case CRYPTO_ALG_TYPE_GIVCIPHER:
4449 		alg->cra_type = &crypto_givcipher_type;
4450 		alg->cra_ablkcipher = template->template_ablkcipher;
4451 		break;
4452 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
4453 		alg->cra_type = &crypto_ablkcipher_type;
4454 		alg->cra_ablkcipher = template->template_ablkcipher;
4455 		break;
4456 	}
4457 
4458 	t_alg->caam.class1_alg_type = template->class1_alg_type;
4459 	t_alg->caam.class2_alg_type = template->class2_alg_type;
4460 	t_alg->caam.alg_op = template->alg_op;
4461 
4462 	return t_alg;
4463 }
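
/*
 * Sketch of a driver_algs[] entry as consumed by caam_alg_alloc()
 * (abridged, for illustration; the full template table lives earlier
 * in this file):
 *
 *	{
 *		.name = "cbc(aes)",
 *		.driver_name = "cbc-aes-caam",
 *		.blocksize = AES_BLOCK_SIZE,
 *		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 *		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 *	},
 *
 * caam_alg_alloc() copies the name strings and algorithm type bits
 * into a freshly allocated crypto_alg and wires up the common CAAM
 * init/exit hooks and context size.
 */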
4464 
4465 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
4466 {
4467 	struct aead_alg *alg = &t_alg->aead;
4468 
4469 	alg->base.cra_module = THIS_MODULE;
4470 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
4471 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
4472 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
4473 
4474 	alg->init = caam_aead_init;
4475 	alg->exit = caam_aead_exit;
4476 }
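
/*
 * Note: per-transform fields (ivsize, maxauthsize and the setkey/
 * setauthsize/encrypt/decrypt callbacks) come from each driver_aeads[]
 * entry itself; only the fields common to every CAAM AEAD are filled
 * in here.
 */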
4477 
4478 static int __init caam_algapi_init(void)
4479 {
4480 	struct device_node *dev_node;
4481 	struct platform_device *pdev;
4482 	struct device *ctrldev;
4483 	struct caam_drv_private *priv;
4484 	int i = 0, err = 0;
4485 	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4486 	unsigned int md_limit = SHA512_DIGEST_SIZE;
4487 	bool registered = false;
4488 
4489 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4490 	if (!dev_node) {
4491 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4492 		if (!dev_node)
4493 			return -ENODEV;
4494 	}
4495 
4496 	pdev = of_find_device_by_node(dev_node);
4497 	if (!pdev) {
4498 		of_node_put(dev_node);
4499 		return -ENODEV;
4500 	}
4501 
4502 	ctrldev = &pdev->dev;
4503 	priv = dev_get_drvdata(ctrldev);
4504 	of_node_put(dev_node);
4505 
4506 	/*
4507 	 * If priv is NULL, it's probably because the caam driver wasn't
4508 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4509 	 */
4510 	if (!priv)
4511 		return -ENODEV;
4512 
4514 	INIT_LIST_HEAD(&alg_list);
4515 
4516 	/*
4517 	 * Register crypto algorithms the device supports.
4518 	 * First, detect presence and attributes of DES, AES, and MD blocks.
4519 	 */
4520 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4521 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4522 	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4523 	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4524 	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
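
	/*
	 * cha_num_ls carries one instantiation count per CHA type; a zero
	 * in des_inst/aes_inst/md_inst therefore means the corresponding
	 * accelerator block is absent and its algorithms must be skipped.
	 */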
4525 
4526 	/* If the MD block is a low-power LP256 version, cap the digest size at SHA-256 */
4527 	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4528 		md_limit = SHA256_DIGEST_SIZE;
4529 
4530 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4531 		struct caam_crypto_alg *t_alg;
4532 		struct caam_alg_template *alg = driver_algs + i;
4533 		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
4534 
4535 		/* Skip DES algorithms if not supported by device */
4536 		if (!des_inst &&
4537 		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
4538 		     (alg_sel == OP_ALG_ALGSEL_DES)))
4539 			continue;
4540 
4541 		/* Skip AES algorithms if not supported by device */
4542 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
4543 			continue;
4544 
4545 		t_alg = caam_alg_alloc(alg);
4546 		if (IS_ERR(t_alg)) {
4547 			err = PTR_ERR(t_alg);
4548 			pr_warn("%s alg allocation failed\n", alg->driver_name);
4549 			continue;
4550 		}
4551 
4552 		err = crypto_register_alg(&t_alg->crypto_alg);
4553 		if (err) {
4554 			pr_warn("%s alg registration failed\n",
4555 				t_alg->crypto_alg.cra_driver_name);
4556 			kfree(t_alg);
4557 			continue;
4558 		}
4559 
4560 		list_add_tail(&t_alg->entry, &alg_list);
4561 		registered = true;
4562 	}
4563 
4564 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4565 		struct caam_aead_alg *t_alg = driver_aeads + i;
4566 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
4567 				 OP_ALG_ALGSEL_MASK;
4568 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
4569 				 OP_ALG_ALGSEL_MASK;
4570 		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4571 
4572 		/* Skip DES algorithms if not supported by device */
4573 		if (!des_inst &&
4574 		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
4575 		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
4576 			continue;
4577 
4578 		/* Skip AES algorithms if not supported by device */
4579 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
4580 			continue;
4581 
4582 		/*
4583 		 * Skip GCM-based AES algorithms, which low-power (LP)
4584 		 * AES blocks do not implement.
4585 		 */
4586 		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
4587 			if (alg_aai == OP_ALG_AAI_GCM)
4588 				continue;
4589 
4590 		/*
4591 		 * Skip algorithms that need a message digest if the device
4592 		 * has no MD block or its digests are shorter than required.
4593 		 */
4594 		if (c2_alg_sel &&
4595 		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
4596 			continue;
4597 
4598 		caam_aead_alg_init(t_alg);
4599 
4600 		err = crypto_register_aead(&t_alg->aead);
4601 		if (err) {
4602 			pr_warn("%s alg registration failed\n",
4603 				t_alg->aead.base.cra_driver_name);
4604 			continue;
4605 		}
4606 
4607 		t_alg->registered = true;
4608 		registered = true;
4609 	}
4610 
4611 	if (registered)
4612 		pr_info("caam algorithms registered in /proc/crypto\n");
4613 
4614 	return err;
4615 }
4616 
4617 module_init(caam_algapi_init);
4618 module_exit(caam_algapi_exit);
4619 
4620 MODULE_LICENSE("GPL");
4621 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
4622 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
4623