xref: /openbmc/linux/drivers/crypto/caam/caamalg.c (revision f5b06569)
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
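
/*
 * Illustrative sketch (not part of the driver) of how such a job
 * descriptor can be built with the desc_constr.h helpers; the buffer
 * names and option flags below are examples only and vary per request:
 *
 *	init_job_desc_shared(desc, ctx->sh_desc_enc_dma,
 *			     desc_len(ctx->sh_desc_enc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 */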

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/*
 * max key is sum of AES_MAX_KEY_SIZE, CTR_RFC3686_NONCE_SIZE and the
 * max split key size (SHA512_DIGEST_SIZE * 2)
 */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
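
/*
 * Worked sizing example (a sketch, assuming CAAM_CMD_SZ == 4 and 32-bit
 * CAAM pointers, i.e. DESC_JOB_IO_LEN == 32 and CAAM_DESC_BYTES_MAX ==
 * 256): for authenc(hmac(sha1),...) the padded split key occupies
 * ALIGN(2 * SHA1_DIGEST_SIZE, 16) == 48 bytes, so an aead_null-style
 * inline check such as
 *
 *	DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN + 48
 *	== 56 + 40 + 48 == 144 <= 256
 *
 * succeeds and the key can be placed directly in the shared descriptor.
 */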

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
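
/*
 * For AES the helper above effectively emits the following descriptor
 * flow (illustrative sketch of the generated commands):
 *
 *	JUMP  if-shared ---------> dk:
 *	OPERATION  decrypt		(context not shared, normal key)
 *	JUMP  always ------------> done:
 * dk:	OPERATION  decrypt | AAI_DK	(key left expanded by a previous
 *					 job sharing this descriptor)
 * done:
 */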

/*
 * For aead functions, read the payload from req->src and write the
 * payload to req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
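
/*
 * The two MATH ADDs above copy the remaining input length into the
 * variable sequence length registers (VSIL/VSOL = SEQINLEN + REG0,
 * with REG0 relied upon to hold zero here), so the VLF-flagged FIFO
 * load and store that follow move exactly the request's payload.
 */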

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
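
/*
 * Layout of ctx->key for the authenc algorithms below (a sketch; exact
 * offsets depend on the hash algorithm and on whether RFC3686 is used):
 *
 *	+-------------------------------+ offset 0
 *	| MDHA split key, zero-padded   |
 *	| to split_key_pad_len          |
 *	+-------------------------------+ offset split_key_pad_len
 *	| encryption key (enckeylen;    |
 *	| for RFC3686 the 4-byte nonce  |
 *	| is counted in enckeylen and   |
 *	| sits at the end)              |
 *	+-------------------------------+
 */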

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
			       enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
				      FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy IV from the output FIFO to the class 2 input FIFO */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload the IV */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
				    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if the key is already loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
						 JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
						 JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump over the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if the key is already loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
						 JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
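
/*
 * Note on the zero-length branches above (an explanatory sketch): GCM
 * must still emit a valid tag when the AAD and/or payload is empty,
 * e.g. gcm(aes) with assoclen == 0 and cryptlen == 0 reduces to writing
 * E(K, J0) as the ICV. The MATH_Z jumps route around the FIFO
 * loads/stores that would otherwise stall on zero-length transfers.
 */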

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
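	/*
	 * REG3 holds the assoclen supplied with the request; for rfc4106
	 * it includes the 8-byte explicit IV, so only assoclen - 8 bytes
	 * of real AAD are read and the IV itself is skipped below.
	 */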

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
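
/*
 * The "split key" generated above is not the raw authentication key but
 * the pair of inner/outer HMAC pad states that the MDHA unit resumes
 * from (see key_gen.c). Worked example (a sketch): for hmac(sha256),
 * split_key_len = 2 * SHA256_DIGEST_SIZE = 64 and split_key_pad_len =
 * ALIGN(64, 16) = 64, so ctx->key holds the 64-byte split key followed
 * by the encryption key.
 */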

static int aead_setkey(struct crypto_aead *aead,
			       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
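
/*
 * For reference (illustrative; defined by the generic authenc template,
 * not by this driver): the key blob parsed by
 * crypto_authenc_extractkeys() above is an rtattr of type
 * CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian enckeylen, followed
 * by authkeylen bytes of authentication key and then enckeylen bytes of
 * encryption key.
 */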

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}
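
/*
 * RFC4106 key material layout, shown for illustration (defined by the
 * RFC, not by this driver):
 *
 *	key[0 .. keylen-5]        AES key (ctx->enckeylen bytes)
 *	key[keylen-4 .. keylen-1] 4-byte salt
 *
 * On the wire the 96-bit GCM nonce is then {salt, 8-byte explicit IV
 * carried with each packet}.
 */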

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u32 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}
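
	/*
	 * Resulting CONTEXT1 layout for rfc3686(ctr(aes)), shown as a
	 * sketch:
	 *
	 *	CONTEXT1[255:224]  nonce   (4 bytes, from the end of *key)
	 *	CONTEXT1[223:160]  IV      (8 bytes, loaded per request)
	 *	CONTEXT1[159:128]  counter (initialised to 1)
	 */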

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
1630 	/* ablkcipher_givencrypt shared descriptor */
1631 	desc = ctx->sh_desc_givenc;
1632 
1633 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1634 	/* Skip if already shared */
1635 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1636 				   JUMP_COND_SHRD);
1637 
1638 	/* Load class1 key only */
1639 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1640 			  ctx->enckeylen, CLASS_1 |
1641 			  KEY_DEST_CLASS_REG);
1642 
1643 	/* Load Nonce into CONTEXT1 reg */
1644 	if (is_rfc3686) {
1645 		nonce = (u32 *)(key + keylen);
1646 		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1647 				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1648 		append_move(desc, MOVE_WAITCOMP |
1649 			    MOVE_SRC_OUTFIFO |
1650 			    MOVE_DEST_CLASS1CTX |
1651 			    (16 << MOVE_OFFSET_SHIFT) |
1652 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1653 	}
1654 	set_jump_tgt_here(desc, key_jump_cmd);
1655 
1656 	/* Generate IV */
1657 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1658 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1659 		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1660 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1661 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1662 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1663 	append_move(desc, MOVE_WAITCOMP |
1664 		    MOVE_SRC_INFIFO |
1665 		    MOVE_DEST_CLASS1CTX |
1666 		    (crt->ivsize << MOVE_LEN_SHIFT) |
1667 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1668 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1669 
1670 	/* Copy generated IV to memory */
1671 	append_seq_store(desc, crt->ivsize,
1672 			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1673 			 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1674 
1675 	/* Load Counter into CONTEXT1 reg */
1676 	if (is_rfc3686)
1677 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1678 				    LDST_CLASS_1_CCB |
1679 				    LDST_SRCDST_BYTE_CONTEXT |
1680 				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1681 				     LDST_OFFSET_SHIFT));
1682 
1683 	if (ctx1_iv_off)
1684 		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1685 			    (1 << JUMP_OFFSET_SHIFT));
1686 
1687 	/* Load operation */
1688 	append_operation(desc, ctx->class1_alg_type |
1689 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1690 
1691 	/* Perform operation */
1692 	ablkcipher_append_src_dst(desc);
1693 
1694 	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1695 						 desc_bytes(desc),
1696 						 DMA_TO_DEVICE);
1697 	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1698 		dev_err(jrdev, "unable to map shared descriptor\n");
1699 		return -ENOMEM;
1700 	}
1701 #ifdef DEBUG
1702 	print_hex_dump(KERN_ERR,
1703 		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1704 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1705 		       desc_bytes(desc), 1);
1706 #endif
1707 
1708 	return ret;
1709 }
1710 
1711 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1712 				 const u8 *key, unsigned int keylen)
1713 {
1714 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1715 	struct device *jrdev = ctx->jrdev;
1716 	u32 *key_jump_cmd, *desc;
1717 	__be64 sector_size = cpu_to_be64(512);
1718 
1719 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
1720 		crypto_ablkcipher_set_flags(ablkcipher,
1721 					    CRYPTO_TFM_RES_BAD_KEY_LEN);
1722 		dev_err(jrdev, "key size mismatch\n");
1723 		return -EINVAL;
1724 	}
1725 
1726 	memcpy(ctx->key, key, keylen);
1727 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
1728 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1729 		dev_err(jrdev, "unable to map key i/o memory\n");
1730 		return -ENOMEM;
1731 	}
1732 	ctx->enckeylen = keylen;
1733 
1734 	/* xts_ablkcipher_encrypt shared descriptor */
1735 	desc = ctx->sh_desc_enc;
1736 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1737 	/* Skip if already shared */
1738 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1739 				   JUMP_COND_SHRD);
1740 
1741 	/* Load class1 keys only */
1742 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1743 			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1744 
1745 	/* Load sector size with index 40 bytes (0x28) */
1746 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1747 		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1748 	append_data(desc, (void *)&sector_size, 8);
1749 
1750 	set_jump_tgt_here(desc, key_jump_cmd);
1751 
1752 	/*
1753 	 * create sequence for loading the sector index
1754 	 * Upper 8B of IV - will be used as sector index
1755 	 * Lower 8B of IV - will be discarded
1756 	 */
1757 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1758 		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1759 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
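	/*
	 * A sketch of the CONTEXT1 byte layout the two loads above set up
	 * (inferred from the offsets used here, not from a hardware manual):
	 *
	 *   0x20..0x27: sector index = upper 8 bytes of the request IV
	 *   0x28..0x2f: sector size  = 512, big endian
	 *
	 * The lower 8 IV bytes are pulled in only to be discarded by the
	 * FIFO skip, keeping the input sequence aligned with the payload.
	 */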
1760 
1761 	/* Load operation */
1762 	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
1763 			 OP_ALG_ENCRYPT);
1764 
1765 	/* Perform operation */
1766 	ablkcipher_append_src_dst(desc);
1767 
1768 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1769 					      DMA_TO_DEVICE);
1770 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1771 		dev_err(jrdev, "unable to map shared descriptor\n");
1772 		return -ENOMEM;
1773 	}
1774 #ifdef DEBUG
1775 	print_hex_dump(KERN_ERR,
1776 		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1777 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1778 #endif
1779 
1780 	/* xts_ablkcipher_decrypt shared descriptor */
1781 	desc = ctx->sh_desc_dec;
1782 
1783 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1784 	/* Skip if already shared */
1785 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1786 				   JUMP_COND_SHRD);
1787 
1788 	/* Load class1 key only */
1789 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1790 			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1791 
1792 	/* Load sector size with index 40 bytes (0x28) */
1793 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1794 		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1795 	append_data(desc, (void *)&sector_size, 8);
1796 
1797 	set_jump_tgt_here(desc, key_jump_cmd);
1798 
1799 	/*
1800 	 * create sequence for loading the sector index
1801 	 * Upper 8B of IV - will be used as sector index
1802 	 * Lower 8B of IV - will be discarded
1803 	 */
1804 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1805 		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1806 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1807 
1808 	/* Load operation */
1809 	append_dec_op1(desc, ctx->class1_alg_type);
1810 
1811 	/* Perform operation */
1812 	ablkcipher_append_src_dst(desc);
1813 
1814 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1815 					      DMA_TO_DEVICE);
1816 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1817 		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
1818 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
1819 		dev_err(jrdev, "unable to map shared descriptor\n");
1820 		return -ENOMEM;
1821 	}
1822 #ifdef DEBUG
1823 	print_hex_dump(KERN_ERR,
1824 		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1825 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1826 #endif
1827 
1828 	return 0;
1829 }
1830 
1831 /*
1832  * aead_edesc - s/w-extended aead descriptor
1833  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
1834  * @src_nents: number of segments in input scatterlist
1835  * @dst_nents: number of segments in output scatterlist
1836  * @iv_dma: dma address of iv for checking continuity and link table
1837  * @sec4_sg_bytes: length of dma mapped sec4_sg space
1838  * @sec4_sg_dma: bus physical mapped address of h/w link table
1839  * @sec4_sg: pointer to h/w link table
1840  * @hw_desc: the h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
1841  */
1842 struct aead_edesc {
1843 	int assoc_nents;
1844 	int src_nents;
1845 	int dst_nents;
1846 	dma_addr_t iv_dma;
1847 	int sec4_sg_bytes;
1848 	dma_addr_t sec4_sg_dma;
1849 	struct sec4_sg_entry *sec4_sg;
1850 	u32 hw_desc[];
1851 };
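/*
 * Both edesc flavors are carved out of a single kzalloc() in their
 * *_edesc_alloc() routines; a sketch of that allocation (offsets depend
 * on the desc_bytes argument passed in):
 *
 *   +--------------------+----------------------+--------------------+
 *   | struct *_edesc     | hw_desc (desc_bytes) | sec4_sg link table |
 *   +--------------------+----------------------+--------------------+
 *
 * which is why sec4_sg is initialized to
 * (void *)edesc + sizeof(*edesc) + desc_bytes in those routines.
 */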
1852 
1853 /*
1854  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1855  * @src_nents: number of segments in input scatterlist
1856  * @dst_nents: number of segments in output scatterlist
1857  * @iv_dma: dma address of iv for checking continuity and link table
1858  * @sec4_sg_bytes: length of dma mapped sec4_sg space
1859  * @sec4_sg_dma: bus physical mapped address of h/w link table
1860  * @sec4_sg: pointer to h/w link table
1861  * @hw_desc: the h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
1862  */
1863 struct ablkcipher_edesc {
1864 	int src_nents;
1865 	int dst_nents;
1866 	dma_addr_t iv_dma;
1867 	int sec4_sg_bytes;
1868 	dma_addr_t sec4_sg_dma;
1869 	struct sec4_sg_entry *sec4_sg;
1870 	u32 hw_desc[];
1871 };
1872 
1873 static void caam_unmap(struct device *dev, struct scatterlist *src,
1874 		       struct scatterlist *dst, int src_nents,
1875 		       int dst_nents,
1876 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1877 		       int sec4_sg_bytes)
1878 {
1879 	if (dst != src) {
1880 		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
1881 		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
1882 	} else {
1883 		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
1884 	}
1885 
1886 	if (iv_dma)
1887 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1888 	if (sec4_sg_bytes)
1889 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
1890 				 DMA_TO_DEVICE);
1891 }
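/*
 * Note: the unmap directions above mirror the dma_map_sg() calls made in
 * the *_edesc_alloc() routines below (DMA_BIDIRECTIONAL for in-place
 * requests, DMA_TO_DEVICE/DMA_FROM_DEVICE for distinct src/dst).
 */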
1892 
1893 static void aead_unmap(struct device *dev,
1894 		       struct aead_edesc *edesc,
1895 		       struct aead_request *req)
1896 {
1897 	caam_unmap(dev, req->src, req->dst,
1898 		   edesc->src_nents, edesc->dst_nents, 0, 0,
1899 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1900 }
1901 
1902 static void ablkcipher_unmap(struct device *dev,
1903 			     struct ablkcipher_edesc *edesc,
1904 			     struct ablkcipher_request *req)
1905 {
1906 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1907 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1908 
1909 	caam_unmap(dev, req->src, req->dst,
1910 		   edesc->src_nents, edesc->dst_nents,
1911 		   edesc->iv_dma, ivsize,
1912 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1913 }
1914 
1915 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1916 				   void *context)
1917 {
1918 	struct aead_request *req = context;
1919 	struct aead_edesc *edesc;
1920 
1921 #ifdef DEBUG
1922 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1923 #endif
1924 
1925 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1926 
1927 	if (err)
1928 		caam_jr_strstatus(jrdev, err);
1929 
1930 	aead_unmap(jrdev, edesc, req);
1931 
1932 	kfree(edesc);
1933 
1934 	aead_request_complete(req, err);
1935 }
1936 
1937 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1938 				   void *context)
1939 {
1940 	struct aead_request *req = context;
1941 	struct aead_edesc *edesc;
1942 
1943 #ifdef DEBUG
1944 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1945 #endif
1946 
1947 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1948 
1949 	if (err)
1950 		caam_jr_strstatus(jrdev, err);
1951 
1952 	aead_unmap(jrdev, edesc, req);
1953 
1954 	/*
1955 	 * Verify that the h/w authentication (ICV) check passed, else return -EBADMSG
1956 	 */
1957 	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1958 		err = -EBADMSG;
1959 
1960 	kfree(edesc);
1961 
1962 	aead_request_complete(req, err);
1963 }
1964 
1965 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1966 				   void *context)
1967 {
1968 	struct ablkcipher_request *req = context;
1969 	struct ablkcipher_edesc *edesc;
1970 #ifdef DEBUG
1971 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1972 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1973 
1974 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1975 #endif
1976 
1977 	edesc = (struct ablkcipher_edesc *)((char *)desc -
1978 		 offsetof(struct ablkcipher_edesc, hw_desc));
1979 
1980 	if (err)
1981 		caam_jr_strstatus(jrdev, err);
1982 
1983 #ifdef DEBUG
1984 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
1985 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1986 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
1987 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
1988 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1989 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1990 #endif
1991 
1992 	ablkcipher_unmap(jrdev, edesc, req);
1993 	kfree(edesc);
1994 
1995 	ablkcipher_request_complete(req, err);
1996 }
1997 
1998 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1999 				    void *context)
2000 {
2001 	struct ablkcipher_request *req = context;
2002 	struct ablkcipher_edesc *edesc;
2003 #ifdef DEBUG
2004 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2005 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2006 
2007 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2008 #endif
2009 
2010 	edesc = (struct ablkcipher_edesc *)((char *)desc -
2011 		 offsetof(struct ablkcipher_edesc, hw_desc));
2012 	if (err)
2013 		caam_jr_strstatus(jrdev, err);
2014 
2015 #ifdef DEBUG
2016 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2017 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2018 		       ivsize, 1);
2019 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2020 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2021 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2022 #endif
2023 
2024 	ablkcipher_unmap(jrdev, edesc, req);
2025 	kfree(edesc);
2026 
2027 	ablkcipher_request_complete(req, err);
2028 }
2029 
2030 /*
2031  * Fill in aead job descriptor
2032  */
2033 static void init_aead_job(struct aead_request *req,
2034 			  struct aead_edesc *edesc,
2035 			  bool all_contig, bool encrypt)
2036 {
2037 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2038 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2039 	int authsize = ctx->authsize;
2040 	u32 *desc = edesc->hw_desc;
2041 	u32 out_options, in_options;
2042 	dma_addr_t dst_dma, src_dma;
2043 	int len, sec4_sg_index = 0;
2044 	dma_addr_t ptr;
2045 	u32 *sh_desc;
2046 
2047 	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2048 	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2049 
2050 	len = desc_len(sh_desc);
2051 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2052 
2053 	if (all_contig) {
2054 		src_dma = sg_dma_address(req->src);
2055 		in_options = 0;
2056 	} else {
2057 		src_dma = edesc->sec4_sg_dma;
2058 		sec4_sg_index += edesc->src_nents;
2059 		in_options = LDST_SGF;
2060 	}
2061 
2062 	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2063 			  in_options);
2064 
2065 	dst_dma = src_dma;
2066 	out_options = in_options;
2067 
2068 	if (unlikely(req->src != req->dst)) {
2069 		if (!edesc->dst_nents) {
2070 			dst_dma = sg_dma_address(req->dst);
2071 		} else {
2072 			dst_dma = edesc->sec4_sg_dma +
2073 				  sec4_sg_index *
2074 				  sizeof(struct sec4_sg_entry);
2075 			out_options = LDST_SGF;
2076 		}
2077 	}
2078 
2079 	if (encrypt)
2080 		append_seq_out_ptr(desc, dst_dma,
2081 				   req->assoclen + req->cryptlen + authsize,
2082 				   out_options);
2083 	else
2084 		append_seq_out_ptr(desc, dst_dma,
2085 				   req->assoclen + req->cryptlen - authsize,
2086 				   out_options);
2087 
2088 	/* REG3 = assoclen */
2089 	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2090 }
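/*
 * The job descriptor built above, in the style of the diagram at the top
 * of this file (a sketch; the exact command words come from desc_constr.h):
 *
 * --------------------------------------------
 * | Header (HDR_SHARE_DEFER | HDR_REVERSE)   |
 * | ShareDesc pointer (enc or dec version)   |
 * | SEQ_IN_PTR  (src, assoclen + cryptlen)   |
 * | SEQ_OUT_PTR (dst, +/- authsize)          |
 * | MATH: REG3 = assoclen                    |
 * --------------------------------------------
 */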
2091 
2092 static void init_gcm_job(struct aead_request *req,
2093 			 struct aead_edesc *edesc,
2094 			 bool all_contig, bool encrypt)
2095 {
2096 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2097 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2098 	unsigned int ivsize = crypto_aead_ivsize(aead);
2099 	u32 *desc = edesc->hw_desc;
2100 	bool generic_gcm = (ivsize == 12);
2101 	unsigned int last;
2102 
2103 	init_aead_job(req, edesc, all_contig, encrypt);
2104 
2105 	/* BUG This should not be specific to generic GCM. */
2106 	last = 0;
2107 	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2108 		last = FIFOLD_TYPE_LAST1;
2109 
2110 	/* Read GCM IV */
2111 	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2112 			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2113 	/* Append Salt */
2114 	if (!generic_gcm)
2115 		append_data(desc, ctx->key + ctx->enckeylen, 4);
2116 	/* Append IV */
2117 	append_data(desc, req->iv, ivsize);
2118 	/* End of blank commands */
2119 }
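/*
 * The FIFO LOAD above announces 12 bytes of immediate IV data; the
 * append_data() calls that follow supply exactly those bytes: a 4-byte
 * salt kept at the end of the key plus the 8-byte per-request IV for
 * RFC4106/RFC4543, or the full 12-byte IV for generic GCM.
 */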
2120 
2121 static void init_authenc_job(struct aead_request *req,
2122 			     struct aead_edesc *edesc,
2123 			     bool all_contig, bool encrypt)
2124 {
2125 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2126 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
2127 						 struct caam_aead_alg, aead);
2128 	unsigned int ivsize = crypto_aead_ivsize(aead);
2129 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2130 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
2131 			       OP_ALG_AAI_CTR_MOD128);
2132 	const bool is_rfc3686 = alg->caam.rfc3686;
2133 	u32 *desc = edesc->hw_desc;
2134 	u32 ivoffset = 0;
2135 
2136 	/*
2137 	 * AES-CTR needs to load IV in CONTEXT1 reg
2138 	 * at an offset of 128bits (16bytes)
2139 	 * CONTEXT1[255:128] = IV
2140 	 */
2141 	if (ctr_mode)
2142 		ivoffset = 16;
2143 
2144 	/*
2145 	 * RFC3686 specific:
2146 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
2147 	 */
2148 	if (is_rfc3686)
2149 		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
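	/*
	 * Resulting CONTEXT1[255:128] byte layout for RFC3686 (sketch;
	 * CTR_RFC3686_NONCE_SIZE is 4, the IV is 8 bytes, the counter 4):
	 *
	 *   bytes 16..19: nonce   20..27: IV   28..31: counter
	 *
	 * hence the IV load below targets offset 16 + 4 = 20.
	 */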
2150 
2151 	init_aead_job(req, edesc, all_contig, encrypt);
2152 
2153 	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
2154 		append_load_as_imm(desc, req->iv, ivsize,
2155 				   LDST_CLASS_1_CCB |
2156 				   LDST_SRCDST_BYTE_CONTEXT |
2157 				   (ivoffset << LDST_OFFSET_SHIFT));
2158 }
2159 
2160 /*
2161  * Fill in ablkcipher job descriptor
2162  */
2163 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2164 				struct ablkcipher_edesc *edesc,
2165 				struct ablkcipher_request *req,
2166 				bool iv_contig)
2167 {
2168 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2169 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2170 	u32 *desc = edesc->hw_desc;
2171 	u32 out_options = 0, in_options;
2172 	dma_addr_t dst_dma, src_dma;
2173 	int len, sec4_sg_index = 0;
2174 
2175 #ifdef DEBUG
2176 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2177 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2178 		       ivsize, 1);
2179 	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2180 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2181 		       edesc->src_nents ? 100 : req->nbytes, 1);
2182 #endif
2183 
2184 	len = desc_len(sh_desc);
2185 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2186 
2187 	if (iv_contig) {
2188 		src_dma = edesc->iv_dma;
2189 		in_options = 0;
2190 	} else {
2191 		src_dma = edesc->sec4_sg_dma;
2192 		sec4_sg_index += edesc->src_nents + 1;
2193 		in_options = LDST_SGF;
2194 	}
2195 	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2196 
2197 	if (likely(req->src == req->dst)) {
2198 		if (!edesc->src_nents && iv_contig) {
2199 			dst_dma = sg_dma_address(req->src);
2200 		} else {
2201 			dst_dma = edesc->sec4_sg_dma +
2202 				sizeof(struct sec4_sg_entry);
2203 			out_options = LDST_SGF;
2204 		}
2205 	} else {
2206 		if (!edesc->dst_nents) {
2207 			dst_dma = sg_dma_address(req->dst);
2208 		} else {
2209 			dst_dma = edesc->sec4_sg_dma +
2210 				sec4_sg_index * sizeof(struct sec4_sg_entry);
2211 			out_options = LDST_SGF;
2212 		}
2213 	}
2214 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2215 }
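/*
 * Note the asymmetric sequence lengths set up above: the input covers
 * IV + payload (req->nbytes + ivsize, the IV either physically in front
 * of src or supplied as link-table entry 0), while the output covers
 * only the req->nbytes of processed data.
 */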
2216 
2217 /*
2218  * Fill in ablkcipher givencrypt job descriptor
2219  */
2220 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2221 				    struct ablkcipher_edesc *edesc,
2222 				    struct ablkcipher_request *req,
2223 				    bool iv_contig)
2224 {
2225 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2226 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2227 	u32 *desc = edesc->hw_desc;
2228 	u32 out_options, in_options;
2229 	dma_addr_t dst_dma, src_dma;
2230 	int len, sec4_sg_index = 0;
2231 
2232 #ifdef DEBUG
2233 	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2234 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2235 		       ivsize, 1);
2236 	print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
2237 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2238 		       edesc->src_nents ? 100 : req->nbytes, 1);
2239 #endif
2240 
2241 	len = desc_len(sh_desc);
2242 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2243 
2244 	if (!edesc->src_nents) {
2245 		src_dma = sg_dma_address(req->src);
2246 		in_options = 0;
2247 	} else {
2248 		src_dma = edesc->sec4_sg_dma;
2249 		sec4_sg_index += edesc->src_nents;
2250 		in_options = LDST_SGF;
2251 	}
2252 	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2253 
2254 	if (iv_contig) {
2255 		dst_dma = edesc->iv_dma;
2256 		out_options = 0;
2257 	} else {
2258 		dst_dma = edesc->sec4_sg_dma +
2259 			  sec4_sg_index * sizeof(struct sec4_sg_entry);
2260 		out_options = LDST_SGF;
2261 	}
2262 	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2263 }
2264 
2265 /*
2266  * allocate and map the aead extended descriptor
2267  */
2268 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2269 					   int desc_bytes, bool *all_contig_ptr,
2270 					   bool encrypt)
2271 {
2272 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2273 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2274 	struct device *jrdev = ctx->jrdev;
2275 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2276 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2277 	int src_nents, dst_nents = 0;
2278 	struct aead_edesc *edesc;
2279 	int sgc;
2280 	bool all_contig = true;
2281 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2282 	unsigned int authsize = ctx->authsize;
2283 
2284 	if (unlikely(req->dst != req->src)) {
2285 		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
2286 		dst_nents = sg_count(req->dst,
2287 				     req->assoclen + req->cryptlen +
2288 					(encrypt ? authsize : (-authsize)));
2289 	} else {
2290 		src_nents = sg_count(req->src,
2291 				     req->assoclen + req->cryptlen +
2292 					(encrypt ? authsize : 0));
2293 	}
2294 
2295 	/* Check if data are contiguous. */
2296 	all_contig = !src_nents;
2297 	if (!all_contig) {
2298 		src_nents = src_nents ? : 1;
2299 		sec4_sg_len = src_nents;
2300 	}
2301 
2302 	sec4_sg_len += dst_nents;
2303 
2304 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2305 
2306 	/* allocate space for base edesc and hw desc commands, link tables */
2307 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2308 			GFP_DMA | flags);
2309 	if (!edesc) {
2310 		dev_err(jrdev, "could not allocate extended descriptor\n");
2311 		return ERR_PTR(-ENOMEM);
2312 	}
2313 
2314 	if (likely(req->src == req->dst)) {
2315 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2316 				 DMA_BIDIRECTIONAL);
2317 		if (unlikely(!sgc)) {
2318 			dev_err(jrdev, "unable to map source\n");
2319 			kfree(edesc);
2320 			return ERR_PTR(-ENOMEM);
2321 		}
2322 	} else {
2323 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2324 				 DMA_TO_DEVICE);
2325 		if (unlikely(!sgc)) {
2326 			dev_err(jrdev, "unable to map source\n");
2327 			kfree(edesc);
2328 			return ERR_PTR(-ENOMEM);
2329 		}
2330 
2331 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2332 				 DMA_FROM_DEVICE);
2333 		if (unlikely(!sgc)) {
2334 			dev_err(jrdev, "unable to map destination\n");
2335 			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
2336 				     DMA_TO_DEVICE);
2337 			kfree(edesc);
2338 			return ERR_PTR(-ENOMEM);
2339 		}
2340 	}
2341 
2342 	edesc->src_nents = src_nents;
2343 	edesc->dst_nents = dst_nents;
2344 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2345 			 desc_bytes;
2346 	*all_contig_ptr = all_contig;
2347 
2348 	sec4_sg_index = 0;
2349 	if (!all_contig) {
2350 		sg_to_sec4_sg_last(req->src, src_nents,
2351 			      edesc->sec4_sg + sec4_sg_index, 0);
2352 		sec4_sg_index += src_nents;
2353 	}
2354 	if (dst_nents) {
2355 		sg_to_sec4_sg_last(req->dst, dst_nents,
2356 				   edesc->sec4_sg + sec4_sg_index, 0);
2357 	}
2358 
2359 	if (!sec4_sg_bytes)
2360 		return edesc;
2361 
2362 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2363 					    sec4_sg_bytes, DMA_TO_DEVICE);
2364 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2365 		dev_err(jrdev, "unable to map S/G table\n");
2366 		aead_unmap(jrdev, edesc, req);
2367 		kfree(edesc);
2368 		return ERR_PTR(-ENOMEM);
2369 	}
2370 
2371 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2372 
2373 	return edesc;
2374 }
2375 
2376 static int gcm_encrypt(struct aead_request *req)
2377 {
2378 	struct aead_edesc *edesc;
2379 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2380 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2381 	struct device *jrdev = ctx->jrdev;
2382 	bool all_contig;
2383 	u32 *desc;
2384 	int ret = 0;
2385 
2386 	/* allocate extended descriptor */
2387 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
2388 	if (IS_ERR(edesc))
2389 		return PTR_ERR(edesc);
2390 
2391 	/* Create and submit job descriptor */
2392 	init_gcm_job(req, edesc, all_contig, true);
2393 #ifdef DEBUG
2394 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2395 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2396 		       desc_bytes(edesc->hw_desc), 1);
2397 #endif
2398 
2399 	desc = edesc->hw_desc;
2400 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2401 	if (!ret) {
2402 		ret = -EINPROGRESS;
2403 	} else {
2404 		aead_unmap(jrdev, edesc, req);
2405 		kfree(edesc);
2406 	}
2407 
2408 	return ret;
2409 }
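/*
 * caam_jr_enqueue() returning 0 means the job ring accepted the job; the
 * request then finishes asynchronously through the _done callback, so
 * -EINPROGRESS is reported to the crypto API. Any other return value is
 * a submission failure and the edesc is torn down on the spot. The same
 * pattern repeats in every encrypt/decrypt entry point below.
 */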
2410 
2411 static int ipsec_gcm_encrypt(struct aead_request *req)
2412 {
2413 	if (req->assoclen < 8)
2414 		return -EINVAL;
2415 
2416 	return gcm_encrypt(req);
2417 }
2418 
2419 static int aead_encrypt(struct aead_request *req)
2420 {
2421 	struct aead_edesc *edesc;
2422 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2423 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2424 	struct device *jrdev = ctx->jrdev;
2425 	bool all_contig;
2426 	u32 *desc;
2427 	int ret = 0;
2428 
2429 	/* allocate extended descriptor */
2430 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2431 				 &all_contig, true);
2432 	if (IS_ERR(edesc))
2433 		return PTR_ERR(edesc);
2434 
2435 	/* Create and submit job descriptor */
2436 	init_authenc_job(req, edesc, all_contig, true);
2437 #ifdef DEBUG
2438 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2439 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2440 		       desc_bytes(edesc->hw_desc), 1);
2441 #endif
2442 
2443 	desc = edesc->hw_desc;
2444 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2445 	if (!ret) {
2446 		ret = -EINPROGRESS;
2447 	} else {
2448 		aead_unmap(jrdev, edesc, req);
2449 		kfree(edesc);
2450 	}
2451 
2452 	return ret;
2453 }
2454 
2455 static int gcm_decrypt(struct aead_request *req)
2456 {
2457 	struct aead_edesc *edesc;
2458 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2459 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2460 	struct device *jrdev = ctx->jrdev;
2461 	bool all_contig;
2462 	u32 *desc;
2463 	int ret = 0;
2464 
2465 	/* allocate extended descriptor */
2466 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2467 	if (IS_ERR(edesc))
2468 		return PTR_ERR(edesc);
2469 
2470 	/* Create and submit job descriptor*/
2471 	init_gcm_job(req, edesc, all_contig, false);
2472 #ifdef DEBUG
2473 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2474 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2475 		       desc_bytes(edesc->hw_desc), 1);
2476 #endif
2477 
2478 	desc = edesc->hw_desc;
2479 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2480 	if (!ret) {
2481 		ret = -EINPROGRESS;
2482 	} else {
2483 		aead_unmap(jrdev, edesc, req);
2484 		kfree(edesc);
2485 	}
2486 
2487 	return ret;
2488 }
2489 
2490 static int ipsec_gcm_decrypt(struct aead_request *req)
2491 {
2492 	if (req->assoclen < 8)
2493 		return -EINVAL;
2494 
2495 	return gcm_decrypt(req);
2496 }
2497 
2498 static int aead_decrypt(struct aead_request *req)
2499 {
2500 	struct aead_edesc *edesc;
2501 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2502 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2503 	struct device *jrdev = ctx->jrdev;
2504 	bool all_contig;
2505 	u32 *desc;
2506 	int ret = 0;
2507 
2508 	/* allocate extended descriptor */
2509 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2510 				 &all_contig, false);
2511 	if (IS_ERR(edesc))
2512 		return PTR_ERR(edesc);
2513 
2514 #ifdef DEBUG
2515 	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2516 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2517 		       req->assoclen + req->cryptlen, 1);
2518 #endif
2519 
2520 	/* Create and submit job descriptor*/
2521 	init_authenc_job(req, edesc, all_contig, false);
2522 #ifdef DEBUG
2523 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2524 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2525 		       desc_bytes(edesc->hw_desc), 1);
2526 #endif
2527 
2528 	desc = edesc->hw_desc;
2529 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2530 	if (!ret) {
2531 		ret = -EINPROGRESS;
2532 	} else {
2533 		aead_unmap(jrdev, edesc, req);
2534 		kfree(edesc);
2535 	}
2536 
2537 	return ret;
2538 }
2539 
2540 static int aead_givdecrypt(struct aead_request *req)
2541 {
2542 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2543 	unsigned int ivsize = crypto_aead_ivsize(aead);
2544 
2545 	if (req->cryptlen < ivsize)
2546 		return -EINVAL;
2547 
2548 	req->cryptlen -= ivsize;
2549 	req->assoclen += ivsize;
2550 
2551 	return aead_decrypt(req);
2552 }
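/*
 * Worked example (illustrative): for echainiv(authenc(...)) with a
 * 16-byte IV, the IV generated at encrypt time travels in front of the
 * ciphertext, so decrypt re-classifies those 16 bytes as trailing
 * associated data (cryptlen -= 16, assoclen += 16) before falling
 * through to the regular aead_decrypt() path.
 */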
2553 
2554 /*
2555  * allocate and map the ablkcipher extended descriptor for encrypt/decrypt
2556  */
2557 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2558 						       *req, int desc_bytes,
2559 						       bool *iv_contig_out)
2560 {
2561 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2562 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2563 	struct device *jrdev = ctx->jrdev;
2564 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2565 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2566 		       GFP_KERNEL : GFP_ATOMIC;
2567 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2568 	struct ablkcipher_edesc *edesc;
2569 	dma_addr_t iv_dma = 0;
2570 	bool iv_contig = false;
2571 	int sgc;
2572 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2573 	int sec4_sg_index;
2574 
2575 	src_nents = sg_count(req->src, req->nbytes);
2576 
2577 	if (req->dst != req->src)
2578 		dst_nents = sg_count(req->dst, req->nbytes);
2579 
2580 	if (likely(req->src == req->dst)) {
2581 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2582 				 DMA_BIDIRECTIONAL);
2583 	} else {
2584 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2585 				 DMA_TO_DEVICE);
2586 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2587 				 DMA_FROM_DEVICE);
2588 	}
2589 
2590 	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2591 	if (dma_mapping_error(jrdev, iv_dma)) {
2592 		dev_err(jrdev, "unable to map IV\n");
2593 		return ERR_PTR(-ENOMEM);
2594 	}
2595 
2596 	/*
2597 	 * Check if the IV is physically contiguous with the source data; if
2598 	 * so, use it in place, otherwise prepend it via the link table.
2599 	 */
2600 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2601 		iv_contig = true;
2602 	else
2603 		src_nents = src_nents ? : 1;
2604 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2605 			sizeof(struct sec4_sg_entry);
2606 
2607 	/* allocate space for base edesc and hw desc commands, link tables */
2608 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2609 			GFP_DMA | flags);
2610 	if (!edesc) {
2611 		dev_err(jrdev, "could not allocate extended descriptor\n");
2612 		return ERR_PTR(-ENOMEM);
2613 	}
2614 
2615 	edesc->src_nents = src_nents;
2616 	edesc->dst_nents = dst_nents;
2617 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2618 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2619 			 desc_bytes;
2620 
2621 	sec4_sg_index = 0;
2622 	if (!iv_contig) {
2623 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2624 		sg_to_sec4_sg_last(req->src, src_nents,
2625 				   edesc->sec4_sg + 1, 0);
2626 		sec4_sg_index += 1 + src_nents;
2627 	}
2628 
2629 	if (dst_nents) {
2630 		sg_to_sec4_sg_last(req->dst, dst_nents,
2631 			edesc->sec4_sg + sec4_sg_index, 0);
2632 	}
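	/*
	 * Link-table layout for the non-contiguous IV case (sketch):
	 *
	 *   entry 0:                IV (iv_dma, ivsize)
	 *   entries 1..src_nents:   source segments (last one flagged)
	 *   remaining entries:      destination segments, when dst != src
	 */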
2633 
2634 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2635 					    sec4_sg_bytes, DMA_TO_DEVICE);
2636 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		/* don't leak the mapped IV or the edesc on this error path */
		dma_unmap_single(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
2639 	}
2640 
2641 	edesc->iv_dma = iv_dma;
2642 
2643 #ifdef DEBUG
2644 	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
2645 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2646 		       sec4_sg_bytes, 1);
2647 #endif
2648 
2649 	*iv_contig_out = iv_contig;
2650 	return edesc;
2651 }
2652 
2653 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2654 {
2655 	struct ablkcipher_edesc *edesc;
2656 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2657 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2658 	struct device *jrdev = ctx->jrdev;
2659 	bool iv_contig;
2660 	u32 *desc;
2661 	int ret = 0;
2662 
2663 	/* allocate extended descriptor */
2664 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2665 				       CAAM_CMD_SZ, &iv_contig);
2666 	if (IS_ERR(edesc))
2667 		return PTR_ERR(edesc);
2668 
2669 	/* Create and submit job descriptor*/
2670 	init_ablkcipher_job(ctx->sh_desc_enc,
2671 		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2672 #ifdef DEBUG
2673 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2674 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2675 		       desc_bytes(edesc->hw_desc), 1);
2676 #endif
2677 	desc = edesc->hw_desc;
2678 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2679 
2680 	if (!ret) {
2681 		ret = -EINPROGRESS;
2682 	} else {
2683 		ablkcipher_unmap(jrdev, edesc, req);
2684 		kfree(edesc);
2685 	}
2686 
2687 	return ret;
2688 }
2689 
2690 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2691 {
2692 	struct ablkcipher_edesc *edesc;
2693 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2694 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2695 	struct device *jrdev = ctx->jrdev;
2696 	bool iv_contig;
2697 	u32 *desc;
2698 	int ret = 0;
2699 
2700 	/* allocate extended descriptor */
2701 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2702 				       CAAM_CMD_SZ, &iv_contig);
2703 	if (IS_ERR(edesc))
2704 		return PTR_ERR(edesc);
2705 
2706 	/* Create and submit job descriptor*/
2707 	init_ablkcipher_job(ctx->sh_desc_dec,
2708 		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2709 	desc = edesc->hw_desc;
2710 #ifdef DEBUG
2711 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2712 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2713 		       desc_bytes(edesc->hw_desc), 1);
2714 #endif
2715 
2716 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2717 	if (!ret) {
2718 		ret = -EINPROGRESS;
2719 	} else {
2720 		ablkcipher_unmap(jrdev, edesc, req);
2721 		kfree(edesc);
2722 	}
2723 
2724 	return ret;
2725 }
2726 
2727 /*
2728  * allocate and map the ablkcipher extended descriptor
2729  * for ablkcipher givencrypt
2730  */
2731 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2732 				struct skcipher_givcrypt_request *greq,
2733 				int desc_bytes,
2734 				bool *iv_contig_out)
2735 {
2736 	struct ablkcipher_request *req = &greq->creq;
2737 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2738 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2739 	struct device *jrdev = ctx->jrdev;
2740 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2741 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2742 		       GFP_KERNEL : GFP_ATOMIC;
2743 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2744 	struct ablkcipher_edesc *edesc;
2745 	dma_addr_t iv_dma = 0;
2746 	bool iv_contig = false;
2747 	int sgc;
2748 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2749 	int sec4_sg_index;
2750 
2751 	src_nents = sg_count(req->src, req->nbytes);
2752 
2753 	if (unlikely(req->dst != req->src))
2754 		dst_nents = sg_count(req->dst, req->nbytes);
2755 
2756 	if (likely(req->src == req->dst)) {
2757 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2758 				 DMA_BIDIRECTIONAL);
2759 	} else {
2760 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2761 				 DMA_TO_DEVICE);
2762 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2763 				 DMA_FROM_DEVICE);
2764 	}
2765 
2766 	/*
2767 	 * Check if the IV is physically contiguous with the destination; if
2768 	 * so, write it in place, otherwise route it through the link table.
2769 	 */
2770 	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
2771 	if (dma_mapping_error(jrdev, iv_dma)) {
2772 		dev_err(jrdev, "unable to map IV\n");
2773 		return ERR_PTR(-ENOMEM);
2774 	}
2775 
2776 	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2777 		iv_contig = true;
2778 	else
2779 		dst_nents = dst_nents ? : 1;
2780 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2781 			sizeof(struct sec4_sg_entry);
2782 
2783 	/* allocate space for base edesc and hw desc commands, link tables */
2784 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2785 			GFP_DMA | flags);
2786 	if (!edesc) {
2787 		dev_err(jrdev, "could not allocate extended descriptor\n");
2788 		return ERR_PTR(-ENOMEM);
2789 	}
2790 
2791 	edesc->src_nents = src_nents;
2792 	edesc->dst_nents = dst_nents;
2793 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2794 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2795 			 desc_bytes;
2796 
2797 	sec4_sg_index = 0;
2798 	if (src_nents) {
2799 		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2800 		sec4_sg_index += src_nents;
2801 	}
2802 
2803 	if (!iv_contig) {
2804 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2805 				   iv_dma, ivsize, 0);
2806 		sec4_sg_index += 1;
2807 		sg_to_sec4_sg_last(req->dst, dst_nents,
2808 				   edesc->sec4_sg + sec4_sg_index, 0);
2809 	}
2810 
2811 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2812 					    sec4_sg_bytes, DMA_TO_DEVICE);
2813 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		/* don't leak the mapped IV or the edesc on this error path */
		dma_unmap_single(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
2816 	}
2817 	edesc->iv_dma = iv_dma;
2818 
2819 #ifdef DEBUG
2820 	print_hex_dump(KERN_ERR,
2821 		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2822 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2823 		       sec4_sg_bytes, 1);
2824 #endif
2825 
2826 	*iv_contig_out = iv_contig;
2827 	return edesc;
2828 }
2829 
2830 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2831 {
2832 	struct ablkcipher_request *req = &creq->creq;
2833 	struct ablkcipher_edesc *edesc;
2834 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2835 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2836 	struct device *jrdev = ctx->jrdev;
2837 	bool iv_contig;
2838 	u32 *desc;
2839 	int ret = 0;
2840 
2841 	/* allocate extended descriptor */
2842 	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2843 				       CAAM_CMD_SZ, &iv_contig);
2844 	if (IS_ERR(edesc))
2845 		return PTR_ERR(edesc);
2846 
2847 	/* Create and submit job descriptor*/
2848 	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2849 				edesc, req, iv_contig);
2850 #ifdef DEBUG
2851 	print_hex_dump(KERN_ERR,
2852 		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2853 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2854 		       desc_bytes(edesc->hw_desc), 1);
2855 #endif
2856 	desc = edesc->hw_desc;
2857 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2858 
2859 	if (!ret) {
2860 		ret = -EINPROGRESS;
2861 	} else {
2862 		ablkcipher_unmap(jrdev, edesc, req);
2863 		kfree(edesc);
2864 	}
2865 
2866 	return ret;
2867 }
2868 
2869 #define template_aead		template_u.aead
2870 #define template_ablkcipher	template_u.ablkcipher
2871 struct caam_alg_template {
2872 	char name[CRYPTO_MAX_ALG_NAME];
2873 	char driver_name[CRYPTO_MAX_ALG_NAME];
2874 	unsigned int blocksize;
2875 	u32 type;
2876 	union {
2877 		struct ablkcipher_alg ablkcipher;
2878 	} template_u;
2879 	u32 class1_alg_type;
2880 	u32 class2_alg_type;
2881 	u32 alg_op;
2882 };
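/*
 * Illustrative sketch of how a template entry becomes a registered
 * crypto_alg (hypothetical, condensed from what caam_alg_alloc() and the
 * module init code later in this file do with each element):
 *
 *	struct caam_alg_template *t = &driver_algs[i];
 *	struct crypto_alg *alg = &t_alg->crypto_alg;
 *
 *	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
 *	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 *		 t->driver_name);
 *	alg->cra_blocksize = t->blocksize;
 *	alg->cra_priority = CAAM_CRA_PRIORITY;
 *	alg->cra_u.ablkcipher = t->template_ablkcipher;
 *	err = crypto_register_alg(alg);
 */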
2883 
2884 static struct caam_alg_template driver_algs[] = {
2885 	/* ablkcipher descriptor */
2886 	{
2887 		.name = "cbc(aes)",
2888 		.driver_name = "cbc-aes-caam",
2889 		.blocksize = AES_BLOCK_SIZE,
2890 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2891 		.template_ablkcipher = {
2892 			.setkey = ablkcipher_setkey,
2893 			.encrypt = ablkcipher_encrypt,
2894 			.decrypt = ablkcipher_decrypt,
2895 			.givencrypt = ablkcipher_givencrypt,
2896 			.geniv = "<built-in>",
2897 			.min_keysize = AES_MIN_KEY_SIZE,
2898 			.max_keysize = AES_MAX_KEY_SIZE,
2899 			.ivsize = AES_BLOCK_SIZE,
2900 			},
2901 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2902 	},
2903 	{
2904 		.name = "cbc(des3_ede)",
2905 		.driver_name = "cbc-3des-caam",
2906 		.blocksize = DES3_EDE_BLOCK_SIZE,
2907 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2908 		.template_ablkcipher = {
2909 			.setkey = ablkcipher_setkey,
2910 			.encrypt = ablkcipher_encrypt,
2911 			.decrypt = ablkcipher_decrypt,
2912 			.givencrypt = ablkcipher_givencrypt,
2913 			.geniv = "<built-in>",
2914 			.min_keysize = DES3_EDE_KEY_SIZE,
2915 			.max_keysize = DES3_EDE_KEY_SIZE,
2916 			.ivsize = DES3_EDE_BLOCK_SIZE,
2917 			},
2918 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2919 	},
2920 	{
2921 		.name = "cbc(des)",
2922 		.driver_name = "cbc-des-caam",
2923 		.blocksize = DES_BLOCK_SIZE,
2924 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2925 		.template_ablkcipher = {
2926 			.setkey = ablkcipher_setkey,
2927 			.encrypt = ablkcipher_encrypt,
2928 			.decrypt = ablkcipher_decrypt,
2929 			.givencrypt = ablkcipher_givencrypt,
2930 			.geniv = "<built-in>",
2931 			.min_keysize = DES_KEY_SIZE,
2932 			.max_keysize = DES_KEY_SIZE,
2933 			.ivsize = DES_BLOCK_SIZE,
2934 			},
2935 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2936 	},
2937 	{
2938 		.name = "ctr(aes)",
2939 		.driver_name = "ctr-aes-caam",
2940 		.blocksize = 1,
2941 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2942 		.template_ablkcipher = {
2943 			.setkey = ablkcipher_setkey,
2944 			.encrypt = ablkcipher_encrypt,
2945 			.decrypt = ablkcipher_decrypt,
2946 			.geniv = "chainiv",
2947 			.min_keysize = AES_MIN_KEY_SIZE,
2948 			.max_keysize = AES_MAX_KEY_SIZE,
2949 			.ivsize = AES_BLOCK_SIZE,
2950 			},
2951 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2952 	},
2953 	{
2954 		.name = "rfc3686(ctr(aes))",
2955 		.driver_name = "rfc3686-ctr-aes-caam",
2956 		.blocksize = 1,
2957 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2958 		.template_ablkcipher = {
2959 			.setkey = ablkcipher_setkey,
2960 			.encrypt = ablkcipher_encrypt,
2961 			.decrypt = ablkcipher_decrypt,
2962 			.givencrypt = ablkcipher_givencrypt,
2963 			.geniv = "<built-in>",
2964 			.min_keysize = AES_MIN_KEY_SIZE +
2965 				       CTR_RFC3686_NONCE_SIZE,
2966 			.max_keysize = AES_MAX_KEY_SIZE +
2967 				       CTR_RFC3686_NONCE_SIZE,
2968 			.ivsize = CTR_RFC3686_IV_SIZE,
2969 			},
2970 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2971 	},
2972 	{
2973 		.name = "xts(aes)",
2974 		.driver_name = "xts-aes-caam",
2975 		.blocksize = AES_BLOCK_SIZE,
2976 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2977 		.template_ablkcipher = {
2978 			.setkey = xts_ablkcipher_setkey,
2979 			.encrypt = ablkcipher_encrypt,
2980 			.decrypt = ablkcipher_decrypt,
2981 			.geniv = "eseqiv",
2982 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
2983 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
2984 			.ivsize = AES_BLOCK_SIZE,
2985 			},
2986 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
2987 	},
2988 };
2989 
2990 static struct caam_aead_alg driver_aeads[] = {
2991 	{
2992 		.aead = {
2993 			.base = {
2994 				.cra_name = "rfc4106(gcm(aes))",
2995 				.cra_driver_name = "rfc4106-gcm-aes-caam",
2996 				.cra_blocksize = 1,
2997 			},
2998 			.setkey = rfc4106_setkey,
2999 			.setauthsize = rfc4106_setauthsize,
3000 			.encrypt = ipsec_gcm_encrypt,
3001 			.decrypt = ipsec_gcm_decrypt,
3002 			.ivsize = 8,
3003 			.maxauthsize = AES_BLOCK_SIZE,
3004 		},
3005 		.caam = {
3006 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3007 		},
3008 	},
3009 	{
3010 		.aead = {
3011 			.base = {
3012 				.cra_name = "rfc4543(gcm(aes))",
3013 				.cra_driver_name = "rfc4543-gcm-aes-caam",
3014 				.cra_blocksize = 1,
3015 			},
3016 			.setkey = rfc4543_setkey,
3017 			.setauthsize = rfc4543_setauthsize,
3018 			.encrypt = ipsec_gcm_encrypt,
3019 			.decrypt = ipsec_gcm_decrypt,
3020 			.ivsize = 8,
3021 			.maxauthsize = AES_BLOCK_SIZE,
3022 		},
3023 		.caam = {
3024 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3025 		},
3026 	},
3027 	/* Galois Counter Mode */
3028 	{
3029 		.aead = {
3030 			.base = {
3031 				.cra_name = "gcm(aes)",
3032 				.cra_driver_name = "gcm-aes-caam",
3033 				.cra_blocksize = 1,
3034 			},
3035 			.setkey = gcm_setkey,
3036 			.setauthsize = gcm_setauthsize,
3037 			.encrypt = gcm_encrypt,
3038 			.decrypt = gcm_decrypt,
3039 			.ivsize = 12,
3040 			.maxauthsize = AES_BLOCK_SIZE,
3041 		},
3042 		.caam = {
3043 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3044 		},
3045 	},
3046 	/* single-pass ipsec_esp descriptor */
3047 	{
3048 		.aead = {
3049 			.base = {
3050 				.cra_name = "authenc(hmac(md5),"
3051 					    "ecb(cipher_null))",
3052 				.cra_driver_name = "authenc-hmac-md5-"
3053 						   "ecb-cipher_null-caam",
3054 				.cra_blocksize = NULL_BLOCK_SIZE,
3055 			},
3056 			.setkey = aead_setkey,
3057 			.setauthsize = aead_setauthsize,
3058 			.encrypt = aead_encrypt,
3059 			.decrypt = aead_decrypt,
3060 			.ivsize = NULL_IV_SIZE,
3061 			.maxauthsize = MD5_DIGEST_SIZE,
3062 		},
3063 		.caam = {
3064 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3065 					   OP_ALG_AAI_HMAC_PRECOMP,
3066 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3067 		},
3068 	},
3069 	{
3070 		.aead = {
3071 			.base = {
3072 				.cra_name = "authenc(hmac(sha1),"
3073 					    "ecb(cipher_null))",
3074 				.cra_driver_name = "authenc-hmac-sha1-"
3075 						   "ecb-cipher_null-caam",
3076 				.cra_blocksize = NULL_BLOCK_SIZE,
3077 			},
3078 			.setkey = aead_setkey,
3079 			.setauthsize = aead_setauthsize,
3080 			.encrypt = aead_encrypt,
3081 			.decrypt = aead_decrypt,
3082 			.ivsize = NULL_IV_SIZE,
3083 			.maxauthsize = SHA1_DIGEST_SIZE,
3084 		},
3085 		.caam = {
3086 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3087 					   OP_ALG_AAI_HMAC_PRECOMP,
3088 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3089 		},
3090 	},
3091 	{
3092 		.aead = {
3093 			.base = {
3094 				.cra_name = "authenc(hmac(sha224),"
3095 					    "ecb(cipher_null))",
3096 				.cra_driver_name = "authenc-hmac-sha224-"
3097 						   "ecb-cipher_null-caam",
3098 				.cra_blocksize = NULL_BLOCK_SIZE,
3099 			},
3100 			.setkey = aead_setkey,
3101 			.setauthsize = aead_setauthsize,
3102 			.encrypt = aead_encrypt,
3103 			.decrypt = aead_decrypt,
3104 			.ivsize = NULL_IV_SIZE,
3105 			.maxauthsize = SHA224_DIGEST_SIZE,
3106 		},
3107 		.caam = {
3108 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3109 					   OP_ALG_AAI_HMAC_PRECOMP,
3110 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3111 		},
3112 	},
3113 	{
3114 		.aead = {
3115 			.base = {
3116 				.cra_name = "authenc(hmac(sha256),"
3117 					    "ecb(cipher_null))",
3118 				.cra_driver_name = "authenc-hmac-sha256-"
3119 						   "ecb-cipher_null-caam",
3120 				.cra_blocksize = NULL_BLOCK_SIZE,
3121 			},
3122 			.setkey = aead_setkey,
3123 			.setauthsize = aead_setauthsize,
3124 			.encrypt = aead_encrypt,
3125 			.decrypt = aead_decrypt,
3126 			.ivsize = NULL_IV_SIZE,
3127 			.maxauthsize = SHA256_DIGEST_SIZE,
3128 		},
3129 		.caam = {
3130 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3131 					   OP_ALG_AAI_HMAC_PRECOMP,
3132 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3133 		},
3134 	},
3135 	{
3136 		.aead = {
3137 			.base = {
3138 				.cra_name = "authenc(hmac(sha384),"
3139 					    "ecb(cipher_null))",
3140 				.cra_driver_name = "authenc-hmac-sha384-"
3141 						   "ecb-cipher_null-caam",
3142 				.cra_blocksize = NULL_BLOCK_SIZE,
3143 			},
3144 			.setkey = aead_setkey,
3145 			.setauthsize = aead_setauthsize,
3146 			.encrypt = aead_encrypt,
3147 			.decrypt = aead_decrypt,
3148 			.ivsize = NULL_IV_SIZE,
3149 			.maxauthsize = SHA384_DIGEST_SIZE,
3150 		},
3151 		.caam = {
3152 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3153 					   OP_ALG_AAI_HMAC_PRECOMP,
3154 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3155 		},
3156 	},
3157 	{
3158 		.aead = {
3159 			.base = {
3160 				.cra_name = "authenc(hmac(sha512),"
3161 					    "ecb(cipher_null))",
3162 				.cra_driver_name = "authenc-hmac-sha512-"
3163 						   "ecb-cipher_null-caam",
3164 				.cra_blocksize = NULL_BLOCK_SIZE,
3165 			},
3166 			.setkey = aead_setkey,
3167 			.setauthsize = aead_setauthsize,
3168 			.encrypt = aead_encrypt,
3169 			.decrypt = aead_decrypt,
3170 			.ivsize = NULL_IV_SIZE,
3171 			.maxauthsize = SHA512_DIGEST_SIZE,
3172 		},
3173 		.caam = {
3174 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3175 					   OP_ALG_AAI_HMAC_PRECOMP,
3176 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3177 		},
3178 	},
3179 	{
3180 		.aead = {
3181 			.base = {
3182 				.cra_name = "authenc(hmac(md5),cbc(aes))",
3183 				.cra_driver_name = "authenc-hmac-md5-"
3184 						   "cbc-aes-caam",
3185 				.cra_blocksize = AES_BLOCK_SIZE,
3186 			},
3187 			.setkey = aead_setkey,
3188 			.setauthsize = aead_setauthsize,
3189 			.encrypt = aead_encrypt,
3190 			.decrypt = aead_decrypt,
3191 			.ivsize = AES_BLOCK_SIZE,
3192 			.maxauthsize = MD5_DIGEST_SIZE,
3193 		},
3194 		.caam = {
3195 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3196 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3197 					   OP_ALG_AAI_HMAC_PRECOMP,
3198 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3199 		},
3200 	},
3201 	{
3202 		.aead = {
3203 			.base = {
3204 				.cra_name = "echainiv(authenc(hmac(md5),"
3205 					    "cbc(aes)))",
3206 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3207 						   "cbc-aes-caam",
3208 				.cra_blocksize = AES_BLOCK_SIZE,
3209 			},
3210 			.setkey = aead_setkey,
3211 			.setauthsize = aead_setauthsize,
3212 			.encrypt = aead_encrypt,
3213 			.decrypt = aead_givdecrypt,
3214 			.ivsize = AES_BLOCK_SIZE,
3215 			.maxauthsize = MD5_DIGEST_SIZE,
3216 		},
3217 		.caam = {
3218 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3219 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3220 					   OP_ALG_AAI_HMAC_PRECOMP,
3221 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3222 			.geniv = true,
3223 		},
3224 	},
3225 	{
3226 		.aead = {
3227 			.base = {
3228 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3229 				.cra_driver_name = "authenc-hmac-sha1-"
3230 						   "cbc-aes-caam",
3231 				.cra_blocksize = AES_BLOCK_SIZE,
3232 			},
3233 			.setkey = aead_setkey,
3234 			.setauthsize = aead_setauthsize,
3235 			.encrypt = aead_encrypt,
3236 			.decrypt = aead_decrypt,
3237 			.ivsize = AES_BLOCK_SIZE,
3238 			.maxauthsize = SHA1_DIGEST_SIZE,
3239 		},
3240 		.caam = {
3241 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3242 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3243 					   OP_ALG_AAI_HMAC_PRECOMP,
3244 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3245 		},
3246 	},
3247 	{
3248 		.aead = {
3249 			.base = {
3250 				.cra_name = "echainiv(authenc(hmac(sha1),"
3251 					    "cbc(aes)))",
3252 				.cra_driver_name = "echainiv-authenc-"
3253 						   "hmac-sha1-cbc-aes-caam",
3254 				.cra_blocksize = AES_BLOCK_SIZE,
3255 			},
3256 			.setkey = aead_setkey,
3257 			.setauthsize = aead_setauthsize,
3258 			.encrypt = aead_encrypt,
3259 			.decrypt = aead_givdecrypt,
3260 			.ivsize = AES_BLOCK_SIZE,
3261 			.maxauthsize = SHA1_DIGEST_SIZE,
3262 		},
3263 		.caam = {
3264 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3265 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3266 					   OP_ALG_AAI_HMAC_PRECOMP,
3267 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3268 			.geniv = true,
3269 		},
3270 	},
3271 	{
3272 		.aead = {
3273 			.base = {
3274 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
3275 				.cra_driver_name = "authenc-hmac-sha224-"
3276 						   "cbc-aes-caam",
3277 				.cra_blocksize = AES_BLOCK_SIZE,
3278 			},
3279 			.setkey = aead_setkey,
3280 			.setauthsize = aead_setauthsize,
3281 			.encrypt = aead_encrypt,
3282 			.decrypt = aead_decrypt,
3283 			.ivsize = AES_BLOCK_SIZE,
3284 			.maxauthsize = SHA224_DIGEST_SIZE,
3285 		},
3286 		.caam = {
3287 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3288 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3289 					   OP_ALG_AAI_HMAC_PRECOMP,
3290 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3291 		},
3292 	},
3293 	{
3294 		.aead = {
3295 			.base = {
3296 				.cra_name = "echainiv(authenc(hmac(sha224),"
3297 					    "cbc(aes)))",
3298 				.cra_driver_name = "echainiv-authenc-"
3299 						   "hmac-sha224-cbc-aes-caam",
3300 				.cra_blocksize = AES_BLOCK_SIZE,
3301 			},
3302 			.setkey = aead_setkey,
3303 			.setauthsize = aead_setauthsize,
3304 			.encrypt = aead_encrypt,
3305 			.decrypt = aead_givdecrypt,
3306 			.ivsize = AES_BLOCK_SIZE,
3307 			.maxauthsize = SHA224_DIGEST_SIZE,
3308 		},
3309 		.caam = {
3310 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3311 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3312 					   OP_ALG_AAI_HMAC_PRECOMP,
3313 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3314 			.geniv = true,
3315 		},
3316 	},
3317 	{
3318 		.aead = {
3319 			.base = {
3320 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
3321 				.cra_driver_name = "authenc-hmac-sha256-"
3322 						   "cbc-aes-caam",
3323 				.cra_blocksize = AES_BLOCK_SIZE,
3324 			},
3325 			.setkey = aead_setkey,
3326 			.setauthsize = aead_setauthsize,
3327 			.encrypt = aead_encrypt,
3328 			.decrypt = aead_decrypt,
3329 			.ivsize = AES_BLOCK_SIZE,
3330 			.maxauthsize = SHA256_DIGEST_SIZE,
3331 		},
3332 		.caam = {
3333 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3334 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3335 					   OP_ALG_AAI_HMAC_PRECOMP,
3336 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3337 		},
3338 	},
3339 	{
3340 		.aead = {
3341 			.base = {
3342 				.cra_name = "echainiv(authenc(hmac(sha256),"
3343 					    "cbc(aes)))",
3344 				.cra_driver_name = "echainiv-authenc-"
3345 						   "hmac-sha256-cbc-aes-caam",
3346 				.cra_blocksize = AES_BLOCK_SIZE,
3347 			},
3348 			.setkey = aead_setkey,
3349 			.setauthsize = aead_setauthsize,
3350 			.encrypt = aead_encrypt,
3351 			.decrypt = aead_givdecrypt,
3352 			.ivsize = AES_BLOCK_SIZE,
3353 			.maxauthsize = SHA256_DIGEST_SIZE,
3354 		},
3355 		.caam = {
3356 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3357 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3358 					   OP_ALG_AAI_HMAC_PRECOMP,
3359 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3360 			.geniv = true,
3361 		},
3362 	},
3363 	{
3364 		.aead = {
3365 			.base = {
3366 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
3367 				.cra_driver_name = "authenc-hmac-sha384-"
3368 						   "cbc-aes-caam",
3369 				.cra_blocksize = AES_BLOCK_SIZE,
3370 			},
3371 			.setkey = aead_setkey,
3372 			.setauthsize = aead_setauthsize,
3373 			.encrypt = aead_encrypt,
3374 			.decrypt = aead_decrypt,
3375 			.ivsize = AES_BLOCK_SIZE,
3376 			.maxauthsize = SHA384_DIGEST_SIZE,
3377 		},
3378 		.caam = {
3379 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3380 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3381 					   OP_ALG_AAI_HMAC_PRECOMP,
3382 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3383 		},
3384 	},
3385 	{
3386 		.aead = {
3387 			.base = {
3388 				.cra_name = "echainiv(authenc(hmac(sha384),"
3389 					    "cbc(aes)))",
3390 				.cra_driver_name = "echainiv-authenc-"
3391 						   "hmac-sha384-cbc-aes-caam",
3392 				.cra_blocksize = AES_BLOCK_SIZE,
3393 			},
3394 			.setkey = aead_setkey,
3395 			.setauthsize = aead_setauthsize,
3396 			.encrypt = aead_encrypt,
3397 			.decrypt = aead_givdecrypt,
3398 			.ivsize = AES_BLOCK_SIZE,
3399 			.maxauthsize = SHA384_DIGEST_SIZE,
3400 		},
3401 		.caam = {
3402 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3403 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3404 					   OP_ALG_AAI_HMAC_PRECOMP,
3405 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3406 			.geniv = true,
3407 		},
3408 	},
3409 	{
3410 		.aead = {
3411 			.base = {
3412 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
3413 				.cra_driver_name = "authenc-hmac-sha512-"
3414 						   "cbc-aes-caam",
3415 				.cra_blocksize = AES_BLOCK_SIZE,
3416 			},
3417 			.setkey = aead_setkey,
3418 			.setauthsize = aead_setauthsize,
3419 			.encrypt = aead_encrypt,
3420 			.decrypt = aead_decrypt,
3421 			.ivsize = AES_BLOCK_SIZE,
3422 			.maxauthsize = SHA512_DIGEST_SIZE,
3423 		},
3424 		.caam = {
3425 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3426 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3427 					   OP_ALG_AAI_HMAC_PRECOMP,
3428 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3429 		},
3430 	},
3431 	{
3432 		.aead = {
3433 			.base = {
3434 				.cra_name = "echainiv(authenc(hmac(sha512),"
3435 					    "cbc(aes)))",
3436 				.cra_driver_name = "echainiv-authenc-"
3437 						   "hmac-sha512-cbc-aes-caam",
3438 				.cra_blocksize = AES_BLOCK_SIZE,
3439 			},
3440 			.setkey = aead_setkey,
3441 			.setauthsize = aead_setauthsize,
3442 			.encrypt = aead_encrypt,
3443 			.decrypt = aead_givdecrypt,
3444 			.ivsize = AES_BLOCK_SIZE,
3445 			.maxauthsize = SHA512_DIGEST_SIZE,
3446 		},
3447 		.caam = {
3448 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3449 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3450 					   OP_ALG_AAI_HMAC_PRECOMP,
3451 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3452 			.geniv = true,
3453 		},
3454 	},
3455 	{
3456 		.aead = {
3457 			.base = {
3458 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3459 				.cra_driver_name = "authenc-hmac-md5-"
3460 						   "cbc-des3_ede-caam",
3461 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3462 			},
3463 			.setkey = aead_setkey,
3464 			.setauthsize = aead_setauthsize,
3465 			.encrypt = aead_encrypt,
3466 			.decrypt = aead_decrypt,
3467 			.ivsize = DES3_EDE_BLOCK_SIZE,
3468 			.maxauthsize = MD5_DIGEST_SIZE,
3469 		},
3470 		.caam = {
3471 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3472 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3473 					   OP_ALG_AAI_HMAC_PRECOMP,
3474 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3475 		},
3476 	},
3477 	{
3478 		.aead = {
3479 			.base = {
3480 				.cra_name = "echainiv(authenc(hmac(md5),"
3481 					    "cbc(des3_ede)))",
3482 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3483 						   "cbc-des3_ede-caam",
3484 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3485 			},
3486 			.setkey = aead_setkey,
3487 			.setauthsize = aead_setauthsize,
3488 			.encrypt = aead_encrypt,
3489 			.decrypt = aead_givdecrypt,
3490 			.ivsize = DES3_EDE_BLOCK_SIZE,
3491 			.maxauthsize = MD5_DIGEST_SIZE,
3492 		},
3493 		.caam = {
3494 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3495 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3496 					   OP_ALG_AAI_HMAC_PRECOMP,
3497 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3498 			.geniv = true,
3499 		},
3500 	},
3501 	{
3502 		.aead = {
3503 			.base = {
3504 				.cra_name = "authenc(hmac(sha1),"
3505 					    "cbc(des3_ede))",
3506 				.cra_driver_name = "authenc-hmac-sha1-"
3507 						   "cbc-des3_ede-caam",
3508 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3509 			},
3510 			.setkey = aead_setkey,
3511 			.setauthsize = aead_setauthsize,
3512 			.encrypt = aead_encrypt,
3513 			.decrypt = aead_decrypt,
3514 			.ivsize = DES3_EDE_BLOCK_SIZE,
3515 			.maxauthsize = SHA1_DIGEST_SIZE,
3516 		},
3517 		.caam = {
3518 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3519 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3520 					   OP_ALG_AAI_HMAC_PRECOMP,
3521 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3522 		},
3523 	},
3524 	{
3525 		.aead = {
3526 			.base = {
3527 				.cra_name = "echainiv(authenc(hmac(sha1),"
3528 					    "cbc(des3_ede)))",
3529 				.cra_driver_name = "echainiv-authenc-"
3530 						   "hmac-sha1-"
3531 						   "cbc-des3_ede-caam",
3532 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3533 			},
3534 			.setkey = aead_setkey,
3535 			.setauthsize = aead_setauthsize,
3536 			.encrypt = aead_encrypt,
3537 			.decrypt = aead_givdecrypt,
3538 			.ivsize = DES3_EDE_BLOCK_SIZE,
3539 			.maxauthsize = SHA1_DIGEST_SIZE,
3540 		},
3541 		.caam = {
3542 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3543 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3544 					   OP_ALG_AAI_HMAC_PRECOMP,
3545 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3546 			.geniv = true,
3547 		},
3548 	},
3549 	{
3550 		.aead = {
3551 			.base = {
3552 				.cra_name = "authenc(hmac(sha224),"
3553 					    "cbc(des3_ede))",
3554 				.cra_driver_name = "authenc-hmac-sha224-"
3555 						   "cbc-des3_ede-caam",
3556 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3557 			},
3558 			.setkey = aead_setkey,
3559 			.setauthsize = aead_setauthsize,
3560 			.encrypt = aead_encrypt,
3561 			.decrypt = aead_decrypt,
3562 			.ivsize = DES3_EDE_BLOCK_SIZE,
3563 			.maxauthsize = SHA224_DIGEST_SIZE,
3564 		},
3565 		.caam = {
3566 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3567 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3568 					   OP_ALG_AAI_HMAC_PRECOMP,
3569 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3570 		},
3571 	},
3572 	{
3573 		.aead = {
3574 			.base = {
3575 				.cra_name = "echainiv(authenc(hmac(sha224),"
3576 					    "cbc(des3_ede)))",
3577 				.cra_driver_name = "echainiv-authenc-"
3578 						   "hmac-sha224-"
3579 						   "cbc-des3_ede-caam",
3580 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3581 			},
3582 			.setkey = aead_setkey,
3583 			.setauthsize = aead_setauthsize,
3584 			.encrypt = aead_encrypt,
3585 			.decrypt = aead_givdecrypt,
3586 			.ivsize = DES3_EDE_BLOCK_SIZE,
3587 			.maxauthsize = SHA224_DIGEST_SIZE,
3588 		},
3589 		.caam = {
3590 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3591 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3592 					   OP_ALG_AAI_HMAC_PRECOMP,
3593 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3594 			.geniv = true,
3595 		},
3596 	},
3597 	{
3598 		.aead = {
3599 			.base = {
3600 				.cra_name = "authenc(hmac(sha256),"
3601 					    "cbc(des3_ede))",
3602 				.cra_driver_name = "authenc-hmac-sha256-"
3603 						   "cbc-des3_ede-caam",
3604 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3605 			},
3606 			.setkey = aead_setkey,
3607 			.setauthsize = aead_setauthsize,
3608 			.encrypt = aead_encrypt,
3609 			.decrypt = aead_decrypt,
3610 			.ivsize = DES3_EDE_BLOCK_SIZE,
3611 			.maxauthsize = SHA256_DIGEST_SIZE,
3612 		},
3613 		.caam = {
3614 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3615 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3616 					   OP_ALG_AAI_HMAC_PRECOMP,
3617 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3618 		},
3619 	},
3620 	{
3621 		.aead = {
3622 			.base = {
3623 				.cra_name = "echainiv(authenc(hmac(sha256),"
3624 					    "cbc(des3_ede)))",
3625 				.cra_driver_name = "echainiv-authenc-"
3626 						   "hmac-sha256-"
3627 						   "cbc-des3_ede-caam",
3628 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3629 			},
3630 			.setkey = aead_setkey,
3631 			.setauthsize = aead_setauthsize,
3632 			.encrypt = aead_encrypt,
3633 			.decrypt = aead_givdecrypt,
3634 			.ivsize = DES3_EDE_BLOCK_SIZE,
3635 			.maxauthsize = SHA256_DIGEST_SIZE,
3636 		},
3637 		.caam = {
3638 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3639 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3640 					   OP_ALG_AAI_HMAC_PRECOMP,
3641 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3642 			.geniv = true,
3643 		},
3644 	},
3645 	{
3646 		.aead = {
3647 			.base = {
3648 				.cra_name = "authenc(hmac(sha384),"
3649 					    "cbc(des3_ede))",
3650 				.cra_driver_name = "authenc-hmac-sha384-"
3651 						   "cbc-des3_ede-caam",
3652 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3653 			},
3654 			.setkey = aead_setkey,
3655 			.setauthsize = aead_setauthsize,
3656 			.encrypt = aead_encrypt,
3657 			.decrypt = aead_decrypt,
3658 			.ivsize = DES3_EDE_BLOCK_SIZE,
3659 			.maxauthsize = SHA384_DIGEST_SIZE,
3660 		},
3661 		.caam = {
3662 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3663 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3664 					   OP_ALG_AAI_HMAC_PRECOMP,
3665 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3666 		},
3667 	},
3668 	{
3669 		.aead = {
3670 			.base = {
3671 				.cra_name = "echainiv(authenc(hmac(sha384),"
3672 					    "cbc(des3_ede)))",
3673 				.cra_driver_name = "echainiv-authenc-"
3674 						   "hmac-sha384-"
3675 						   "cbc-des3_ede-caam",
3676 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3677 			},
3678 			.setkey = aead_setkey,
3679 			.setauthsize = aead_setauthsize,
3680 			.encrypt = aead_encrypt,
3681 			.decrypt = aead_givdecrypt,
3682 			.ivsize = DES3_EDE_BLOCK_SIZE,
3683 			.maxauthsize = SHA384_DIGEST_SIZE,
3684 		},
3685 		.caam = {
3686 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3687 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3688 					   OP_ALG_AAI_HMAC_PRECOMP,
3689 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3690 			.geniv = true,
3691 		},
3692 	},
3693 	{
3694 		.aead = {
3695 			.base = {
3696 				.cra_name = "authenc(hmac(sha512),"
3697 					    "cbc(des3_ede))",
3698 				.cra_driver_name = "authenc-hmac-sha512-"
3699 						   "cbc-des3_ede-caam",
3700 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3701 			},
3702 			.setkey = aead_setkey,
3703 			.setauthsize = aead_setauthsize,
3704 			.encrypt = aead_encrypt,
3705 			.decrypt = aead_decrypt,
3706 			.ivsize = DES3_EDE_BLOCK_SIZE,
3707 			.maxauthsize = SHA512_DIGEST_SIZE,
3708 		},
3709 		.caam = {
3710 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3711 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3712 					   OP_ALG_AAI_HMAC_PRECOMP,
3713 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3714 		},
3715 	},
3716 	{
3717 		.aead = {
3718 			.base = {
3719 				.cra_name = "echainiv(authenc(hmac(sha512),"
3720 					    "cbc(des3_ede)))",
3721 				.cra_driver_name = "echainiv-authenc-"
3722 						   "hmac-sha512-"
3723 						   "cbc-des3_ede-caam",
3724 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3725 			},
3726 			.setkey = aead_setkey,
3727 			.setauthsize = aead_setauthsize,
3728 			.encrypt = aead_encrypt,
3729 			.decrypt = aead_givdecrypt,
3730 			.ivsize = DES3_EDE_BLOCK_SIZE,
3731 			.maxauthsize = SHA512_DIGEST_SIZE,
3732 		},
3733 		.caam = {
3734 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3735 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3736 					   OP_ALG_AAI_HMAC_PRECOMP,
3737 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3738 			.geniv = true,
3739 		},
3740 	},
3741 	{
3742 		.aead = {
3743 			.base = {
3744 				.cra_name = "authenc(hmac(md5),cbc(des))",
3745 				.cra_driver_name = "authenc-hmac-md5-"
3746 						   "cbc-des-caam",
3747 				.cra_blocksize = DES_BLOCK_SIZE,
3748 			},
3749 			.setkey = aead_setkey,
3750 			.setauthsize = aead_setauthsize,
3751 			.encrypt = aead_encrypt,
3752 			.decrypt = aead_decrypt,
3753 			.ivsize = DES_BLOCK_SIZE,
3754 			.maxauthsize = MD5_DIGEST_SIZE,
3755 		},
3756 		.caam = {
3757 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3758 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3759 					   OP_ALG_AAI_HMAC_PRECOMP,
3760 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3761 		},
3762 	},
3763 	{
3764 		.aead = {
3765 			.base = {
3766 				.cra_name = "echainiv(authenc(hmac(md5),"
3767 					    "cbc(des)))",
3768 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3769 						   "cbc-des-caam",
3770 				.cra_blocksize = DES_BLOCK_SIZE,
3771 			},
3772 			.setkey = aead_setkey,
3773 			.setauthsize = aead_setauthsize,
3774 			.encrypt = aead_encrypt,
3775 			.decrypt = aead_givdecrypt,
3776 			.ivsize = DES_BLOCK_SIZE,
3777 			.maxauthsize = MD5_DIGEST_SIZE,
3778 		},
3779 		.caam = {
3780 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3781 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3782 					   OP_ALG_AAI_HMAC_PRECOMP,
3783 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3784 			.geniv = true,
3785 		},
3786 	},
3787 	{
3788 		.aead = {
3789 			.base = {
3790 				.cra_name = "authenc(hmac(sha1),cbc(des))",
3791 				.cra_driver_name = "authenc-hmac-sha1-"
3792 						   "cbc-des-caam",
3793 				.cra_blocksize = DES_BLOCK_SIZE,
3794 			},
3795 			.setkey = aead_setkey,
3796 			.setauthsize = aead_setauthsize,
3797 			.encrypt = aead_encrypt,
3798 			.decrypt = aead_decrypt,
3799 			.ivsize = DES_BLOCK_SIZE,
3800 			.maxauthsize = SHA1_DIGEST_SIZE,
3801 		},
3802 		.caam = {
3803 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3804 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3805 					   OP_ALG_AAI_HMAC_PRECOMP,
3806 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3807 		},
3808 	},
3809 	{
3810 		.aead = {
3811 			.base = {
3812 				.cra_name = "echainiv(authenc(hmac(sha1),"
3813 					    "cbc(des)))",
3814 				.cra_driver_name = "echainiv-authenc-"
3815 						   "hmac-sha1-cbc-des-caam",
3816 				.cra_blocksize = DES_BLOCK_SIZE,
3817 			},
3818 			.setkey = aead_setkey,
3819 			.setauthsize = aead_setauthsize,
3820 			.encrypt = aead_encrypt,
3821 			.decrypt = aead_givdecrypt,
3822 			.ivsize = DES_BLOCK_SIZE,
3823 			.maxauthsize = SHA1_DIGEST_SIZE,
3824 		},
3825 		.caam = {
3826 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3827 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3828 					   OP_ALG_AAI_HMAC_PRECOMP,
3829 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3830 			.geniv = true,
3831 		},
3832 	},
3833 	{
3834 		.aead = {
3835 			.base = {
3836 				.cra_name = "authenc(hmac(sha224),cbc(des))",
3837 				.cra_driver_name = "authenc-hmac-sha224-"
3838 						   "cbc-des-caam",
3839 				.cra_blocksize = DES_BLOCK_SIZE,
3840 			},
3841 			.setkey = aead_setkey,
3842 			.setauthsize = aead_setauthsize,
3843 			.encrypt = aead_encrypt,
3844 			.decrypt = aead_decrypt,
3845 			.ivsize = DES_BLOCK_SIZE,
3846 			.maxauthsize = SHA224_DIGEST_SIZE,
3847 		},
3848 		.caam = {
3849 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3850 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3851 					   OP_ALG_AAI_HMAC_PRECOMP,
3852 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3853 		},
3854 	},
3855 	{
3856 		.aead = {
3857 			.base = {
3858 				.cra_name = "echainiv(authenc(hmac(sha224),"
3859 					    "cbc(des)))",
3860 				.cra_driver_name = "echainiv-authenc-"
3861 						   "hmac-sha224-cbc-des-caam",
3862 				.cra_blocksize = DES_BLOCK_SIZE,
3863 			},
3864 			.setkey = aead_setkey,
3865 			.setauthsize = aead_setauthsize,
3866 			.encrypt = aead_encrypt,
3867 			.decrypt = aead_givdecrypt,
3868 			.ivsize = DES_BLOCK_SIZE,
3869 			.maxauthsize = SHA224_DIGEST_SIZE,
3870 		},
3871 		.caam = {
3872 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3873 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3874 					   OP_ALG_AAI_HMAC_PRECOMP,
3875 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3876 			.geniv = true,
3877 		},
3878 	},
3879 	{
3880 		.aead = {
3881 			.base = {
3882 				.cra_name = "authenc(hmac(sha256),cbc(des))",
3883 				.cra_driver_name = "authenc-hmac-sha256-"
3884 						   "cbc-des-caam",
3885 				.cra_blocksize = DES_BLOCK_SIZE,
3886 			},
3887 			.setkey = aead_setkey,
3888 			.setauthsize = aead_setauthsize,
3889 			.encrypt = aead_encrypt,
3890 			.decrypt = aead_decrypt,
3891 			.ivsize = DES_BLOCK_SIZE,
3892 			.maxauthsize = SHA256_DIGEST_SIZE,
3893 		},
3894 		.caam = {
3895 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3896 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3897 					   OP_ALG_AAI_HMAC_PRECOMP,
3898 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3899 		},
3900 	},
3901 	{
3902 		.aead = {
3903 			.base = {
3904 				.cra_name = "echainiv(authenc(hmac(sha256),"
3905 					    "cbc(des)))",
3906 				.cra_driver_name = "echainiv-authenc-"
3907 						   "hmac-sha256-cbc-des-caam",
3908 				.cra_blocksize = DES_BLOCK_SIZE,
3909 			},
3910 			.setkey = aead_setkey,
3911 			.setauthsize = aead_setauthsize,
3912 			.encrypt = aead_encrypt,
3913 			.decrypt = aead_givdecrypt,
3914 			.ivsize = DES_BLOCK_SIZE,
3915 			.maxauthsize = SHA256_DIGEST_SIZE,
3916 		},
3917 		.caam = {
3918 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3919 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3920 					   OP_ALG_AAI_HMAC_PRECOMP,
3921 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3922 			.geniv = true,
3923 		},
3924 	},
3925 	{
3926 		.aead = {
3927 			.base = {
3928 				.cra_name = "authenc(hmac(sha384),cbc(des))",
3929 				.cra_driver_name = "authenc-hmac-sha384-"
3930 						   "cbc-des-caam",
3931 				.cra_blocksize = DES_BLOCK_SIZE,
3932 			},
3933 			.setkey = aead_setkey,
3934 			.setauthsize = aead_setauthsize,
3935 			.encrypt = aead_encrypt,
3936 			.decrypt = aead_decrypt,
3937 			.ivsize = DES_BLOCK_SIZE,
3938 			.maxauthsize = SHA384_DIGEST_SIZE,
3939 		},
3940 		.caam = {
3941 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3942 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3943 					   OP_ALG_AAI_HMAC_PRECOMP,
3944 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3945 		},
3946 	},
3947 	{
3948 		.aead = {
3949 			.base = {
3950 				.cra_name = "echainiv(authenc(hmac(sha384),"
3951 					    "cbc(des)))",
3952 				.cra_driver_name = "echainiv-authenc-"
3953 						   "hmac-sha384-cbc-des-caam",
3954 				.cra_blocksize = DES_BLOCK_SIZE,
3955 			},
3956 			.setkey = aead_setkey,
3957 			.setauthsize = aead_setauthsize,
3958 			.encrypt = aead_encrypt,
3959 			.decrypt = aead_givdecrypt,
3960 			.ivsize = DES_BLOCK_SIZE,
3961 			.maxauthsize = SHA384_DIGEST_SIZE,
3962 		},
3963 		.caam = {
3964 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3965 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3966 					   OP_ALG_AAI_HMAC_PRECOMP,
3967 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3968 			.geniv = true,
3969 		},
3970 	},
3971 	{
3972 		.aead = {
3973 			.base = {
3974 				.cra_name = "authenc(hmac(sha512),cbc(des))",
3975 				.cra_driver_name = "authenc-hmac-sha512-"
3976 						   "cbc-des-caam",
3977 				.cra_blocksize = DES_BLOCK_SIZE,
3978 			},
3979 			.setkey = aead_setkey,
3980 			.setauthsize = aead_setauthsize,
3981 			.encrypt = aead_encrypt,
3982 			.decrypt = aead_decrypt,
3983 			.ivsize = DES_BLOCK_SIZE,
3984 			.maxauthsize = SHA512_DIGEST_SIZE,
3985 		},
3986 		.caam = {
3987 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3988 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3989 					   OP_ALG_AAI_HMAC_PRECOMP,
3990 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3991 		},
3992 	},
3993 	{
3994 		.aead = {
3995 			.base = {
3996 				.cra_name = "echainiv(authenc(hmac(sha512),"
3997 					    "cbc(des)))",
3998 				.cra_driver_name = "echainiv-authenc-"
3999 						   "hmac-sha512-cbc-des-caam",
4000 				.cra_blocksize = DES_BLOCK_SIZE,
4001 			},
4002 			.setkey = aead_setkey,
4003 			.setauthsize = aead_setauthsize,
4004 			.encrypt = aead_encrypt,
4005 			.decrypt = aead_givdecrypt,
4006 			.ivsize = DES_BLOCK_SIZE,
4007 			.maxauthsize = SHA512_DIGEST_SIZE,
4008 		},
4009 		.caam = {
4010 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4011 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4012 					   OP_ALG_AAI_HMAC_PRECOMP,
4013 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4014 			.geniv = true,
4015 		},
4016 	},
4017 	{
4018 		.aead = {
4019 			.base = {
4020 				.cra_name = "authenc(hmac(md5),"
4021 					    "rfc3686(ctr(aes)))",
4022 				.cra_driver_name = "authenc-hmac-md5-"
4023 						   "rfc3686-ctr-aes-caam",
4024 				.cra_blocksize = 1,
4025 			},
4026 			.setkey = aead_setkey,
4027 			.setauthsize = aead_setauthsize,
4028 			.encrypt = aead_encrypt,
4029 			.decrypt = aead_decrypt,
4030 			.ivsize = CTR_RFC3686_IV_SIZE,
4031 			.maxauthsize = MD5_DIGEST_SIZE,
4032 		},
4033 		.caam = {
4034 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4035 					   OP_ALG_AAI_CTR_MOD128,
4036 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
4037 					   OP_ALG_AAI_HMAC_PRECOMP,
4038 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4039 			.rfc3686 = true,
4040 		},
4041 	},
4042 	{
4043 		.aead = {
4044 			.base = {
4045 				.cra_name = "seqiv(authenc("
4046 					    "hmac(md5),rfc3686(ctr(aes))))",
4047 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
4048 						   "rfc3686-ctr-aes-caam",
4049 				.cra_blocksize = 1,
4050 			},
4051 			.setkey = aead_setkey,
4052 			.setauthsize = aead_setauthsize,
4053 			.encrypt = aead_encrypt,
4054 			.decrypt = aead_givdecrypt,
4055 			.ivsize = CTR_RFC3686_IV_SIZE,
4056 			.maxauthsize = MD5_DIGEST_SIZE,
4057 		},
4058 		.caam = {
4059 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4060 					   OP_ALG_AAI_CTR_MOD128,
4061 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
4062 					   OP_ALG_AAI_HMAC_PRECOMP,
4063 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4064 			.rfc3686 = true,
4065 			.geniv = true,
4066 		},
4067 	},
4068 	{
4069 		.aead = {
4070 			.base = {
4071 				.cra_name = "authenc(hmac(sha1),"
4072 					    "rfc3686(ctr(aes)))",
4073 				.cra_driver_name = "authenc-hmac-sha1-"
4074 						   "rfc3686-ctr-aes-caam",
4075 				.cra_blocksize = 1,
4076 			},
4077 			.setkey = aead_setkey,
4078 			.setauthsize = aead_setauthsize,
4079 			.encrypt = aead_encrypt,
4080 			.decrypt = aead_decrypt,
4081 			.ivsize = CTR_RFC3686_IV_SIZE,
4082 			.maxauthsize = SHA1_DIGEST_SIZE,
4083 		},
4084 		.caam = {
4085 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4086 					   OP_ALG_AAI_CTR_MOD128,
4087 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4088 					   OP_ALG_AAI_HMAC_PRECOMP,
4089 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4090 			.rfc3686 = true,
4091 		},
4092 	},
4093 	{
4094 		.aead = {
4095 			.base = {
4096 				.cra_name = "seqiv(authenc("
4097 					    "hmac(sha1),rfc3686(ctr(aes))))",
4098 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
4099 						   "rfc3686-ctr-aes-caam",
4100 				.cra_blocksize = 1,
4101 			},
4102 			.setkey = aead_setkey,
4103 			.setauthsize = aead_setauthsize,
4104 			.encrypt = aead_encrypt,
4105 			.decrypt = aead_givdecrypt,
4106 			.ivsize = CTR_RFC3686_IV_SIZE,
4107 			.maxauthsize = SHA1_DIGEST_SIZE,
4108 		},
4109 		.caam = {
4110 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4111 					   OP_ALG_AAI_CTR_MOD128,
4112 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4113 					   OP_ALG_AAI_HMAC_PRECOMP,
4114 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4115 			.rfc3686 = true,
4116 			.geniv = true,
4117 		},
4118 	},
4119 	{
4120 		.aead = {
4121 			.base = {
4122 				.cra_name = "authenc(hmac(sha224),"
4123 					    "rfc3686(ctr(aes)))",
4124 				.cra_driver_name = "authenc-hmac-sha224-"
4125 						   "rfc3686-ctr-aes-caam",
4126 				.cra_blocksize = 1,
4127 			},
4128 			.setkey = aead_setkey,
4129 			.setauthsize = aead_setauthsize,
4130 			.encrypt = aead_encrypt,
4131 			.decrypt = aead_decrypt,
4132 			.ivsize = CTR_RFC3686_IV_SIZE,
4133 			.maxauthsize = SHA224_DIGEST_SIZE,
4134 		},
4135 		.caam = {
4136 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4137 					   OP_ALG_AAI_CTR_MOD128,
4138 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4139 					   OP_ALG_AAI_HMAC_PRECOMP,
4140 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4141 			.rfc3686 = true,
4142 		},
4143 	},
4144 	{
4145 		.aead = {
4146 			.base = {
4147 				.cra_name = "seqiv(authenc("
4148 					    "hmac(sha224),rfc3686(ctr(aes))))",
4149 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
4150 						   "rfc3686-ctr-aes-caam",
4151 				.cra_blocksize = 1,
4152 			},
4153 			.setkey = aead_setkey,
4154 			.setauthsize = aead_setauthsize,
4155 			.encrypt = aead_encrypt,
4156 			.decrypt = aead_givdecrypt,
4157 			.ivsize = CTR_RFC3686_IV_SIZE,
4158 			.maxauthsize = SHA224_DIGEST_SIZE,
4159 		},
4160 		.caam = {
4161 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4162 					   OP_ALG_AAI_CTR_MOD128,
4163 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4164 					   OP_ALG_AAI_HMAC_PRECOMP,
4165 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4166 			.rfc3686 = true,
4167 			.geniv = true,
4168 		},
4169 	},
4170 	{
4171 		.aead = {
4172 			.base = {
4173 				.cra_name = "authenc(hmac(sha256),"
4174 					    "rfc3686(ctr(aes)))",
4175 				.cra_driver_name = "authenc-hmac-sha256-"
4176 						   "rfc3686-ctr-aes-caam",
4177 				.cra_blocksize = 1,
4178 			},
4179 			.setkey = aead_setkey,
4180 			.setauthsize = aead_setauthsize,
4181 			.encrypt = aead_encrypt,
4182 			.decrypt = aead_decrypt,
4183 			.ivsize = CTR_RFC3686_IV_SIZE,
4184 			.maxauthsize = SHA256_DIGEST_SIZE,
4185 		},
4186 		.caam = {
4187 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4188 					   OP_ALG_AAI_CTR_MOD128,
4189 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4190 					   OP_ALG_AAI_HMAC_PRECOMP,
4191 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4192 			.rfc3686 = true,
4193 		},
4194 	},
4195 	{
4196 		.aead = {
4197 			.base = {
4198 				.cra_name = "seqiv(authenc(hmac(sha256),"
4199 					    "rfc3686(ctr(aes))))",
4200 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
4201 						   "rfc3686-ctr-aes-caam",
4202 				.cra_blocksize = 1,
4203 			},
4204 			.setkey = aead_setkey,
4205 			.setauthsize = aead_setauthsize,
4206 			.encrypt = aead_encrypt,
4207 			.decrypt = aead_givdecrypt,
4208 			.ivsize = CTR_RFC3686_IV_SIZE,
4209 			.maxauthsize = SHA256_DIGEST_SIZE,
4210 		},
4211 		.caam = {
4212 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4213 					   OP_ALG_AAI_CTR_MOD128,
4214 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4215 					   OP_ALG_AAI_HMAC_PRECOMP,
4216 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4217 			.rfc3686 = true,
4218 			.geniv = true,
4219 		},
4220 	},
4221 	{
4222 		.aead = {
4223 			.base = {
4224 				.cra_name = "authenc(hmac(sha384),"
4225 					    "rfc3686(ctr(aes)))",
4226 				.cra_driver_name = "authenc-hmac-sha384-"
4227 						   "rfc3686-ctr-aes-caam",
4228 				.cra_blocksize = 1,
4229 			},
4230 			.setkey = aead_setkey,
4231 			.setauthsize = aead_setauthsize,
4232 			.encrypt = aead_encrypt,
4233 			.decrypt = aead_decrypt,
4234 			.ivsize = CTR_RFC3686_IV_SIZE,
4235 			.maxauthsize = SHA384_DIGEST_SIZE,
4236 		},
4237 		.caam = {
4238 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4239 					   OP_ALG_AAI_CTR_MOD128,
4240 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4241 					   OP_ALG_AAI_HMAC_PRECOMP,
4242 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4243 			.rfc3686 = true,
4244 		},
4245 	},
4246 	{
4247 		.aead = {
4248 			.base = {
4249 				.cra_name = "seqiv(authenc(hmac(sha384),"
4250 					    "rfc3686(ctr(aes))))",
4251 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
4252 						   "rfc3686-ctr-aes-caam",
4253 				.cra_blocksize = 1,
4254 			},
4255 			.setkey = aead_setkey,
4256 			.setauthsize = aead_setauthsize,
4257 			.encrypt = aead_encrypt,
4258 			.decrypt = aead_givdecrypt,
4259 			.ivsize = CTR_RFC3686_IV_SIZE,
4260 			.maxauthsize = SHA384_DIGEST_SIZE,
4261 		},
4262 		.caam = {
4263 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4264 					   OP_ALG_AAI_CTR_MOD128,
4265 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4266 					   OP_ALG_AAI_HMAC_PRECOMP,
4267 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4268 			.rfc3686 = true,
4269 			.geniv = true,
4270 		},
4271 	},
4272 	{
4273 		.aead = {
4274 			.base = {
4275 				.cra_name = "authenc(hmac(sha512),"
4276 					    "rfc3686(ctr(aes)))",
4277 				.cra_driver_name = "authenc-hmac-sha512-"
4278 						   "rfc3686-ctr-aes-caam",
4279 				.cra_blocksize = 1,
4280 			},
4281 			.setkey = aead_setkey,
4282 			.setauthsize = aead_setauthsize,
4283 			.encrypt = aead_encrypt,
4284 			.decrypt = aead_decrypt,
4285 			.ivsize = CTR_RFC3686_IV_SIZE,
4286 			.maxauthsize = SHA512_DIGEST_SIZE,
4287 		},
4288 		.caam = {
4289 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4290 					   OP_ALG_AAI_CTR_MOD128,
4291 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4292 					   OP_ALG_AAI_HMAC_PRECOMP,
4293 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4294 			.rfc3686 = true,
4295 		},
4296 	},
4297 	{
4298 		.aead = {
4299 			.base = {
4300 				.cra_name = "seqiv(authenc(hmac(sha512),"
4301 					    "rfc3686(ctr(aes))))",
4302 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
4303 						   "rfc3686-ctr-aes-caam",
4304 				.cra_blocksize = 1,
4305 			},
4306 			.setkey = aead_setkey,
4307 			.setauthsize = aead_setauthsize,
4308 			.encrypt = aead_encrypt,
4309 			.decrypt = aead_givdecrypt,
4310 			.ivsize = CTR_RFC3686_IV_SIZE,
4311 			.maxauthsize = SHA512_DIGEST_SIZE,
4312 		},
4313 		.caam = {
4314 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4315 					   OP_ALG_AAI_CTR_MOD128,
4316 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4317 					   OP_ALG_AAI_HMAC_PRECOMP,
4318 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4319 			.rfc3686 = true,
4320 			.geniv = true,
4321 		},
4322 	},
4323 };
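/*
 * Illustrative sketch, not part of the driver: a kernel consumer reaches
 * the templates above through the generic AEAD API, e.g. for the
 * CBC-AES/HMAC-SHA256 entry.  Error handling and scatterlist setup are
 * omitted, key/authsize/iv/src/dst are placeholders, and the request
 * completes asynchronously (crypto_aead_encrypt() may return -EINPROGRESS):
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, authsize);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 */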
4324 
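/*
 * Registration wrapper for the legacy (ablkcipher/givcipher) templates in
 * driver_algs; entry links each registered algorithm into alg_list so it
 * can be unregistered and freed on module exit.
 */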
4325 struct caam_crypto_alg {
4326 	struct crypto_alg crypto_alg;
4327 	struct list_head entry;
4328 	struct caam_alg_entry caam;
4329 };
4330 
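/*
 * Per-transform setup shared by the legacy and AEAD init paths: acquire a
 * job ring and latch the OPERATION command templates used later when the
 * shared descriptors are constructed.
 */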
4331 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4332 {
4333 	ctx->jrdev = caam_jr_alloc();
4334 	if (IS_ERR(ctx->jrdev)) {
4335 		pr_err("Job Ring Device allocation for transform failed\n");
4336 		return PTR_ERR(ctx->jrdev);
4337 	}
4338 
4339 	/* copy descriptor header template value */
4340 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4341 	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4342 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
4343 
4344 	return 0;
4345 }
4346 
4347 static int caam_cra_init(struct crypto_tfm *tfm)
4348 {
4349 	struct crypto_alg *alg = tfm->__crt_alg;
4350 	struct caam_crypto_alg *caam_alg =
4351 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
4352 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4353 
4354 	return caam_init_common(ctx, &caam_alg->caam);
4355 }
4356 
4357 static int caam_aead_init(struct crypto_aead *tfm)
4358 {
4359 	struct aead_alg *alg = crypto_aead_alg(tfm);
4360 	struct caam_aead_alg *caam_alg =
4361 		 container_of(alg, struct caam_aead_alg, aead);
4362 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4363 
4364 	return caam_init_common(ctx, &caam_alg->caam);
4365 }
4366 
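/*
 * Common teardown: each DMA mapping below may not have been created for
 * this transform, hence the per-field validity checks before unmapping;
 * finally release the job ring acquired in caam_init_common().
 */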
4367 static void caam_exit_common(struct caam_ctx *ctx)
4368 {
4369 	if (ctx->sh_desc_enc_dma &&
4370 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
4371 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
4372 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
4373 	if (ctx->sh_desc_dec_dma &&
4374 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
4375 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
4376 				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
4377 	if (ctx->sh_desc_givenc_dma &&
4378 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
4379 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
4380 				 desc_bytes(ctx->sh_desc_givenc),
4381 				 DMA_TO_DEVICE);
4382 	if (ctx->key_dma &&
4383 	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
4384 		dma_unmap_single(ctx->jrdev, ctx->key_dma,
4385 				 ctx->enckeylen + ctx->split_key_pad_len,
4386 				 DMA_TO_DEVICE);
4387 
4388 	caam_jr_free(ctx->jrdev);
4389 }
4390 
4391 static void caam_cra_exit(struct crypto_tfm *tfm)
4392 {
4393 	caam_exit_common(crypto_tfm_ctx(tfm));
4394 }
4395 
4396 static void caam_aead_exit(struct crypto_aead *tfm)
4397 {
4398 	caam_exit_common(crypto_aead_ctx(tfm));
4399 }
4400 
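/*
 * Unregister every AEAD that was successfully registered, then drain
 * alg_list (guarding against init never having run).
 */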
4401 static void __exit caam_algapi_exit(void)
4402 {
4404 	struct caam_crypto_alg *t_alg, *n;
4405 	int i;
4406 
4407 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4408 		struct caam_aead_alg *t_alg = driver_aeads + i;
4409 
4410 		if (t_alg->registered)
4411 			crypto_unregister_aead(&t_alg->aead);
4412 	}
4413 
4414 	if (!alg_list.next)
4415 		return;
4416 
4417 	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
4418 		crypto_unregister_alg(&t_alg->crypto_alg);
4419 		list_del(&t_alg->entry);
4420 		kfree(t_alg);
4421 	}
4422 }
4423 
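/*
 * Instantiate a crypto_alg from a driver template; only the legacy
 * ablkcipher and givcipher types are handled here, AEADs go through
 * caam_aead_alg_init() instead.
 */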
4424 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
4425 					      *template)
4426 {
4427 	struct caam_crypto_alg *t_alg;
4428 	struct crypto_alg *alg;
4429 
4430 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4431 	if (!t_alg) {
4432 		pr_err("failed to allocate t_alg\n");
4433 		return ERR_PTR(-ENOMEM);
4434 	}
4435 
4436 	alg = &t_alg->crypto_alg;
4437 
4438 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
4439 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4440 		 template->driver_name);
4441 	alg->cra_module = THIS_MODULE;
4442 	alg->cra_init = caam_cra_init;
4443 	alg->cra_exit = caam_cra_exit;
4444 	alg->cra_priority = CAAM_CRA_PRIORITY;
4445 	alg->cra_blocksize = template->blocksize;
4446 	alg->cra_alignmask = 0;
4447 	alg->cra_ctxsize = sizeof(struct caam_ctx);
4448 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
4449 			 template->type;
4450 	switch (template->type) {
4451 	case CRYPTO_ALG_TYPE_GIVCIPHER:
4452 		alg->cra_type = &crypto_givcipher_type;
4453 		alg->cra_ablkcipher = template->template_ablkcipher;
4454 		break;
4455 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
4456 		alg->cra_type = &crypto_ablkcipher_type;
4457 		alg->cra_ablkcipher = template->template_ablkcipher;
4458 		break;
4459 	}
4460 
4461 	t_alg->caam.class1_alg_type = template->class1_alg_type;
4462 	t_alg->caam.class2_alg_type = template->class2_alg_type;
4463 	t_alg->caam.alg_op = template->alg_op;
4464 
4465 	return t_alg;
4466 }
4467 
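/* Fill in the fields common to every entry in driver_aeads */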
4468 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
4469 {
4470 	struct aead_alg *alg = &t_alg->aead;
4471 
4472 	alg->base.cra_module = THIS_MODULE;
4473 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
4474 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
4475 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
4476 
4477 	alg->init = caam_aead_init;
4478 	alg->exit = caam_aead_exit;
4479 }
4480 
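/*
 * Probe the CAAM controller, read its CHA capability registers and
 * register only the algorithms the detected hardware can run.
 */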
4481 static int __init caam_algapi_init(void)
4482 {
4483 	struct device_node *dev_node;
4484 	struct platform_device *pdev;
4485 	struct device *ctrldev;
4486 	struct caam_drv_private *priv;
4487 	int i = 0, err = 0;
4488 	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4489 	unsigned int md_limit = SHA512_DIGEST_SIZE;
4490 	bool registered = false;
4491 
4492 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4493 	if (!dev_node) {
4494 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4495 		if (!dev_node)
4496 			return -ENODEV;
4497 	}
4498 
4499 	pdev = of_find_device_by_node(dev_node);
4500 	if (!pdev) {
4501 		of_node_put(dev_node);
4502 		return -ENODEV;
4503 	}
4504 
4505 	ctrldev = &pdev->dev;
4506 	priv = dev_get_drvdata(ctrldev);
4507 	of_node_put(dev_node);
4508 
4509 	/*
4510 	 * If priv is NULL, it's probably because the caam driver wasn't
4511 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4512 	 */
4513 	if (!priv)
4514 		return -ENODEV;
4515 
4517 	INIT_LIST_HEAD(&alg_list);
4518 
4519 	/*
4520 	 * Register crypto algorithms the device supports.
4521 	 * First, detect presence and attributes of DES, AES, and MD blocks.
4522 	 */
4523 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4524 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4525 	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4526 	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4527 	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4528 
4529 	/* If MD is present and is LP256, limit digest size to SHA-256 */
4530 	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4531 		md_limit = SHA256_DIGEST_SIZE;
4532 
4533 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4534 		struct caam_crypto_alg *t_alg;
4535 		struct caam_alg_template *alg = driver_algs + i;
4536 		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
4537 
4538 		/* Skip DES algorithms if not supported by device */
4539 		if (!des_inst &&
4540 		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
4541 		     (alg_sel == OP_ALG_ALGSEL_DES)))
4542 			continue;
4543 
4544 		/* Skip AES algorithms if not supported by device */
4545 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
4546 			continue;
4547 
4548 		t_alg = caam_alg_alloc(alg);
4549 		if (IS_ERR(t_alg)) {
4550 			err = PTR_ERR(t_alg);
4551 			pr_warn("%s alg allocation failed\n", alg->driver_name);
4552 			continue;
4553 		}
4554 
4555 		err = crypto_register_alg(&t_alg->crypto_alg);
4556 		if (err) {
4557 			pr_warn("%s alg registration failed\n",
4558 				t_alg->crypto_alg.cra_driver_name);
4559 			kfree(t_alg);
4560 			continue;
4561 		}
4562 
4563 		list_add_tail(&t_alg->entry, &alg_list);
4564 		registered = true;
4565 	}
4566 
4567 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4568 		struct caam_aead_alg *t_alg = driver_aeads + i;
4569 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
4570 				 OP_ALG_ALGSEL_MASK;
4571 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
4572 				 OP_ALG_ALGSEL_MASK;
4573 		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4574 
4575 		/* Skip DES algorithms if not supported by device */
4576 		if (!des_inst &&
4577 		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
4578 		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
4579 			continue;
4580 
4581 		/* Skip AES algorithms if not supported by device */
4582 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
4583 			continue;
4584 
4585 		/*
4586 		 * Skip AES algorithms not available on low-power (LP)
4587 		 * devices: the LP AES block does not implement GCM.
4588 		 */
4589 		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
4590 			if (alg_aai == OP_ALG_AAI_GCM)
4591 				continue;
4592 
4593 		/*
4594 		 * Skip algorithms requiring message digests
4595 		 * if MD or MD size is not supported by device.
4596 		 */
4597 		if (c2_alg_sel &&
4598 		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
4599 			continue;
4600 
4601 		caam_aead_alg_init(t_alg);
4602 
4603 		err = crypto_register_aead(&t_alg->aead);
4604 		if (err) {
4605 			pr_warn("%s alg registration failed\n",
4606 				t_alg->aead.base.cra_driver_name);
4607 			continue;
4608 		}
4609 
4610 		t_alg->registered = true;
4611 		registered = true;
4612 	}
4613 
4614 	if (registered)
4615 		pr_info("caam algorithms registered in /proc/crypto\n");
4616 
4617 	return err;
4618 }
4619 
4620 module_init(caam_algapi_init);
4621 module_exit(caam_algapi_exit);
4622 
4623 MODULE_LICENSE("GPL");
4624 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
4625 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
4626