1 /*
 *   Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto API.
3  *
4  *    Copyright (C) 2014-2017  Axis Communications AB
5  */
6 #define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
7 
8 #include <linux/bitfield.h>
9 #include <linux/crypto.h>
10 #include <linux/debugfs.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/fault-inject.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/platform_device.h>
21 #include <linux/scatterlist.h>
22 #include <linux/slab.h>
23 
24 #include <crypto/aes.h>
25 #include <crypto/gcm.h>
26 #include <crypto/internal/aead.h>
27 #include <crypto/internal/hash.h>
28 #include <crypto/internal/skcipher.h>
29 #include <crypto/scatterwalk.h>
30 #include <crypto/sha.h>
31 #include <crypto/xts.h>
32 
33 /* Max length of a line in all cache levels for Artpec SoCs. */
34 #define ARTPEC_CACHE_LINE_MAX	32
35 
36 #define PDMA_OUT_CFG		0x0000
37 #define PDMA_OUT_BUF_CFG	0x0004
38 #define PDMA_OUT_CMD		0x0008
39 #define PDMA_OUT_DESCRQ_PUSH	0x0010
40 #define PDMA_OUT_DESCRQ_STAT	0x0014
41 
42 #define A6_PDMA_IN_CFG		0x0028
43 #define A6_PDMA_IN_BUF_CFG	0x002c
44 #define A6_PDMA_IN_CMD		0x0030
45 #define A6_PDMA_IN_STATQ_PUSH	0x0038
46 #define A6_PDMA_IN_DESCRQ_PUSH	0x0044
47 #define A6_PDMA_IN_DESCRQ_STAT	0x0048
48 #define A6_PDMA_INTR_MASK	0x0068
49 #define A6_PDMA_ACK_INTR	0x006c
50 #define A6_PDMA_MASKED_INTR	0x0074
51 
52 #define A7_PDMA_IN_CFG		0x002c
53 #define A7_PDMA_IN_BUF_CFG	0x0030
54 #define A7_PDMA_IN_CMD		0x0034
55 #define A7_PDMA_IN_STATQ_PUSH	0x003c
56 #define A7_PDMA_IN_DESCRQ_PUSH	0x0048
57 #define A7_PDMA_IN_DESCRQ_STAT	0x004C
58 #define A7_PDMA_INTR_MASK	0x006c
59 #define A7_PDMA_ACK_INTR	0x0070
60 #define A7_PDMA_MASKED_INTR	0x0078
61 
62 #define PDMA_OUT_CFG_EN				BIT(0)
63 
64 #define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
65 #define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
66 
67 #define PDMA_OUT_CMD_START			BIT(0)
68 #define A6_PDMA_OUT_CMD_STOP			BIT(3)
69 #define A7_PDMA_OUT_CMD_STOP			BIT(2)
70 
71 #define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
72 #define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)
73 
74 #define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
75 #define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)
76 
77 #define PDMA_IN_CFG_EN				BIT(0)
78 
79 #define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
80 #define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
81 #define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)
82 
83 #define PDMA_IN_CMD_START			BIT(0)
84 #define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
85 #define A6_PDMA_IN_CMD_STOP			BIT(3)
86 #define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
87 #define A7_PDMA_IN_CMD_STOP			BIT(2)
88 
89 #define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
90 #define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)
91 
92 #define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
93 #define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)
94 
95 #define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
96 #define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)
97 
98 #define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
99 #define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
100 #define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)
101 
102 #define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
103 #define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
104 #define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)
105 
106 #define A6_CRY_MD_OPER		GENMASK(19, 16)
107 
108 #define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
109 #define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)
110 
111 #define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
112 #define A6_CRY_MD_CIPHER_DECR	BIT(22)
113 #define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
114 #define A6_CRY_MD_CIPHER_DSEQ	BIT(24)
115 
116 #define A7_CRY_MD_OPER		GENMASK(11, 8)
117 
118 #define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
119 #define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)
120 
121 #define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
122 #define A7_CRY_MD_CIPHER_DECR	BIT(14)
123 #define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
124 #define A7_CRY_MD_CIPHER_DSEQ	BIT(16)
125 
126 /* DMA metadata constants */
127 #define regk_crypto_aes_cbc     0x00000002
128 #define regk_crypto_aes_ctr     0x00000003
129 #define regk_crypto_aes_ecb     0x00000001
130 #define regk_crypto_aes_gcm     0x00000004
131 #define regk_crypto_aes_xts     0x00000005
132 #define regk_crypto_cache       0x00000002
133 #define a6_regk_crypto_dlkey    0x0000000a
134 #define a7_regk_crypto_dlkey    0x0000000e
135 #define regk_crypto_ext         0x00000001
136 #define regk_crypto_hmac_sha1   0x00000007
137 #define regk_crypto_hmac_sha256 0x00000009
138 #define regk_crypto_hmac_sha384 0x0000000b
139 #define regk_crypto_hmac_sha512 0x0000000d
140 #define regk_crypto_init        0x00000000
141 #define regk_crypto_key_128     0x00000000
142 #define regk_crypto_key_192     0x00000001
143 #define regk_crypto_key_256     0x00000002
144 #define regk_crypto_null        0x00000000
145 #define regk_crypto_sha1        0x00000006
146 #define regk_crypto_sha256      0x00000008
147 #define regk_crypto_sha384      0x0000000a
148 #define regk_crypto_sha512      0x0000000c
149 
150 /* DMA descriptor structures */
151 struct pdma_descr_ctrl  {
152 	unsigned char short_descr : 1;
153 	unsigned char pad1        : 1;
154 	unsigned char eop         : 1;
155 	unsigned char intr        : 1;
156 	unsigned char short_len   : 3;
157 	unsigned char pad2        : 1;
158 } __packed;
159 
160 struct pdma_data_descr {
161 	unsigned int len : 24;
162 	unsigned int buf : 32;
163 } __packed;
164 
165 struct pdma_short_descr {
166 	unsigned char data[7];
167 } __packed;
168 
169 struct pdma_descr {
170 	struct pdma_descr_ctrl ctrl;
171 	union {
172 		struct pdma_data_descr   data;
173 		struct pdma_short_descr  shrt;
174 	};
175 };
176 
177 struct pdma_stat_descr {
178 	unsigned char pad1        : 1;
179 	unsigned char pad2        : 1;
180 	unsigned char eop         : 1;
181 	unsigned char pad3        : 5;
182 	unsigned int  len         : 24;
183 };
184 
185 /* Each descriptor array can hold max 64 entries */
186 #define PDMA_DESCR_COUNT	64
187 
188 #define MODULE_NAME   "Artpec-6 CA"
189 
190 /* Hash modes (including HMAC variants) */
191 #define ARTPEC6_CRYPTO_HASH_SHA1	1
192 #define ARTPEC6_CRYPTO_HASH_SHA256	2
193 #define ARTPEC6_CRYPTO_HASH_SHA384	3
194 #define ARTPEC6_CRYPTO_HASH_SHA512	4
195 
196 /* Crypto modes */
197 #define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
198 #define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
199 #define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
200 #define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5
201 
202 /* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
203  * It operates on a descriptor array with up to 64 descriptor entries.
204  * The arrays must be 64 byte aligned in memory.
205  *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata word that is inserted at the beginning of each
 * DMA packet.
 *
 * A DMA packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
211  *
212  * Multiple packets are used for providing context data, key data and
213  * the plain/ciphertext.
214  *
215  *   PDMA Descriptors (Array)
216  *  +------+------+------+~~+-------+------+----
217  *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
218  *  +--+---+--+---+----+-+~~+-------+----+-+----
219  *     |      |        |       |         |
220  *     |      |        |       |         |
221  *   __|__  +-------++-------++-------+ +----+
222  *  | MD  | |Payload||Payload||Payload| | MD |
223  *  +-----+ +-------++-------++-------+ +----+
224  */
225 
226 struct artpec6_crypto_bounce_buffer {
227 	struct list_head list;
228 	size_t length;
229 	struct scatterlist *sg;
230 	size_t offset;
231 	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
233 	 */
234 	void *buf;
235 };
236 
237 struct artpec6_crypto_dma_map {
238 	dma_addr_t dma_addr;
239 	size_t size;
240 	enum dma_data_direction dir;
241 };
242 
243 struct artpec6_crypto_dma_descriptors {
244 	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
245 	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
246 	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
247 	struct list_head bounce_buffers;
248 	/* Enough maps for all out/in buffers, and all three descr. arrays */
249 	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
250 	dma_addr_t out_dma_addr;
251 	dma_addr_t in_dma_addr;
252 	dma_addr_t stat_dma_addr;
253 	size_t out_cnt;
254 	size_t in_cnt;
255 	size_t map_count;
256 };
257 
258 enum artpec6_crypto_variant {
259 	ARTPEC6_CRYPTO,
260 	ARTPEC7_CRYPTO,
261 };
262 
263 struct artpec6_crypto {
264 	void __iomem *base;
265 	spinlock_t queue_lock;
266 	struct list_head queue; /* waiting for pdma fifo space */
267 	struct list_head pending; /* submitted to pdma fifo */
268 	struct tasklet_struct task;
269 	struct kmem_cache *dma_cache;
270 	int pending_count;
271 	struct timer_list timer;
272 	enum artpec6_crypto_variant variant;
273 	void *pad_buffer; /* cache-aligned block padding buffer */
274 	void *zero_buffer;
275 };
276 
277 enum artpec6_crypto_hash_flags {
278 	HASH_FLAG_INIT_CTX = 2,
279 	HASH_FLAG_UPDATE = 4,
280 	HASH_FLAG_FINALIZE = 8,
281 	HASH_FLAG_HMAC = 16,
282 	HASH_FLAG_UPDATE_KEY = 32,
283 };
284 
285 struct artpec6_crypto_req_common {
286 	struct list_head list;
287 	struct artpec6_crypto_dma_descriptors *dma;
288 	struct crypto_async_request *req;
289 	void (*complete)(struct crypto_async_request *req);
290 	gfp_t gfp_flags;
291 };
292 
293 struct artpec6_hash_request_context {
294 	char partial_buffer[SHA512_BLOCK_SIZE];
295 	char partial_buffer_out[SHA512_BLOCK_SIZE];
296 	char key_buffer[SHA512_BLOCK_SIZE];
297 	char pad_buffer[SHA512_BLOCK_SIZE + 32];
298 	unsigned char digeststate[SHA512_DIGEST_SIZE];
299 	size_t partial_bytes;
300 	u64 digcnt;
301 	u32 key_md;
302 	u32 hash_md;
303 	enum artpec6_crypto_hash_flags hash_flags;
304 	struct artpec6_crypto_req_common common;
305 };
306 
307 struct artpec6_hash_export_state {
308 	char partial_buffer[SHA512_BLOCK_SIZE];
309 	unsigned char digeststate[SHA512_DIGEST_SIZE];
310 	size_t partial_bytes;
311 	u64 digcnt;
312 	int oper;
313 	unsigned int hash_flags;
314 };
315 
316 struct artpec6_hashalg_context {
317 	char hmac_key[SHA512_BLOCK_SIZE];
318 	size_t hmac_key_length;
319 	struct crypto_shash *child_hash;
320 };
321 
322 struct artpec6_crypto_request_context {
323 	u32 cipher_md;
324 	bool decrypt;
325 	struct artpec6_crypto_req_common common;
326 };
327 
328 struct artpec6_cryptotfm_context {
329 	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
330 	size_t key_length;
331 	u32 key_md;
332 	int crypto_type;
333 	struct crypto_skcipher *fallback;
334 };
335 
336 struct artpec6_crypto_aead_hw_ctx {
337 	__be64	aad_length_bits;
338 	__be64  text_length_bits;
339 	__u8	J0[AES_BLOCK_SIZE];
340 };
341 
342 struct artpec6_crypto_aead_req_ctx {
343 	struct artpec6_crypto_aead_hw_ctx hw_ctx;
344 	u32 cipher_md;
345 	bool decrypt;
346 	struct artpec6_crypto_req_common common;
347 	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
348 };
349 
350 /* The crypto framework makes it hard to avoid this global. */
351 static struct device *artpec6_crypto_dev;
352 
353 #ifdef CONFIG_FAULT_INJECTION
354 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
355 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
356 #endif
357 
358 enum {
359 	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
360 	ARTPEC6_CRYPTO_PREPARE_HASH_START,
361 };
362 
363 static int artpec6_crypto_prepare_aead(struct aead_request *areq);
364 static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
365 static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
366 
367 static void
368 artpec6_crypto_complete_crypto(struct crypto_async_request *req);
369 static void
370 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
371 static void
372 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
373 static void
374 artpec6_crypto_complete_aead(struct crypto_async_request *req);
375 static void
376 artpec6_crypto_complete_hash(struct crypto_async_request *req);
377 
378 static int
379 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);
380 
381 static void
382 artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
383 
384 struct artpec6_crypto_walk {
385 	struct scatterlist *sg;
386 	size_t offset;
387 };
388 
389 static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
390 				     struct scatterlist *sg)
391 {
392 	awalk->sg = sg;
393 	awalk->offset = 0;
394 }
395 
396 static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
397 					  size_t nbytes)
398 {
399 	while (nbytes && awalk->sg) {
400 		size_t piece;
401 
402 		WARN_ON(awalk->offset > awalk->sg->length);
403 
404 		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
405 		nbytes -= piece;
406 		awalk->offset += piece;
407 		if (awalk->offset == awalk->sg->length) {
408 			awalk->sg = sg_next(awalk->sg);
409 			awalk->offset = 0;
410 		}
411 
412 	}
413 
414 	return nbytes;
415 }
416 
417 static size_t
418 artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
419 {
420 	WARN_ON(awalk->sg->length == awalk->offset);
421 
422 	return awalk->sg->length - awalk->offset;
423 }
424 
425 static dma_addr_t
426 artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
427 {
428 	return sg_phys(awalk->sg) + awalk->offset;
429 }
430 
431 static void
432 artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
433 {
434 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
435 	struct artpec6_crypto_bounce_buffer *b;
436 	struct artpec6_crypto_bounce_buffer *next;
437 
438 	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
439 		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
440 			 b, b->length, b->offset, b->buf);
441 		sg_pcopy_from_buffer(b->sg,
442 				   1,
443 				   b->buf,
444 				   b->length,
445 				   b->offset);
446 
447 		list_del(&b->list);
448 		kfree(b);
449 	}
450 }
451 
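/* The PDMA only gets a handful of jobs submitted at a time; above this
 * threshold new requests are backlogged or rejected by
 * artpec6_crypto_submit().
 */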
452 static inline bool artpec6_crypto_busy(void)
453 {
454 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
455 	int fifo_count = ac->pending_count;
456 
457 	return fifo_count > 6;
458 }
459 
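/* Queue a request for the hardware. Returns -EINPROGRESS if the request was
 * started, -EBUSY otherwise. With CRYPTO_TFM_REQ_MAY_BACKLOG the request is
 * kept on the backlog queue; without it the request is dropped.
 */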
460 static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
461 {
462 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
463 	int ret = -EBUSY;
464 
465 	spin_lock_bh(&ac->queue_lock);
466 
467 	if (!artpec6_crypto_busy()) {
468 		list_add_tail(&req->list, &ac->pending);
469 		artpec6_crypto_start_dma(req);
470 		ret = -EINPROGRESS;
471 	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
472 		list_add_tail(&req->list, &ac->queue);
473 	} else {
474 		artpec6_crypto_common_destroy(req);
475 	}
476 
477 	spin_unlock_bh(&ac->queue_lock);
478 
479 	return ret;
480 }
481 
482 static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
483 {
484 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
485 	enum artpec6_crypto_variant variant = ac->variant;
486 	void __iomem *base = ac->base;
487 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
488 	u32 ind, statd, outd;
489 
490 	/* Make descriptor content visible to the DMA before starting it. */
491 	wmb();
492 
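	/* The descriptor queue push registers take the number of descriptors
	 * minus one in the LEN field and the 64-byte aligned array address in
	 * bits 31:6, hence the shift by 6.
	 */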
493 	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
494 	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);
495 
496 	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
497 		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);
498 
499 	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
500 	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);
501 
502 	if (variant == ARTPEC6_CRYPTO) {
503 		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
504 		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
505 		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
506 	} else {
507 		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
508 		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
509 		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
510 	}
511 
512 	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
513 	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);
514 
515 	ac->pending_count++;
516 }
517 
518 static void
519 artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
520 {
521 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
522 
523 	dma->out_cnt = 0;
524 	dma->in_cnt = 0;
525 	dma->map_count = 0;
526 	INIT_LIST_HEAD(&dma->bounce_buffers);
527 }
528 
529 static bool fault_inject_dma_descr(void)
530 {
531 #ifdef CONFIG_FAULT_INJECTION
532 	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
533 #else
534 	return false;
535 #endif
536 }
537 
538 /** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
539  *                                        physical address
540  *
541  * @addr: The physical address of the data buffer
542  * @len:  The length of the data buffer
543  * @eop:  True if this is the last buffer in the packet
544  *
545  * @return 0 on success or -ENOSPC if there are no more descriptors available
546  */
547 static int
548 artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
549 				    dma_addr_t addr, size_t len, bool eop)
550 {
551 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
552 	struct pdma_descr *d;
553 
554 	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
555 	    fault_inject_dma_descr()) {
556 		pr_err("No free OUT DMA descriptors available!\n");
557 		return -ENOSPC;
558 	}
559 
560 	d = &dma->out[dma->out_cnt++];
561 	memset(d, 0, sizeof(*d));
562 
563 	d->ctrl.short_descr = 0;
564 	d->ctrl.eop = eop;
565 	d->data.len = len;
566 	d->data.buf = addr;
567 	return 0;
568 }
569 
570 /** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
571  *
572  * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
574  * @eop: True if this is the last buffer in the packet
575  *
576  * @return 0 on success
577  *	-ENOSPC if no more descriptors are available
578  *	-EINVAL if the data length exceeds 7 bytes
579  */
580 static int
581 artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
582 				     void *dst, unsigned int len, bool eop)
583 {
584 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
585 	struct pdma_descr *d;
586 
587 	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
588 	    fault_inject_dma_descr()) {
589 		pr_err("No free OUT DMA descriptors available!\n");
590 		return -ENOSPC;
591 	} else if (len > 7 || len < 1) {
592 		return -EINVAL;
593 	}
594 	d = &dma->out[dma->out_cnt++];
595 	memset(d, 0, sizeof(*d));
596 
597 	d->ctrl.short_descr = 1;
598 	d->ctrl.short_len = len;
599 	d->ctrl.eop = eop;
600 	memcpy(d->shrt.data, dst, len);
601 	return 0;
602 }
603 
604 static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
605 				      struct page *page, size_t offset,
606 				      size_t size,
607 				      enum dma_data_direction dir,
608 				      dma_addr_t *dma_addr_out)
609 {
610 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
611 	struct device *dev = artpec6_crypto_dev;
612 	struct artpec6_crypto_dma_map *map;
613 	dma_addr_t dma_addr;
614 
615 	*dma_addr_out = 0;
616 
617 	if (dma->map_count >= ARRAY_SIZE(dma->maps))
618 		return -ENOMEM;
619 
620 	dma_addr = dma_map_page(dev, page, offset, size, dir);
621 	if (dma_mapping_error(dev, dma_addr))
622 		return -ENOMEM;
623 
624 	map = &dma->maps[dma->map_count++];
625 	map->size = size;
626 	map->dma_addr = dma_addr;
627 	map->dir = dir;
628 
629 	*dma_addr_out = dma_addr;
630 
631 	return 0;
632 }
633 
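/* Map a kernel virtual address for DMA. The buffer must lie in the kernel's
 * linear mapping since virt_to_page() is used, so vmalloc or highmem
 * addresses cannot be mapped here.
 */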
634 static int
635 artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
636 			      void *ptr, size_t size,
637 			      enum dma_data_direction dir,
638 			      dma_addr_t *dma_addr_out)
639 {
640 	struct page *page = virt_to_page(ptr);
641 	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;
642 
643 	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
644 					  dma_addr_out);
645 }
646 
647 static int
648 artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
649 {
650 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
651 	int ret;
652 
653 	ret = artpec6_crypto_dma_map_single(common, dma->in,
654 				sizeof(dma->in[0]) * dma->in_cnt,
655 				DMA_TO_DEVICE, &dma->in_dma_addr);
656 	if (ret)
657 		return ret;
658 
659 	ret = artpec6_crypto_dma_map_single(common, dma->out,
660 				sizeof(dma->out[0]) * dma->out_cnt,
661 				DMA_TO_DEVICE, &dma->out_dma_addr);
662 	if (ret)
663 		return ret;
664 
665 	/* We only read one stat descriptor */
666 	dma->stat[dma->in_cnt - 1] = 0;
667 
668 	/*
669 	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
670 	 * to be written.
671 	 */
672 	return artpec6_crypto_dma_map_single(common,
673 				dma->stat + dma->in_cnt - 1,
674 				sizeof(dma->stat[0]),
675 				DMA_BIDIRECTIONAL,
676 				&dma->stat_dma_addr);
677 }
678 
679 static void
680 artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
681 {
682 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
683 	struct device *dev = artpec6_crypto_dev;
684 	int i;
685 
686 	for (i = 0; i < dma->map_count; i++) {
687 		struct artpec6_crypto_dma_map *map = &dma->maps[i];
688 
689 		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
690 	}
691 
692 	dma->map_count = 0;
693 }
694 
695 /** artpec6_crypto_setup_out_descr - Setup an out descriptor
696  *
697  * @dst: The virtual address of the data
698  * @len: The length of the data
699  * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is less than 7 bytes, a
 *	short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
706  */
707 static int
708 artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
709 			       void *dst, unsigned int len, bool eop,
710 			       bool use_short)
711 {
712 	if (use_short && len < 7) {
713 		return artpec6_crypto_setup_out_descr_short(common, dst, len,
714 							    eop);
715 	} else {
716 		int ret;
717 		dma_addr_t dma_addr;
718 
719 		ret = artpec6_crypto_dma_map_single(common, dst, len,
720 						   DMA_TO_DEVICE,
721 						   &dma_addr);
722 		if (ret)
723 			return ret;
724 
725 		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
726 							   len, eop);
727 	}
728 }
729 
730 /** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
731  *                                       physical address
732  *
733  * @addr: The physical address of the data buffer
734  * @len:  The length of the data buffer
735  * @intr: True if an interrupt should be fired after HW processing of this
736  *	  descriptor
737  *
738  */
739 static int
740 artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
741 			       dma_addr_t addr, unsigned int len, bool intr)
742 {
743 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
744 	struct pdma_descr *d;
745 
746 	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
747 	    fault_inject_dma_descr()) {
748 		pr_err("No free IN DMA descriptors available!\n");
749 		return -ENOSPC;
750 	}
751 	d = &dma->in[dma->in_cnt++];
752 	memset(d, 0, sizeof(*d));
753 
754 	d->ctrl.intr = intr;
755 	d->data.len = len;
756 	d->data.buf = addr;
757 	return 0;
758 }
759 
760 /** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
761  *
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
766  *
767  * Short descriptors are not used for the in channel
768  */
769 static int
770 artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
771 			  void *buffer, unsigned int len, bool last)
772 {
773 	dma_addr_t dma_addr;
774 	int ret;
775 
776 	ret = artpec6_crypto_dma_map_single(common, buffer, len,
777 					   DMA_FROM_DEVICE, &dma_addr);
778 	if (ret)
779 		return ret;
780 
781 	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
782 }
783 
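/* Allocate a bounce buffer descriptor together with two cache lines of
 * slack so that ->buf can be aligned to ARTPEC_CACHE_LINE_MAX within the
 * same allocation.
 */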
784 static struct artpec6_crypto_bounce_buffer *
785 artpec6_crypto_alloc_bounce(gfp_t flags)
786 {
787 	void *base;
788 	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
789 			    2 * ARTPEC_CACHE_LINE_MAX;
790 	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);
791 
792 	if (!bbuf)
793 		return NULL;
794 
795 	base = bbuf + 1;
796 	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
797 	return bbuf;
798 }
799 
800 static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
801 				  struct artpec6_crypto_walk *walk, size_t size)
802 {
803 	struct artpec6_crypto_bounce_buffer *bbuf;
804 	int ret;
805 
806 	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
807 	if (!bbuf)
808 		return -ENOMEM;
809 
810 	bbuf->length = size;
811 	bbuf->sg = walk->sg;
812 	bbuf->offset = walk->offset;
813 
814 	ret =  artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
815 	if (ret) {
816 		kfree(bbuf);
817 		return ret;
818 	}
819 
820 	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
821 	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
822 	return 0;
823 }
824 
825 static int
826 artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
827 				  struct artpec6_crypto_walk *walk,
828 				  size_t count)
829 {
830 	size_t chunk;
831 	int ret;
832 	dma_addr_t addr;
833 
834 	while (walk->sg && count) {
835 		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
836 		addr = artpec6_crypto_walk_chunk_phys(walk);
837 
		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA API requires that the
		 * entire cache line is owned by the DMA buffer; this also
		 * holds when coherent DMA is used.
		 */
843 		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
844 			chunk = min_t(dma_addr_t, chunk,
845 				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
846 				      addr);
847 
848 			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
849 			ret = setup_bounce_buffer_in(common, walk, chunk);
850 		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
851 			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
852 			ret = setup_bounce_buffer_in(common, walk, chunk);
853 		} else {
854 			dma_addr_t dma_addr;
855 
856 			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
857 
858 			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
859 
860 			ret = artpec6_crypto_dma_map_page(common,
861 							 sg_page(walk->sg),
862 							 walk->sg->offset +
863 							 walk->offset,
864 							 chunk,
865 							 DMA_FROM_DEVICE,
866 							 &dma_addr);
867 			if (ret)
868 				return ret;
869 
870 			ret = artpec6_crypto_setup_in_descr_phys(common,
871 								 dma_addr,
872 								 chunk, false);
873 		}
874 
875 		if (ret)
876 			return ret;
877 
878 		count = count - chunk;
879 		artpec6_crypto_walk_advance(walk, chunk);
880 	}
881 
882 	if (count)
883 		pr_err("EOL unexpected %zu bytes left\n", count);
884 
885 	return count ? -EINVAL : 0;
886 }
887 
888 static int
889 artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
890 				   struct artpec6_crypto_walk *walk,
891 				   size_t count)
892 {
893 	size_t chunk;
894 	int ret;
895 	dma_addr_t addr;
896 
897 	while (walk->sg && count) {
898 		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
899 		addr = artpec6_crypto_walk_chunk_phys(walk);
900 
901 		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
902 
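		/* Copy up to three leading bytes through a short descriptor
		 * so that the remaining data starts on a 4-byte boundary.
		 */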
903 		if (addr & 3) {
904 			char buf[3];
905 
906 			chunk = min_t(size_t, chunk, (4-(addr&3)));
907 
908 			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
909 					   walk->offset);
910 
911 			ret = artpec6_crypto_setup_out_descr_short(common, buf,
912 								   chunk,
913 								   false);
914 		} else {
915 			dma_addr_t dma_addr;
916 
917 			ret = artpec6_crypto_dma_map_page(common,
918 							 sg_page(walk->sg),
919 							 walk->sg->offset +
920 							 walk->offset,
921 							 chunk,
922 							 DMA_TO_DEVICE,
923 							 &dma_addr);
924 			if (ret)
925 				return ret;
926 
927 			ret = artpec6_crypto_setup_out_descr_phys(common,
928 								 dma_addr,
929 								 chunk, false);
930 		}
931 
932 		if (ret)
933 			return ret;
934 
935 		count = count - chunk;
936 		artpec6_crypto_walk_advance(walk, chunk);
937 	}
938 
939 	if (count)
940 		pr_err("EOL unexpected %zu bytes left\n", count);
941 
942 	return count ? -EINVAL : 0;
943 }
944 
945 
946 /** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
947  *
948  * If the out descriptor list is non-empty, then the eop flag on the
949  * last used out descriptor will be set.
950  *
 * @return  0 on success
 *	-EINVAL if the out descriptor list is empty or has overflowed
953  */
954 static int
955 artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
956 {
957 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
958 	struct pdma_descr *d;
959 
	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}
966 
967 	d = &dma->out[dma->out_cnt-1];
968 	d->ctrl.eop = 1;
969 
970 	return 0;
971 }
972 
973 /** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
974  *                                       in descriptor
975  *
976  * See artpec6_crypto_terminate_out_descrs() for return values
977  */
978 static int
979 artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
980 {
981 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
982 	struct pdma_descr *d;
983 
	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}
989 
990 	d = &dma->in[dma->in_cnt-1];
991 	d->ctrl.intr = 1;
992 	return 0;
993 }
994 
/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad to. Must be able to hold
 *            up to one block of zero padding plus the encoded bit length
 * @dgstlen:  The number of message bytes hashed so far
 * @bitcount: The total length of the hashed data in bits
 *
 * @return The total number of bytes written to @dst
 */
1003 static size_t
1004 create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
1005 {
1006 	unsigned int mod, target, diff, pad_bytes, size_bytes;
1007 	__be64 bits = __cpu_to_be64(bitcount);
1008 
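	/* SHA-1/SHA-256 use a 512-bit block and a 64-bit length field;
	 * SHA-384/SHA-512 use a 1024-bit block and a 128-bit length field.
	 */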
1009 	switch (oper) {
1010 	case regk_crypto_sha1:
1011 	case regk_crypto_sha256:
1012 	case regk_crypto_hmac_sha1:
1013 	case regk_crypto_hmac_sha256:
1014 		target = 448 / 8;
1015 		mod = 512 / 8;
1016 		size_bytes = 8;
1017 		break;
1018 	default:
1019 		target = 896 / 8;
1020 		mod = 1024 / 8;
1021 		size_bytes = 16;
1022 		break;
1023 	}
1024 
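	/* The leading 0x80 byte occupies one position, so pad_bytes counts
	 * only the zero bytes that follow it.
	 */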
1025 	target -= 1;
1026 	diff = dgstlen & (mod - 1);
1027 	pad_bytes = diff > target ? target + mod - diff : target - diff;
1028 
1029 	memset(dst + 1, 0, pad_bytes);
1030 	dst[0] = 0x80;
1031 
1032 	if (size_bytes == 16) {
1033 		memset(dst + 1 + pad_bytes, 0, 8);
1034 		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
1035 	} else {
1036 		memcpy(dst + 1 + pad_bytes, &bits, 8);
1037 	}
1038 
1039 	return pad_bytes + size_bytes + 1;
1040 }
1041 
1042 static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
1043 		struct crypto_async_request *parent,
1044 		void (*complete)(struct crypto_async_request *req),
1045 		struct scatterlist *dstsg, unsigned int nbytes)
1046 {
1047 	gfp_t flags;
1048 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1049 
1050 	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1051 		 GFP_KERNEL : GFP_ATOMIC;
1052 
1053 	common->gfp_flags = flags;
1054 	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
1055 	if (!common->dma)
1056 		return -ENOMEM;
1057 
1058 	common->req = parent;
1059 	common->complete = complete;
1060 	return 0;
1061 }
1062 
1063 static void
1064 artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
1065 {
1066 	struct artpec6_crypto_bounce_buffer *b;
1067 	struct artpec6_crypto_bounce_buffer *next;
1068 
1069 	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
1070 		kfree(b);
1071 	}
1072 }
1073 
1074 static int
1075 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
1076 {
1077 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1078 
1079 	artpec6_crypto_dma_unmap_all(common);
1080 	artpec6_crypto_bounce_destroy(common->dma);
1081 	kmem_cache_free(ac->dma_cache, common->dma);
1082 	common->dma = NULL;
1083 	return 0;
1084 }
1085 
1086 /*
1087  * Ciphering functions.
1088  */
1089 static int artpec6_crypto_encrypt(struct skcipher_request *req)
1090 {
1091 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
1092 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
1093 	struct artpec6_crypto_request_context *req_ctx = NULL;
1094 	void (*complete)(struct crypto_async_request *req);
1095 	int ret;
1096 
1097 	req_ctx = skcipher_request_ctx(req);
1098 
1099 	switch (ctx->crypto_type) {
1100 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1101 	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
1102 	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
1103 		req_ctx->decrypt = 0;
1104 		break;
1105 	default:
1106 		break;
1107 	}
1108 
1109 	switch (ctx->crypto_type) {
1110 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1111 		complete = artpec6_crypto_complete_cbc_encrypt;
1112 		break;
1113 	default:
1114 		complete = artpec6_crypto_complete_crypto;
1115 		break;
1116 	}
1117 
1118 	ret = artpec6_crypto_common_init(&req_ctx->common,
1119 				  &req->base,
1120 				  complete,
1121 				  req->dst, req->cryptlen);
1122 	if (ret)
1123 		return ret;
1124 
1125 	ret = artpec6_crypto_prepare_crypto(req);
1126 	if (ret) {
1127 		artpec6_crypto_common_destroy(&req_ctx->common);
1128 		return ret;
1129 	}
1130 
1131 	return artpec6_crypto_submit(&req_ctx->common);
1132 }
1133 
1134 static int artpec6_crypto_decrypt(struct skcipher_request *req)
1135 {
1136 	int ret;
1137 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
1138 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
1139 	struct artpec6_crypto_request_context *req_ctx = NULL;
1140 	void (*complete)(struct crypto_async_request *req);
1141 
1142 	req_ctx = skcipher_request_ctx(req);
1143 
1144 	switch (ctx->crypto_type) {
1145 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1146 	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
1147 	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
1148 		req_ctx->decrypt = 1;
1149 		break;
1150 	default:
1151 		break;
1152 	}
1153 
1154 
1155 	switch (ctx->crypto_type) {
1156 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1157 		complete = artpec6_crypto_complete_cbc_decrypt;
1158 		break;
1159 	default:
1160 		complete = artpec6_crypto_complete_crypto;
1161 		break;
1162 	}
1163 
1164 	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
1165 				  complete,
1166 				  req->dst, req->cryptlen);
1167 	if (ret)
1168 		return ret;
1169 
1170 	ret = artpec6_crypto_prepare_crypto(req);
1171 	if (ret) {
1172 		artpec6_crypto_common_destroy(&req_ctx->common);
1173 		return ret;
1174 	}
1175 
1176 	return artpec6_crypto_submit(&req_ctx->common);
1177 }
1178 
1179 static int
1180 artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
1181 {
1182 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
1183 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
1184 	size_t iv_len = crypto_skcipher_ivsize(cipher);
1185 	unsigned int counter = be32_to_cpup((__be32 *)
1186 					    (req->iv + iv_len - 4));
1187 	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
1188 			     AES_BLOCK_SIZE;
1189 
1190 	/*
1191 	 * The hardware uses only the last 32-bits as the counter while the
1192 	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
1193 	 * the whole IV is a counter.  So fallback if the counter is going to
1194 	 * overlow.
1195 	 */
1196 	if (counter + nblks < counter) {
1197 		int ret;
1198 
		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, nblks);
1201 
1202 		ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
1203 					     ctx->key_length);
1204 		if (ret)
1205 			return ret;
1206 
1207 		{
1208 			SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
1209 
1210 			skcipher_request_set_tfm(subreq, ctx->fallback);
1211 			skcipher_request_set_callback(subreq, req->base.flags,
1212 						      NULL, NULL);
1213 			skcipher_request_set_crypt(subreq, req->src, req->dst,
1214 						   req->cryptlen, req->iv);
1215 			ret = encrypt ? crypto_skcipher_encrypt(subreq)
1216 				      : crypto_skcipher_decrypt(subreq);
1217 			skcipher_request_zero(subreq);
1218 		}
1219 		return ret;
1220 	}
1221 
1222 	return encrypt ? artpec6_crypto_encrypt(req)
1223 		       : artpec6_crypto_decrypt(req);
1224 }
1225 
1226 static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
1227 {
1228 	return artpec6_crypto_ctr_crypt(req, true);
1229 }
1230 
1231 static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
1232 {
1233 	return artpec6_crypto_ctr_crypt(req, false);
1234 }
1235 
1236 /*
1237  * AEAD functions
1238  */
1239 static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
1240 {
1241 	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);
1242 
1243 	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
1244 
1245 	crypto_aead_set_reqsize(tfm,
1246 				sizeof(struct artpec6_crypto_aead_req_ctx));
1247 
1248 	return 0;
1249 }
1250 
1251 static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
1252 			       unsigned int len)
1253 {
1254 	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
1255 
	if (len != 16 && len != 24 && len != 32) {
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
1260 
1261 	ctx->key_length = len;
1262 
1263 	memcpy(ctx->aes_key, key, len);
1264 	return 0;
1265 }
1266 
1267 static int artpec6_crypto_aead_encrypt(struct aead_request *req)
1268 {
1269 	int ret;
1270 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
1271 
1272 	req_ctx->decrypt = false;
1273 	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
1274 				  artpec6_crypto_complete_aead,
1275 				  NULL, 0);
1276 	if (ret)
1277 		return ret;
1278 
1279 	ret = artpec6_crypto_prepare_aead(req);
1280 	if (ret) {
1281 		artpec6_crypto_common_destroy(&req_ctx->common);
1282 		return ret;
1283 	}
1284 
1285 	return artpec6_crypto_submit(&req_ctx->common);
1286 }
1287 
1288 static int artpec6_crypto_aead_decrypt(struct aead_request *req)
1289 {
1290 	int ret;
1291 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
1292 
1293 	req_ctx->decrypt = true;
1294 	if (req->cryptlen < AES_BLOCK_SIZE)
1295 		return -EINVAL;
1296 
1297 	ret = artpec6_crypto_common_init(&req_ctx->common,
1298 				  &req->base,
1299 				  artpec6_crypto_complete_aead,
1300 				  NULL, 0);
1301 	if (ret)
1302 		return ret;
1303 
1304 	ret = artpec6_crypto_prepare_aead(req);
1305 	if (ret) {
1306 		artpec6_crypto_common_destroy(&req_ctx->common);
1307 		return ret;
1308 	}
1309 
1310 	return artpec6_crypto_submit(&req_ctx->common);
1311 }
1312 
1313 static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
1314 {
1315 	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
1316 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
1317 	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
1318 	size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
1319 		SHA512_DIGEST_SIZE : digestsize;
1320 	size_t blocksize = crypto_tfm_alg_blocksize(
1321 		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
1322 	struct artpec6_crypto_req_common *common = &req_ctx->common;
1323 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1324 	enum artpec6_crypto_variant variant = ac->variant;
1325 	u32 sel_ctx;
1326 	bool ext_ctx = false;
1327 	bool run_hw = false;
1328 	int error = 0;
1329 
1330 	artpec6_crypto_init_dma_operation(common);
1331 
	/* Upload HMAC key, must be the first packet */
1333 	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
1334 		if (variant == ARTPEC6_CRYPTO) {
1335 			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
1336 						     a6_regk_crypto_dlkey);
1337 		} else {
1338 			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
1339 						     a7_regk_crypto_dlkey);
1340 		}
1341 
1342 		/* Copy and pad up the key */
1343 		memcpy(req_ctx->key_buffer, ctx->hmac_key,
1344 		       ctx->hmac_key_length);
1345 		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
1346 		       blocksize - ctx->hmac_key_length);
1347 
1348 		error = artpec6_crypto_setup_out_descr(common,
1349 					(void *)&req_ctx->key_md,
1350 					sizeof(req_ctx->key_md), false, false);
1351 		if (error)
1352 			return error;
1353 
1354 		error = artpec6_crypto_setup_out_descr(common,
1355 					req_ctx->key_buffer, blocksize,
1356 					true, false);
1357 		if (error)
1358 			return error;
1359 	}
1360 
1361 	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
1362 		/* Restore context */
1363 		sel_ctx = regk_crypto_ext;
1364 		ext_ctx = true;
1365 	} else {
1366 		sel_ctx = regk_crypto_init;
1367 	}
1368 
1369 	if (variant == ARTPEC6_CRYPTO) {
1370 		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
1371 		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);
1372 
1373 		/* If this is the final round, set the final flag */
1374 		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
1375 			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
1376 	} else {
1377 		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
1378 		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
1379 
1380 		/* If this is the final round, set the final flag */
1381 		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
1382 			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
1383 	}
1384 
	/* Set up metadata descriptors */
1386 	error = artpec6_crypto_setup_out_descr(common,
1387 				(void *)&req_ctx->hash_md,
1388 				sizeof(req_ctx->hash_md), false, false);
1389 	if (error)
1390 		return error;
1391 
1392 	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
1393 	if (error)
1394 		return error;
1395 
1396 	if (ext_ctx) {
1397 		error = artpec6_crypto_setup_out_descr(common,
1398 					req_ctx->digeststate,
1399 					contextsize, false, false);
1400 
1401 		if (error)
1402 			return error;
1403 	}
1404 
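	/* Only whole blocks are sent to the hardware; a trailing partial
	 * block is kept in partial_buffer for the next update or for the
	 * final round.
	 */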
1405 	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
1406 		size_t done_bytes = 0;
1407 		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
1408 		size_t ready_bytes = round_down(total_bytes, blocksize);
1409 		struct artpec6_crypto_walk walk;
1410 
1411 		run_hw = ready_bytes > 0;
1412 		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least some
			 * bytes to the HW. Empty this partial buffer before
			 * tackling the SG lists.
			 */
1417 			memcpy(req_ctx->partial_buffer_out,
1418 				req_ctx->partial_buffer,
1419 				req_ctx->partial_bytes);
1420 
1421 			error = artpec6_crypto_setup_out_descr(common,
1422 						req_ctx->partial_buffer_out,
1423 						req_ctx->partial_bytes,
1424 						false, true);
1425 			if (error)
1426 				return error;
1427 
1428 			/* Reset partial buffer */
1429 			done_bytes += req_ctx->partial_bytes;
1430 			req_ctx->partial_bytes = 0;
1431 		}
1432 
1433 		artpec6_crypto_walk_init(&walk, areq->src);
1434 
1435 		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
1436 							   ready_bytes -
1437 							   done_bytes);
1438 		if (error)
1439 			return error;
1440 
1441 		if (walk.sg) {
1442 			size_t sg_skip = ready_bytes - done_bytes;
1443 			size_t sg_rem = areq->nbytes - sg_skip;
1444 
1445 			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
1446 					   req_ctx->partial_buffer +
1447 					   req_ctx->partial_bytes,
1448 					   sg_rem, sg_skip);
1449 
1450 			req_ctx->partial_bytes += sg_rem;
1451 		}
1452 
1453 		req_ctx->digcnt += ready_bytes;
1454 		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
1455 	}
1456 
1457 	/* Finalize */
1458 	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
1459 		bool needtrim = contextsize != digestsize;
1460 		size_t hash_pad_len;
1461 		u64 digest_bits;
1462 		u32 oper;
1463 
1464 		if (variant == ARTPEC6_CRYPTO)
1465 			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
1466 		else
1467 			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
1468 
1469 		/* Write out the partial buffer if present */
1470 		if (req_ctx->partial_bytes) {
1471 			memcpy(req_ctx->partial_buffer_out,
1472 			       req_ctx->partial_buffer,
1473 			       req_ctx->partial_bytes);
1474 			error = artpec6_crypto_setup_out_descr(common,
1475 						req_ctx->partial_buffer_out,
1476 						req_ctx->partial_bytes,
1477 						false, true);
1478 			if (error)
1479 				return error;
1480 
1481 			req_ctx->digcnt += req_ctx->partial_bytes;
1482 			req_ctx->partial_bytes = 0;
1483 		}
1484 
1485 		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
1486 			digest_bits = 8 * (req_ctx->digcnt + blocksize);
1487 		else
1488 			digest_bits = 8 * req_ctx->digcnt;
1489 
1490 		/* Add the hash pad */
1491 		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
1492 					       req_ctx->digcnt, digest_bits);
1493 		error = artpec6_crypto_setup_out_descr(common,
1494 						      req_ctx->pad_buffer,
1495 						      hash_pad_len, false,
1496 						      true);
1497 		req_ctx->digcnt = 0;
1498 
1499 		if (error)
1500 			return error;
1501 
1502 		/* Descriptor for the final result */
1503 		error = artpec6_crypto_setup_in_descr(common, areq->result,
1504 						      digestsize,
1505 						      !needtrim);
1506 		if (error)
1507 			return error;
1508 
1509 		if (needtrim) {
1510 			/* Discard the extra context bytes for SHA-384 */
1511 			error = artpec6_crypto_setup_in_descr(common,
1512 					req_ctx->partial_buffer,
1513 					digestsize - contextsize, true);
1514 			if (error)
1515 				return error;
1516 		}
1517 
1518 	} else { /* This is not the final operation for this request */
1519 		if (!run_hw)
1520 			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
1521 
1522 		/* Save the result to the context */
1523 		error = artpec6_crypto_setup_in_descr(common,
1524 						      req_ctx->digeststate,
1525 						      contextsize, false);
1526 		if (error)
1527 			return error;
1528 		/* fall through */
1529 	}
1530 
1531 	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
1532 				 HASH_FLAG_FINALIZE);
1533 
1534 	error = artpec6_crypto_terminate_in_descrs(common);
1535 	if (error)
1536 		return error;
1537 
1538 	error = artpec6_crypto_terminate_out_descrs(common);
1539 	if (error)
1540 		return error;
1541 
1542 	error = artpec6_crypto_dma_map_descs(common);
1543 	if (error)
1544 		return error;
1545 
1546 	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
1547 }
1548 
1549 
1550 static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
1551 {
1552 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1553 
1554 	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
1555 	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
1556 
1557 	return 0;
1558 }
1559 
1560 static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
1561 {
1562 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1563 
1564 	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
1565 					      0,
1566 					      CRYPTO_ALG_ASYNC |
1567 					      CRYPTO_ALG_NEED_FALLBACK);
1568 	if (IS_ERR(ctx->fallback))
1569 		return PTR_ERR(ctx->fallback);
1570 
1571 	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
1572 	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
1573 
1574 	return 0;
1575 }
1576 
1577 static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
1578 {
1579 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1580 
1581 	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
1582 	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
1583 
1584 	return 0;
1585 }
1586 
1587 static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
1588 {
1589 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1590 
1591 	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
1592 	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
1593 
1594 	return 0;
1595 }
1596 
1597 static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
1598 {
1599 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1600 
1601 	memset(ctx, 0, sizeof(*ctx));
1602 }
1603 
1604 static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
1605 {
1606 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1607 
1608 	crypto_free_skcipher(ctx->fallback);
1609 	artpec6_crypto_aes_exit(tfm);
1610 }
1611 
1612 static int
1613 artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
1614 			      unsigned int keylen)
1615 {
1616 	struct artpec6_cryptotfm_context *ctx =
1617 		crypto_skcipher_ctx(cipher);
1618 
1619 	switch (keylen) {
1620 	case 16:
1621 	case 24:
1622 	case 32:
1623 		break;
1624 	default:
1625 		crypto_skcipher_set_flags(cipher,
1626 					  CRYPTO_TFM_RES_BAD_KEY_LEN);
1627 		return -EINVAL;
1628 	}
1629 
1630 	memcpy(ctx->aes_key, key, keylen);
1631 	ctx->key_length = keylen;
1632 	return 0;
1633 }
1634 
1635 static int
1636 artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
1637 			      unsigned int keylen)
1638 {
1639 	struct artpec6_cryptotfm_context *ctx =
1640 		crypto_skcipher_ctx(cipher);
1641 	int ret;
1642 
1643 	ret = xts_check_key(&cipher->base, key, keylen);
1644 	if (ret)
1645 		return ret;
1646 
1647 	switch (keylen) {
1648 	case 32:
1649 	case 48:
1650 	case 64:
1651 		break;
1652 	default:
1653 		crypto_skcipher_set_flags(cipher,
1654 					  CRYPTO_TFM_RES_BAD_KEY_LEN);
1655 		return -EINVAL;
1656 	}
1657 
1658 	memcpy(ctx->aes_key, key, keylen);
1659 	ctx->key_length = keylen;
1660 	return 0;
1661 }
1662 
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @areq: The asynchronous request to process
1666  *
1667  * @return 0 if the dma job was successfully prepared
1668  *	  <0 on error
1669  *
1670  * This function sets up the PDMA descriptors for a block cipher request.
1671  *
1672  * The required padding is added for AES-CTR using a statically defined
1673  * buffer.
1674  *
1675  * The PDMA descriptor list will be as follows:
1676  *
1677  * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
1678  * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
1679  *
1680  */
1681 static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
1682 {
1683 	int ret;
1684 	struct artpec6_crypto_walk walk;
1685 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1686 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
1687 	struct artpec6_crypto_request_context *req_ctx = NULL;
1688 	size_t iv_len = crypto_skcipher_ivsize(cipher);
1689 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1690 	enum artpec6_crypto_variant variant = ac->variant;
1691 	struct artpec6_crypto_req_common *common;
1692 	bool cipher_decr = false;
1693 	size_t cipher_klen;
1694 	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
1695 	u32 oper;
1696 
1697 	req_ctx = skcipher_request_ctx(areq);
1698 	common = &req_ctx->common;
1699 
1700 	artpec6_crypto_init_dma_operation(common);
1701 
1702 	if (variant == ARTPEC6_CRYPTO)
1703 		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
1704 	else
1705 		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
1706 
1707 	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
1708 					     sizeof(ctx->key_md), false, false);
1709 	if (ret)
1710 		return ret;
1711 
1712 	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
1713 					      ctx->key_length, true, false);
1714 	if (ret)
1715 		return ret;
1716 
1717 	req_ctx->cipher_md = 0;
1718 
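	/* An XTS key is two AES keys of equal size; the hardware key-length
	 * field refers to the size of one half.
	 */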
1719 	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
1720 		cipher_klen = ctx->key_length/2;
1721 	else
1722 		cipher_klen =  ctx->key_length;
1723 
1724 	/* Metadata */
1725 	switch (cipher_klen) {
1726 	case 16:
1727 		cipher_len = regk_crypto_key_128;
1728 		break;
1729 	case 24:
1730 		cipher_len = regk_crypto_key_192;
1731 		break;
1732 	case 32:
1733 		cipher_len = regk_crypto_key_256;
1734 		break;
1735 	default:
		pr_err("%s: Invalid key length %zu!\n",
			MODULE_NAME, ctx->key_length);
1738 		return -EINVAL;
1739 	}
1740 
1741 	switch (ctx->crypto_type) {
1742 	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
1743 		oper = regk_crypto_aes_ecb;
1744 		cipher_decr = req_ctx->decrypt;
1745 		break;
1746 
1747 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1748 		oper = regk_crypto_aes_cbc;
1749 		cipher_decr = req_ctx->decrypt;
1750 		break;
1751 
1752 	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
1753 		oper = regk_crypto_aes_ctr;
1754 		cipher_decr = false;
1755 		break;
1756 
1757 	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
1758 		oper = regk_crypto_aes_xts;
1759 		cipher_decr = req_ctx->decrypt;
1760 
1761 		if (variant == ARTPEC6_CRYPTO)
1762 			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
1763 		else
1764 			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
1765 		break;
1766 
1767 	default:
1768 		pr_err("%s: Invalid cipher mode %d!\n",
1769 			MODULE_NAME, ctx->crypto_type);
1770 		return -EINVAL;
1771 	}
1772 
1773 	if (variant == ARTPEC6_CRYPTO) {
1774 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
1775 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
1776 						 cipher_len);
1777 		if (cipher_decr)
1778 			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
1779 	} else {
1780 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
1781 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
1782 						 cipher_len);
1783 		if (cipher_decr)
1784 			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
1785 	}
1786 
1787 	ret = artpec6_crypto_setup_out_descr(common,
1788 					    &req_ctx->cipher_md,
1789 					    sizeof(req_ctx->cipher_md),
1790 					    false, false);
1791 	if (ret)
1792 		return ret;
1793 
1794 	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
1795 	if (ret)
1796 		return ret;
1797 
1798 	if (iv_len) {
1799 		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
1800 						     false, false);
1801 		if (ret)
1802 			return ret;
1803 	}
1804 	/* Data out */
1805 	artpec6_crypto_walk_init(&walk, areq->src);
1806 	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
1807 	if (ret)
1808 		return ret;
1809 
1810 	/* Data in */
1811 	artpec6_crypto_walk_init(&walk, areq->dst);
1812 	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
1813 	if (ret)
1814 		return ret;
1815 
1816 	/* CTR-mode padding required by the HW. */
1817 	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
1818 	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
1819 		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
1820 			     areq->cryptlen;
1821 
1822 		if (pad) {
1823 			ret = artpec6_crypto_setup_out_descr(common,
1824 							     ac->pad_buffer,
1825 							     pad, false, false);
1826 			if (ret)
1827 				return ret;
1828 
1829 			ret = artpec6_crypto_setup_in_descr(common,
1830 							    ac->pad_buffer, pad,
1831 							    false);
1832 			if (ret)
1833 				return ret;
1834 		}
1835 	}
1836 
1837 	ret = artpec6_crypto_terminate_out_descrs(common);
1838 	if (ret)
1839 		return ret;
1840 
1841 	ret = artpec6_crypto_terminate_in_descrs(common);
1842 	if (ret)
1843 		return ret;
1844 
1845 	return artpec6_crypto_dma_map_descs(common);
1846 }
1847 
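/* Build the PDMA job for an AES-GCM request. The out chain carries the key
 * and cipher metadata, the GCM context (AAD and text bit lengths plus the
 * initial counter block J0), the associated data (zero-padded to a 16-byte
 * boundary as required by the hardware) and finally the text itself.
 */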
1848 static int artpec6_crypto_prepare_aead(struct aead_request *areq)
1849 {
1850 	size_t count;
1851 	int ret;
1852 	size_t input_length;
1853 	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
1854 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
1855 	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
1856 	struct artpec6_crypto_req_common *common = &req_ctx->common;
1857 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1858 	enum artpec6_crypto_variant variant = ac->variant;
1859 	u32 md_cipher_len;
1860 
1861 	artpec6_crypto_init_dma_operation(common);
1862 
1863 	/* Key */
1864 	if (variant == ARTPEC6_CRYPTO) {
1865 		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
1866 					 a6_regk_crypto_dlkey);
1867 	} else {
1868 		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
1869 					 a7_regk_crypto_dlkey);
1870 	}
1871 	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
1872 					     sizeof(ctx->key_md), false, false);
1873 	if (ret)
1874 		return ret;
1875 
1876 	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
1877 					     ctx->key_length, true, false);
1878 	if (ret)
1879 		return ret;
1880 
1881 	req_ctx->cipher_md = 0;
1882 
1883 	switch (ctx->key_length) {
1884 	case 16:
1885 		md_cipher_len = regk_crypto_key_128;
1886 		break;
1887 	case 24:
1888 		md_cipher_len = regk_crypto_key_192;
1889 		break;
1890 	case 32:
1891 		md_cipher_len = regk_crypto_key_256;
1892 		break;
1893 	default:
1894 		return -EINVAL;
1895 	}
1896 
1897 	if (variant == ARTPEC6_CRYPTO) {
1898 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
1899 						 regk_crypto_aes_gcm);
1900 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
1901 						 md_cipher_len);
1902 		if (req_ctx->decrypt)
1903 			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
1904 	} else {
1905 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
1906 						 regk_crypto_aes_gcm);
1907 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
1908 						 md_cipher_len);
1909 		if (req_ctx->decrypt)
1910 			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
1911 	}
1912 
1913 	ret = artpec6_crypto_setup_out_descr(common,
1914 					    (void *) &req_ctx->cipher_md,
1915 					    sizeof(req_ctx->cipher_md), false,
1916 					    false);
1917 	if (ret)
1918 		return ret;
1919 
1920 	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
1921 	if (ret)
1922 		return ret;
1923 
1924 	/* For decryption, cryptlen includes the 16-byte authentication tag. */
1925 	input_length = areq->cryptlen;
1926 	if (req_ctx->decrypt)
1927 		input_length -= AES_BLOCK_SIZE;
1928 
1929 	/* Prepare the context buffer */
1930 	req_ctx->hw_ctx.aad_length_bits =
1931 		__cpu_to_be64(8 * areq->assoclen);
1932 
1933 	req_ctx->hw_ctx.text_length_bits =
1934 		__cpu_to_be64(8 * input_length);
1935 
1936 	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
1937 	/* The HW omits the initial increment of the counter field. */
1938 	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
1939 
1940 	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
1941 		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
1942 	if (ret)
1943 		return ret;
1944 
1945 	{
1946 		struct artpec6_crypto_walk walk;
1947 
1948 		artpec6_crypto_walk_init(&walk, areq->src);
1949 
1950 		/* Associated data */
1951 		count = areq->assoclen;
1952 		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1953 		if (ret)
1954 			return ret;
1955 
1956 		if (!IS_ALIGNED(areq->assoclen, 16)) {
1957 			size_t assoc_pad = 16 - (areq->assoclen % 16);
1958 			/* The HW mandates zero padding here */
1959 			ret = artpec6_crypto_setup_out_descr(common,
1960 							     ac->zero_buffer,
1961 							     assoc_pad, false,
1962 							     false);
1963 			if (ret)
1964 				return ret;
1965 		}
1966 
1967 		/* Data to crypto */
1968 		count = input_length;
1969 		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1970 		if (ret)
1971 			return ret;
1972 
1973 		if (!IS_ALIGNED(input_length, 16)) {
1974 			size_t crypto_pad = 16 - (input_length % 16);
1975 			/* The HW mandates zero padding here */
1976 			ret = artpec6_crypto_setup_out_descr(common,
1977 							     ac->zero_buffer,
1978 							     crypto_pad,
1979 							     false,
1980 							     false);
1981 			if (ret)
1982 				return ret;
1983 		}
1984 	}
1985 
1986 	/* Data from crypto */
1987 	{
1988 		struct artpec6_crypto_walk walk;
1989 		size_t output_len = areq->cryptlen;
1990 
1991 		if (req_ctx->decrypt)
1992 			output_len -= AES_BLOCK_SIZE;
1993 
1994 		artpec6_crypto_walk_init(&walk, areq->dst);
1995 
1996 		/* skip associated data in the output */
1997 		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
1998 		if (count)
1999 			return -EINVAL;
2000 
2001 		count = output_len;
2002 		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
2003 		if (ret)
2004 			return ret;
2005 
2006 		/* Put padding between the ciphertext and the auth tag */
2007 		if (!IS_ALIGNED(output_len, 16)) {
2008 			size_t crypto_pad = 16 - (output_len % 16);
2009 
2010 			ret = artpec6_crypto_setup_in_descr(common,
2011 							    ac->pad_buffer,
2012 							    crypto_pad, false);
2013 			if (ret)
2014 				return ret;
2015 		}
2016 
2017 		/* The authentication tag shall follow immediately after
2018 		 * the output ciphertext. For decryption it is put in a context
2019 		 * buffer for a later comparison against the input tag.
2020 		 */
2021 		count = AES_BLOCK_SIZE;
2022 
2023 		if (req_ctx->decrypt) {
2024 			ret = artpec6_crypto_setup_in_descr(common,
2025 				req_ctx->decryption_tag, count, false);
2026 			if (ret)
2027 				return ret;
2028 
2029 		} else {
2030 			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
2031 								count);
2032 			if (ret)
2033 				return ret;
2034 		}
2035 
2036 	}
2037 
2038 	ret = artpec6_crypto_terminate_in_descrs(common);
2039 	if (ret)
2040 		return ret;
2041 
2042 	ret = artpec6_crypto_terminate_out_descrs(common);
2043 	if (ret)
2044 		return ret;
2045 
2046 	return artpec6_crypto_dma_map_descs(common);
2047 }
2048 
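/* Move queued requests over to the pending list and start their DMA
 * transfers for as long as the hardware can accept more work, then
 * (re)arm the recovery timer while anything is still pending.
 */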
2049 static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
2050 {
2051 	struct artpec6_crypto_req_common *req;
2052 
2053 	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
2054 		req = list_first_entry(&ac->queue,
2055 				       struct artpec6_crypto_req_common,
2056 				       list);
2057 		list_move_tail(&req->list, &ac->pending);
2058 		artpec6_crypto_start_dma(req);
2059 
2060 		req->req->complete(req->req, -EINPROGRESS);
2061 	}
2062 
2063 	/*
2064 	 * In some cases, the hardware can raise an in_eop_flush interrupt
2065 	 * before actually updating the status, so we have a timer which will
2066 	 * recheck the status on timeout.  Since the cases are expected to be
2067 	 * very rare, we use a relatively large timeout value.  There should be
2068 	 * no noticeable negative effect if we timeout spuriously.
2069 	 */
2070 	if (ac->pending_count)
2071 		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
2072 	else
2073 		del_timer(&ac->timer);
2074 }
2075 
2076 static void artpec6_crypto_timeout(struct timer_list *t)
2077 {
2078 	struct artpec6_crypto *ac = from_timer(ac, t, timer);
2079 
2080 	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
2081 
2082 	tasklet_schedule(&ac->task);
2083 }
2084 
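/* Completion tasklet: inspect the final status descriptor of each
 * pending job and, for every finished one, unmap its DMA state, copy
 * back any bounce buffers and invoke the completion callback before
 * refilling the hardware from the software queue.
 */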
2085 static void artpec6_crypto_task(unsigned long data)
2086 {
2087 	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
2088 	struct artpec6_crypto_req_common *req;
2089 	struct artpec6_crypto_req_common *n;
2090 
2091 	if (list_empty(&ac->pending)) {
2092 		pr_debug("Spurious IRQ\n");
2093 		return;
2094 	}
2095 
2096 	spin_lock_bh(&ac->queue_lock);
2097 
2098 	list_for_each_entry_safe(req, n, &ac->pending, list) {
2099 		struct artpec6_crypto_dma_descriptors *dma = req->dma;
2100 		u32 stat;
2101 
2102 		dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
2103 					sizeof(dma->stat[0]),
2104 					DMA_BIDIRECTIONAL);
2105 
2106 		stat = req->dma->stat[req->dma->in_cnt-1];
2107 
2108 		/* A non-zero final status descriptor indicates
2109 		 * this job has finished.
2110 		 */
2111 		pr_debug("Request %p status is %X\n", req, stat);
2112 		if (!stat)
2113 			break;
2114 
2115 		/* Allow testing of timeout handling with fault injection */
2116 #ifdef CONFIG_FAULT_INJECTION
2117 		if (should_fail(&artpec6_crypto_fail_status_read, 1))
2118 			continue;
2119 #endif
2120 
2121 		pr_debug("Completing request %p\n", req);
2122 
2123 		list_del(&req->list);
2124 
2125 		artpec6_crypto_dma_unmap_all(req);
2126 		artpec6_crypto_copy_bounce_buffers(req);
2127 
2128 		ac->pending_count--;
2129 		artpec6_crypto_common_destroy(req);
2130 		req->complete(req->req);
2131 	}
2132 
2133 	artpec6_crypto_process_queue(ac);
2134 
2135 	spin_unlock_bh(&ac->queue_lock);
2136 }
2137 
2138 static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
2139 {
2140 	req->complete(req, 0);
2141 }
2142 
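/* The CBC completion handlers copy the last ciphertext block into the
 * request IV so that chained operations continue correctly: the block is
 * taken from the source buffer on decryption and from the destination
 * buffer on encryption.
 */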
2143 static void
2144 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
2145 {
2146 	struct skcipher_request *cipher_req = container_of(req,
2147 		struct skcipher_request, base);
2148 
2149 	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
2150 				 cipher_req->cryptlen - AES_BLOCK_SIZE,
2151 				 AES_BLOCK_SIZE, 0);
2152 	req->complete(req, 0);
2153 }
2154 
2155 static void
2156 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
2157 {
2158 	struct skcipher_request *cipher_req = container_of(req,
2159 		struct skcipher_request, base);
2160 
2161 	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
2162 				 cipher_req->cryptlen - AES_BLOCK_SIZE,
2163 				 AES_BLOCK_SIZE, 0);
2164 	req->complete(req, 0);
2165 }
2166 
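/* On decryption, compare the tag computed by the hardware with the tag
 * stored at the end of the input and fail with -EBADMSG on mismatch.
 */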
2167 static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
2168 {
2169 	int result = 0;
2170 
2171 	/* Verify the GCM authentication tag. */
2172 	struct aead_request *areq = container_of(req,
2173 		struct aead_request, base);
2174 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
2175 
2176 	if (req_ctx->decrypt) {
2177 		u8 input_tag[AES_BLOCK_SIZE];
2178 
2179 		sg_pcopy_to_buffer(areq->src,
2180 				   sg_nents(areq->src),
2181 				   input_tag,
2182 				   AES_BLOCK_SIZE,
2183 				   areq->assoclen + areq->cryptlen -
2184 				   AES_BLOCK_SIZE);
2185 
2186 		if (memcmp(req_ctx->decryption_tag,
2187 			   input_tag,
2188 			   AES_BLOCK_SIZE)) {
2189 			pr_debug("***EBADMSG:\n");
2190 			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
2191 					     input_tag, AES_BLOCK_SIZE, true);
2192 			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
2193 					     req_ctx->decryption_tag,
2194 					     AES_BLOCK_SIZE, true);
2195 
2196 			result = -EBADMSG;
2197 		}
2198 	}
2199 
2200 	req->complete(req, result);
2201 }
2202 
2203 static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
2204 {
2205 	req->complete(req, 0);
2206 }
2207 
2208 
2209 /*------------------- Hash functions -----------------------------------------*/
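/* HMAC setkey: keys longer than the block size are first digested with
 * the fallback shash, while shorter keys are copied as-is into the
 * zero-initialised key buffer.
 */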
2210 static int
2211 artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
2212 		    const u8 *key, unsigned int keylen)
2213 {
2214 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
2215 	size_t blocksize;
2216 	int ret;
2217 
2218 	if (!keylen) {
2219 		pr_err("Invalid length (%u) of HMAC key\n", keylen);
2221 		return -EINVAL;
2222 	}
2223 
2224 	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2225 
2226 	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2227 
2228 	if (keylen > blocksize) {
2229 		SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
2230 
2231 		hdesc->tfm = tfm_ctx->child_hash;
2232 		hdesc->flags = crypto_ahash_get_flags(tfm) &
2233 			       CRYPTO_TFM_REQ_MAY_SLEEP;
2234 
2235 		tfm_ctx->hmac_key_length = blocksize;
2236 		ret = crypto_shash_digest(hdesc, key, keylen,
2237 					  tfm_ctx->hmac_key);
2238 		if (ret)
2239 			return ret;
2240 
2241 	} else {
2242 		memcpy(tfm_ctx->hmac_key, key, keylen);
2243 		tfm_ctx->hmac_key_length = keylen;
2244 	}
2245 
2246 	return 0;
2247 }
2248 
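/* Reset the request context and select the hash or HMAC operation code
 * for the hardware metadata word.
 */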
2249 static int
2250 artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
2251 {
2252 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2253 	enum artpec6_crypto_variant variant = ac->variant;
2254 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2255 	u32 oper;
2256 
2257 	memset(req_ctx, 0, sizeof(*req_ctx));
2258 
2259 	req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
2260 	if (hmac)
2261 		req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
2262 
2263 	switch (type) {
2264 	case ARTPEC6_CRYPTO_HASH_SHA1:
2265 		oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
2266 		break;
2267 	case ARTPEC6_CRYPTO_HASH_SHA256:
2268 		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
2269 		break;
2270 	case ARTPEC6_CRYPTO_HASH_SHA384:
2271 		oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
2272 		break;
2273 	case ARTPEC6_CRYPTO_HASH_SHA512:
2274 		oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
2275 		break;
2276 
2277 	default:
2278 		pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
2279 		return -EINVAL;
2280 	}
2281 
2282 	if (variant == ARTPEC6_CRYPTO)
2283 		req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
2284 	else
2285 		req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
2286 
2287 	return 0;
2288 }
2289 
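/* Allocate the DMA state on first use, build the descriptor lists and
 * submit the job. If the hardware does not need to be started, or an
 * error occurs, the DMA state is torn down again.
 */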
2290 static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
2291 {
2292 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2293 	int ret;
2294 
2295 	if (!req_ctx->common.dma) {
2296 		ret = artpec6_crypto_common_init(&req_ctx->common,
2297 					  &req->base,
2298 					  artpec6_crypto_complete_hash,
2299 					  NULL, 0);
2300 
2301 		if (ret)
2302 			return ret;
2303 	}
2304 
2305 	ret = artpec6_crypto_prepare_hash(req);
2306 	switch (ret) {
2307 	case ARTPEC6_CRYPTO_PREPARE_HASH_START:
2308 		ret = artpec6_crypto_submit(&req_ctx->common);
2309 		break;
2310 
2311 	case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
2312 		ret = 0;
2313 		/* Fallthrough */
2314 
2315 	default:
2316 		artpec6_crypto_common_destroy(&req_ctx->common);
2317 		break;
2318 	}
2319 
2320 	return ret;
2321 }
2322 
2323 static int artpec6_crypto_hash_final(struct ahash_request *req)
2324 {
2325 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2326 
2327 	req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
2328 
2329 	return artpec6_crypto_prepare_submit_hash(req);
2330 }
2331 
2332 static int artpec6_crypto_hash_update(struct ahash_request *req)
2333 {
2334 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2335 
2336 	req_ctx->hash_flags |= HASH_FLAG_UPDATE;
2337 
2338 	return artpec6_crypto_prepare_submit_hash(req);
2339 }
2340 
2341 static int artpec6_crypto_sha1_init(struct ahash_request *req)
2342 {
2343 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2344 }
2345 
2346 static int artpec6_crypto_sha1_digest(struct ahash_request *req)
2347 {
2348 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2349 
2350 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2351 
2352 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2353 
2354 	return artpec6_crypto_prepare_submit_hash(req);
2355 }
2356 
2357 static int artpec6_crypto_sha256_init(struct ahash_request *req)
2358 {
2359 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2360 }
2361 
2362 static int artpec6_crypto_sha256_digest(struct ahash_request *req)
2363 {
2364 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2365 
2366 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2367 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2368 
2369 	return artpec6_crypto_prepare_submit_hash(req);
2370 }
2371 
2372 static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
2373 {
2374 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
2375 }
2376 
2377 static int __maybe_unused
2378 artpec6_crypto_sha384_digest(struct ahash_request *req)
2379 {
2380 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2381 
2382 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
2383 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2384 
2385 	return artpec6_crypto_prepare_submit_hash(req);
2386 }
2387 
2388 static int artpec6_crypto_sha512_init(struct ahash_request *req)
2389 {
2390 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
2391 }
2392 
2393 static int artpec6_crypto_sha512_digest(struct ahash_request *req)
2394 {
2395 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2396 
2397 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
2398 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2399 
2400 	return artpec6_crypto_prepare_submit_hash(req);
2401 }
2402 
2403 static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
2404 {
2405 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2406 }
2407 
2408 static int __maybe_unused
2409 artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
2410 {
2411 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
2412 }
2413 
2414 static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
2415 {
2416 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
2417 }
2418 
2419 static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
2420 {
2421 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2422 
2423 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2424 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2425 
2426 	return artpec6_crypto_prepare_submit_hash(req);
2427 }
2428 
2429 static int __maybe_unused
2430 artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
2431 {
2432 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2433 
2434 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
2435 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2436 
2437 	return artpec6_crypto_prepare_submit_hash(req);
2438 }
2439 
2440 static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
2441 {
2442 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2443 
2444 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
2445 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2446 
2447 	return artpec6_crypto_prepare_submit_hash(req);
2448 }
2449 
2450 static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
2451 				    const char *base_hash_name)
2452 {
2453 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2454 
2455 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2456 				 sizeof(struct artpec6_hash_request_context));
2457 	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
2458 
2459 	if (base_hash_name) {
2460 		struct crypto_shash *child;
2461 
2462 		child = crypto_alloc_shash(base_hash_name, 0,
2463 					   CRYPTO_ALG_NEED_FALLBACK);
2464 
2465 		if (IS_ERR(child))
2466 			return PTR_ERR(child);
2467 
2468 		tfm_ctx->child_hash = child;
2469 	}
2470 
2471 	return 0;
2472 }
2473 
2474 static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
2475 {
2476 	return artpec6_crypto_ahash_init_common(tfm, NULL);
2477 }
2478 
2479 static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
2480 {
2481 	return artpec6_crypto_ahash_init_common(tfm, "sha256");
2482 }
2483 
2484 static int __maybe_unused
2485 artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
2486 {
2487 	return artpec6_crypto_ahash_init_common(tfm, "sha384");
2488 }
2489 
2490 static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
2491 {
2492 	return artpec6_crypto_ahash_init_common(tfm, "sha512");
2493 }
2494 
2495 static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
2496 {
2497 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2498 
2499 	if (tfm_ctx->child_hash)
2500 		crypto_free_shash(tfm_ctx->child_hash);
2501 
2502 	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2503 	tfm_ctx->hmac_key_length = 0;
2504 }
2505 
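/* Export the partial input block, digest state, counters, flags and the
 * operation code so that an in-progress hash can later be resumed
 * through import().
 */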
2506 static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
2507 {
2508 	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2509 	struct artpec6_hash_export_state *state = out;
2510 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2511 	enum artpec6_crypto_variant variant = ac->variant;
2512 
2513 	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
2514 		     sizeof(ctx->partial_buffer));
2515 	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
2516 
2517 	state->digcnt = ctx->digcnt;
2518 	state->partial_bytes = ctx->partial_bytes;
2519 	state->hash_flags = ctx->hash_flags;
2520 
2521 	if (variant == ARTPEC6_CRYPTO)
2522 		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
2523 	else
2524 		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
2525 
2526 	memcpy(state->partial_buffer, ctx->partial_buffer,
2527 	       sizeof(state->partial_buffer));
2528 	memcpy(state->digeststate, ctx->digeststate,
2529 	       sizeof(state->digeststate));
2530 
2531 	return 0;
2532 }
2533 
2534 static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
2535 {
2536 	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2537 	const struct artpec6_hash_export_state *state = in;
2538 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2539 	enum artpec6_crypto_variant variant = ac->variant;
2540 
2541 	memset(ctx, 0, sizeof(*ctx));
2542 
2543 	ctx->digcnt = state->digcnt;
2544 	ctx->partial_bytes = state->partial_bytes;
2545 	ctx->hash_flags = state->hash_flags;
2546 
2547 	if (variant == ARTPEC6_CRYPTO)
2548 		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
2549 	else
2550 		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
2551 
2552 	memcpy(ctx->partial_buffer, state->partial_buffer,
2553 	       sizeof(state->partial_buffer));
2554 	memcpy(ctx->digeststate, state->digeststate,
2555 	       sizeof(state->digeststate));
2556 
2557 	return 0;
2558 }
2559 
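/* Partition the PDMA internal buffer memory between data, descriptor and
 * status areas and enable the IN and OUT channels together with the data
 * and EOP-flush interrupts.
 */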
2560 static int init_crypto_hw(struct artpec6_crypto *ac)
2561 {
2562 	enum artpec6_crypto_variant variant = ac->variant;
2563 	void __iomem *base = ac->base;
2564 	u32 out_descr_buf_size;
2565 	u32 out_data_buf_size;
2566 	u32 in_data_buf_size;
2567 	u32 in_descr_buf_size;
2568 	u32 in_stat_buf_size;
2569 	u32 in, out;
2570 
2571 	/*
2572 	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
2573 	 * channels and 1024 bytes for the IN channel. This is an elastic
2574 	 * memory used to internally store the descriptors and data. The values
2575 	 * are specified in 64 byte increments.  TrustZone buffers are not
2576 	 * used at this stage.
2577 	 */
2578 	out_data_buf_size = 16;  /* 1024 bytes for data */
2579 	out_descr_buf_size = 15; /* 960 bytes for descriptors */
2580 	in_data_buf_size = 8;    /* 512 bytes for data */
2581 	in_descr_buf_size = 4;   /* 256 bytes for descriptors */
2582 	in_stat_buf_size = 4;   /* 256 bytes for stat descrs */
2583 
2584 	BUILD_BUG_ON_MSG((out_data_buf_size
2585 				+ out_descr_buf_size) * 64 > 1984,
2586 			  "Invalid OUT configuration");
2587 
2588 	BUILD_BUG_ON_MSG((in_data_buf_size
2589 				+ in_descr_buf_size
2590 				+ in_stat_buf_size) * 64 > 1024,
2591 			  "Invalid IN configuration");
2592 
2593 	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
2594 	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
2595 	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
2596 
2597 	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
2598 	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
2599 
2600 	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
2601 	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
2602 
2603 	if (variant == ARTPEC6_CRYPTO) {
2604 		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
2605 		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
2606 		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
2607 			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
2608 			       base + A6_PDMA_INTR_MASK);
2609 	} else {
2610 		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
2611 		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
2612 		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
2613 			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
2614 			       base + A7_PDMA_INTR_MASK);
2615 	}
2616 
2617 	return 0;
2618 }
2619 
2620 static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
2621 {
2622 	enum artpec6_crypto_variant variant = ac->variant;
2623 	void __iomem *base = ac->base;
2624 
2625 	if (variant == ARTPEC6_CRYPTO) {
2626 		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
2627 		writel_relaxed(0, base + A6_PDMA_IN_CFG);
2628 		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2629 	} else {
2630 		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
2631 		writel_relaxed(0, base + A7_PDMA_IN_CFG);
2632 		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2633 	}
2634 
2635 	writel_relaxed(0, base + PDMA_OUT_CFG);
2636 
2637 }
2638 
2639 static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
2640 {
2641 	struct artpec6_crypto *ac = dev_id;
2642 	enum artpec6_crypto_variant variant = ac->variant;
2643 	void __iomem *base = ac->base;
2644 	u32 mask_in_data, mask_in_eop_flush;
2645 	u32 in_cmd_flush_stat, in_cmd_reg;
2646 	u32 ack_intr_reg;
2647 	u32 ack = 0;
2648 	u32 intr;
2649 
2650 	if (variant == ARTPEC6_CRYPTO) {
2651 		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
2652 		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
2653 		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
2654 		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
2655 		in_cmd_reg = A6_PDMA_IN_CMD;
2656 		ack_intr_reg = A6_PDMA_ACK_INTR;
2657 	} else {
2658 		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
2659 		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
2660 		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
2661 		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
2662 		in_cmd_reg = A7_PDMA_IN_CMD;
2663 		ack_intr_reg = A7_PDMA_ACK_INTR;
2664 	}
2665 
2666 	/* We get two interrupt notifications from each job.
2667 	 * The in_data interrupt means all data has been written to memory;
2668 	 * we then request a status flush command to write the per-job
2669 	 * status to its status vector. This ensures that the
2670 	 * tasklet can detect exactly how many of the submitted jobs
2671 	 * have finished.
2672 	 */
2673 	if (intr & mask_in_data)
2674 		ack |= mask_in_data;
2675 
2676 	if (intr & mask_in_eop_flush)
2677 		ack |= mask_in_eop_flush;
2678 	else
2679 		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
2680 
2681 	writel_relaxed(ack, base + ack_intr_reg);
2682 
2683 	if (intr & mask_in_eop_flush)
2684 		tasklet_schedule(&ac->task);
2685 
2686 	return IRQ_HANDLED;
2687 }
2688 
2689 /*------------------- Algorithm definitions ----------------------------------*/
2690 
2691 /* Hashes */
2692 static struct ahash_alg hash_algos[] = {
2693 	/* SHA-1 */
2694 	{
2695 		.init = artpec6_crypto_sha1_init,
2696 		.update = artpec6_crypto_hash_update,
2697 		.final = artpec6_crypto_hash_final,
2698 		.digest = artpec6_crypto_sha1_digest,
2699 		.import = artpec6_crypto_hash_import,
2700 		.export = artpec6_crypto_hash_export,
2701 		.halg.digestsize = SHA1_DIGEST_SIZE,
2702 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2703 		.halg.base = {
2704 			.cra_name = "sha1",
2705 			.cra_driver_name = "artpec-sha1",
2706 			.cra_priority = 300,
2707 			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2708 			.cra_blocksize = SHA1_BLOCK_SIZE,
2709 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2710 			.cra_alignmask = 3,
2711 			.cra_module = THIS_MODULE,
2712 			.cra_init = artpec6_crypto_ahash_init,
2713 			.cra_exit = artpec6_crypto_ahash_exit,
2714 		}
2715 	},
2716 	/* SHA-256 */
2717 	{
2718 		.init = artpec6_crypto_sha256_init,
2719 		.update = artpec6_crypto_hash_update,
2720 		.final = artpec6_crypto_hash_final,
2721 		.digest = artpec6_crypto_sha256_digest,
2722 		.import = artpec6_crypto_hash_import,
2723 		.export = artpec6_crypto_hash_export,
2724 		.halg.digestsize = SHA256_DIGEST_SIZE,
2725 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2726 		.halg.base = {
2727 			.cra_name = "sha256",
2728 			.cra_driver_name = "artpec-sha256",
2729 			.cra_priority = 300,
2730 			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2731 			.cra_blocksize = SHA256_BLOCK_SIZE,
2732 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2733 			.cra_alignmask = 3,
2734 			.cra_module = THIS_MODULE,
2735 			.cra_init = artpec6_crypto_ahash_init,
2736 			.cra_exit = artpec6_crypto_ahash_exit,
2737 		}
2738 	},
2739 	/* HMAC SHA-256 */
2740 	{
2741 		.init = artpec6_crypto_hmac_sha256_init,
2742 		.update = artpec6_crypto_hash_update,
2743 		.final = artpec6_crypto_hash_final,
2744 		.digest = artpec6_crypto_hmac_sha256_digest,
2745 		.import = artpec6_crypto_hash_import,
2746 		.export = artpec6_crypto_hash_export,
2747 		.setkey = artpec6_crypto_hash_set_key,
2748 		.halg.digestsize = SHA256_DIGEST_SIZE,
2749 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2750 		.halg.base = {
2751 			.cra_name = "hmac(sha256)",
2752 			.cra_driver_name = "artpec-hmac-sha256",
2753 			.cra_priority = 300,
2754 			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2755 			.cra_blocksize = SHA256_BLOCK_SIZE,
2756 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2757 			.cra_alignmask = 3,
2758 			.cra_module = THIS_MODULE,
2759 			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
2760 			.cra_exit = artpec6_crypto_ahash_exit,
2761 		}
2762 	},
2763 };
2764 
2765 static struct ahash_alg artpec7_hash_algos[] = {
2766 	/* SHA-384 */
2767 	{
2768 		.init = artpec6_crypto_sha384_init,
2769 		.update = artpec6_crypto_hash_update,
2770 		.final = artpec6_crypto_hash_final,
2771 		.digest = artpec6_crypto_sha384_digest,
2772 		.import = artpec6_crypto_hash_import,
2773 		.export = artpec6_crypto_hash_export,
2774 		.halg.digestsize = SHA384_DIGEST_SIZE,
2775 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2776 		.halg.base = {
2777 			.cra_name = "sha384",
2778 			.cra_driver_name = "artpec-sha384",
2779 			.cra_priority = 300,
2780 			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2781 			.cra_blocksize = SHA384_BLOCK_SIZE,
2782 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2783 			.cra_alignmask = 3,
2784 			.cra_module = THIS_MODULE,
2785 			.cra_init = artpec6_crypto_ahash_init,
2786 			.cra_exit = artpec6_crypto_ahash_exit,
2787 		}
2788 	},
2789 	/* HMAC SHA-384 */
2790 	{
2791 		.init = artpec6_crypto_hmac_sha384_init,
2792 		.update = artpec6_crypto_hash_update,
2793 		.final = artpec6_crypto_hash_final,
2794 		.digest = artpec6_crypto_hmac_sha384_digest,
2795 		.import = artpec6_crypto_hash_import,
2796 		.export = artpec6_crypto_hash_export,
2797 		.setkey = artpec6_crypto_hash_set_key,
2798 		.halg.digestsize = SHA384_DIGEST_SIZE,
2799 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2800 		.halg.base = {
2801 			.cra_name = "hmac(sha384)",
2802 			.cra_driver_name = "artpec-hmac-sha384",
2803 			.cra_priority = 300,
2804 			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2805 			.cra_blocksize = SHA384_BLOCK_SIZE,
2806 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2807 			.cra_alignmask = 3,
2808 			.cra_module = THIS_MODULE,
2809 			.cra_init = artpec6_crypto_ahash_init_hmac_sha384,
2810 			.cra_exit = artpec6_crypto_ahash_exit,
2811 		}
2812 	},
2813 	/* SHA-512 */
2814 	{
2815 		.init = artpec6_crypto_sha512_init,
2816 		.update = artpec6_crypto_hash_update,
2817 		.final = artpec6_crypto_hash_final,
2818 		.digest = artpec6_crypto_sha512_digest,
2819 		.import = artpec6_crypto_hash_import,
2820 		.export = artpec6_crypto_hash_export,
2821 		.halg.digestsize = SHA512_DIGEST_SIZE,
2822 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2823 		.halg.base = {
2824 			.cra_name = "sha512",
2825 			.cra_driver_name = "artpec-sha512",
2826 			.cra_priority = 300,
2827 			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2828 			.cra_blocksize = SHA512_BLOCK_SIZE,
2829 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2830 			.cra_alignmask = 3,
2831 			.cra_module = THIS_MODULE,
2832 			.cra_init = artpec6_crypto_ahash_init,
2833 			.cra_exit = artpec6_crypto_ahash_exit,
2834 		}
2835 	},
2836 	/* HMAC SHA-512 */
2837 	{
2838 		.init = artpec6_crypto_hmac_sha512_init,
2839 		.update = artpec6_crypto_hash_update,
2840 		.final = artpec6_crypto_hash_final,
2841 		.digest = artpec6_crypto_hmac_sha512_digest,
2842 		.import = artpec6_crypto_hash_import,
2843 		.export = artpec6_crypto_hash_export,
2844 		.setkey = artpec6_crypto_hash_set_key,
2845 		.halg.digestsize = SHA512_DIGEST_SIZE,
2846 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2847 		.halg.base = {
2848 			.cra_name = "hmac(sha512)",
2849 			.cra_driver_name = "artpec-hmac-sha512",
2850 			.cra_priority = 300,
2851 			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
2852 			.cra_blocksize = SHA512_BLOCK_SIZE,
2853 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2854 			.cra_alignmask = 3,
2855 			.cra_module = THIS_MODULE,
2856 			.cra_init = artpec6_crypto_ahash_init_hmac_sha512,
2857 			.cra_exit = artpec6_crypto_ahash_exit,
2858 		}
2859 	},
2860 };
2861 
2862 /* Crypto */
2863 static struct skcipher_alg crypto_algos[] = {
2864 	/* AES - ECB */
2865 	{
2866 		.base = {
2867 			.cra_name = "ecb(aes)",
2868 			.cra_driver_name = "artpec6-ecb-aes",
2869 			.cra_priority = 300,
2870 			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2871 				     CRYPTO_ALG_ASYNC,
2872 			.cra_blocksize = AES_BLOCK_SIZE,
2873 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2874 			.cra_alignmask = 3,
2875 			.cra_module = THIS_MODULE,
2876 		},
2877 		.min_keysize = AES_MIN_KEY_SIZE,
2878 		.max_keysize = AES_MAX_KEY_SIZE,
2879 		.setkey = artpec6_crypto_cipher_set_key,
2880 		.encrypt = artpec6_crypto_encrypt,
2881 		.decrypt = artpec6_crypto_decrypt,
2882 		.init = artpec6_crypto_aes_ecb_init,
2883 		.exit = artpec6_crypto_aes_exit,
2884 	},
2885 	/* AES - CTR */
2886 	{
2887 		.base = {
2888 			.cra_name = "ctr(aes)",
2889 			.cra_driver_name = "artpec6-ctr-aes",
2890 			.cra_priority = 300,
2891 			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2892 				     CRYPTO_ALG_ASYNC |
2893 				     CRYPTO_ALG_NEED_FALLBACK,
2894 			.cra_blocksize = 1,
2895 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2896 			.cra_alignmask = 3,
2897 			.cra_module = THIS_MODULE,
2898 		},
2899 		.min_keysize = AES_MIN_KEY_SIZE,
2900 		.max_keysize = AES_MAX_KEY_SIZE,
2901 		.ivsize = AES_BLOCK_SIZE,
2902 		.setkey = artpec6_crypto_cipher_set_key,
2903 		.encrypt = artpec6_crypto_ctr_encrypt,
2904 		.decrypt = artpec6_crypto_ctr_decrypt,
2905 		.init = artpec6_crypto_aes_ctr_init,
2906 		.exit = artpec6_crypto_aes_ctr_exit,
2907 	},
2908 	/* AES - CBC */
2909 	{
2910 		.base = {
2911 			.cra_name = "cbc(aes)",
2912 			.cra_driver_name = "artpec6-cbc-aes",
2913 			.cra_priority = 300,
2914 			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2915 				     CRYPTO_ALG_ASYNC,
2916 			.cra_blocksize = AES_BLOCK_SIZE,
2917 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2918 			.cra_alignmask = 3,
2919 			.cra_module = THIS_MODULE,
2920 		},
2921 		.min_keysize = AES_MIN_KEY_SIZE,
2922 		.max_keysize = AES_MAX_KEY_SIZE,
2923 		.ivsize = AES_BLOCK_SIZE,
2924 		.setkey = artpec6_crypto_cipher_set_key,
2925 		.encrypt = artpec6_crypto_encrypt,
2926 		.decrypt = artpec6_crypto_decrypt,
2927 		.init = artpec6_crypto_aes_cbc_init,
2928 		.exit = artpec6_crypto_aes_exit
2929 	},
2930 	/* AES - XTS */
2931 	{
2932 		.base = {
2933 			.cra_name = "xts(aes)",
2934 			.cra_driver_name = "artpec6-xts-aes",
2935 			.cra_priority = 300,
2936 			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2937 				     CRYPTO_ALG_ASYNC,
2938 			.cra_blocksize = 1,
2939 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2940 			.cra_alignmask = 3,
2941 			.cra_module = THIS_MODULE,
2942 		},
2943 		.min_keysize = 2*AES_MIN_KEY_SIZE,
2944 		.max_keysize = 2*AES_MAX_KEY_SIZE,
2945 		.ivsize = 16,
2946 		.setkey = artpec6_crypto_xts_set_key,
2947 		.encrypt = artpec6_crypto_encrypt,
2948 		.decrypt = artpec6_crypto_decrypt,
2949 		.init = artpec6_crypto_aes_xts_init,
2950 		.exit = artpec6_crypto_aes_exit,
2951 	},
2952 };
2953 
2954 static struct aead_alg aead_algos[] = {
2955 	{
2956 		.init   = artpec6_crypto_aead_init,
2957 		.setkey = artpec6_crypto_aead_set_key,
2958 		.encrypt = artpec6_crypto_aead_encrypt,
2959 		.decrypt = artpec6_crypto_aead_decrypt,
2960 		.ivsize = GCM_AES_IV_SIZE,
2961 		.maxauthsize = AES_BLOCK_SIZE,
2962 
2963 		.base = {
2964 			.cra_name = "gcm(aes)",
2965 			.cra_driver_name = "artpec-gcm-aes",
2966 			.cra_priority = 300,
2967 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
2968 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
2969 			.cra_blocksize = 1,
2970 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2971 			.cra_alignmask = 3,
2972 			.cra_module = THIS_MODULE,
2973 		},
2974 	}
2975 };
2976 
2977 #ifdef CONFIG_DEBUG_FS
2978 
2979 struct dbgfs_u32 {
2980 	char *name;
2981 	mode_t mode;
2982 	u32 *flag;
2983 	char *desc;
2984 };
2985 
2986 static struct dentry *dbgfs_root;
2987 
2988 static void artpec6_crypto_init_debugfs(void)
2989 {
2990 	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
2991 
2992 	if (!dbgfs_root || IS_ERR(dbgfs_root)) {
2993 		dbgfs_root = NULL;
2994 		pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
2995 		return;
2996 	}
2997 
2998 #ifdef CONFIG_FAULT_INJECTION
2999 	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
3000 				  &artpec6_crypto_fail_status_read);
3001 
3002 	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
3003 				  &artpec6_crypto_fail_dma_array_full);
3004 #endif
3005 }
3006 
3007 static void artpec6_crypto_free_debugfs(void)
3008 {
3009 	if (!dbgfs_root)
3010 		return;
3011 
3012 	debugfs_remove_recursive(dbgfs_root);
3013 	dbgfs_root = NULL;
3014 }
3015 #endif
3016 
3017 static const struct of_device_id artpec6_crypto_of_match[] = {
3018 	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
3019 	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
3020 	{}
3021 };
3022 MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
3023 
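/* Map the register bank, set up the DMA descriptor cache and the
 * cache-line aligned pad/zero buffers, initialise the hardware and
 * register the hash, skcipher and AEAD algorithms.
 */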
3024 static int artpec6_crypto_probe(struct platform_device *pdev)
3025 {
3026 	const struct of_device_id *match;
3027 	enum artpec6_crypto_variant variant;
3028 	struct artpec6_crypto *ac;
3029 	struct device *dev = &pdev->dev;
3030 	void __iomem *base;
3031 	struct resource *res;
3032 	int irq;
3033 	int err;
3034 
3035 	if (artpec6_crypto_dev)
3036 		return -ENODEV;
3037 
3038 	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
3039 	if (!match)
3040 		return -EINVAL;
3041 
3042 	variant = (enum artpec6_crypto_variant)match->data;
3043 
3044 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3045 	base = devm_ioremap_resource(&pdev->dev, res);
3046 	if (IS_ERR(base))
3047 		return PTR_ERR(base);
3048 
3049 	irq = platform_get_irq(pdev, 0);
3050 	if (irq < 0)
3051 		return -ENODEV;
3052 
3053 	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
3054 			  GFP_KERNEL);
3055 	if (!ac)
3056 		return -ENOMEM;
3057 
3058 	platform_set_drvdata(pdev, ac);
3059 	ac->variant = variant;
3060 
3061 	spin_lock_init(&ac->queue_lock);
3062 	INIT_LIST_HEAD(&ac->queue);
3063 	INIT_LIST_HEAD(&ac->pending);
3064 	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
3065 
3066 	ac->base = base;
3067 
3068 	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
3069 		sizeof(struct artpec6_crypto_dma_descriptors),
3070 		64,
3071 		0,
3072 		NULL);
3073 	if (!ac->dma_cache)
3074 		return -ENOMEM;
3075 
3076 #ifdef CONFIG_DEBUG_FS
3077 	artpec6_crypto_init_debugfs();
3078 #endif
3079 
3080 	tasklet_init(&ac->task, artpec6_crypto_task,
3081 		     (unsigned long)ac);
3082 
3083 	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
3084 				      GFP_KERNEL);
3085 	if (!ac->pad_buffer)
3086 		return -ENOMEM;
3087 	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
3088 
3089 	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
3090 				      GFP_KERNEL);
3091 	if (!ac->zero_buffer)
3092 		return -ENOMEM;
3093 	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
3094 
3095 	err = init_crypto_hw(ac);
3096 	if (err)
3097 		goto free_cache;
3098 
3099 	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
3100 			       "artpec6-crypto", ac);
3101 	if (err)
3102 		goto disable_hw;
3103 
3104 	artpec6_crypto_dev = &pdev->dev;
3105 
3106 	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
3107 	if (err) {
3108 		dev_err(dev, "Failed to register ahashes\n");
3109 		goto disable_hw;
3110 	}
3111 
3112 	if (variant != ARTPEC6_CRYPTO) {
3113 		err = crypto_register_ahashes(artpec7_hash_algos,
3114 					      ARRAY_SIZE(artpec7_hash_algos));
3115 		if (err) {
3116 			dev_err(dev, "Failed to register ahashes\n");
3117 			goto unregister_ahashes;
3118 		}
3119 	}
3120 
3121 	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
3122 	if (err) {
3123 		dev_err(dev, "Failed to register ciphers\n");
3124 		goto unregister_a7_ahashes;
3125 	}
3126 
3127 	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
3128 	if (err) {
3129 		dev_err(dev, "Failed to register aeads\n");
3130 		goto unregister_algs;
3131 	}
3132 
3133 	return 0;
3134 
3135 unregister_algs:
3136 	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
3137 unregister_a7_ahashes:
3138 	if (variant != ARTPEC6_CRYPTO)
3139 		crypto_unregister_ahashes(artpec7_hash_algos,
3140 					  ARRAY_SIZE(artpec7_hash_algos));
3141 unregister_ahashes:
3142 	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
3143 disable_hw:
3144 	artpec6_crypto_disable_hw(ac);
3145 free_cache:
3146 	kmem_cache_destroy(ac->dma_cache);
3147 	return err;
3148 }
3149 
3150 static int artpec6_crypto_remove(struct platform_device *pdev)
3151 {
3152 	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
3153 	int irq = platform_get_irq(pdev, 0);
3154 
3155 	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
3156 	if (ac->variant != ARTPEC6_CRYPTO)
3157 		crypto_unregister_ahashes(artpec7_hash_algos,
3158 					  ARRAY_SIZE(artpec7_hash_algos));
3159 	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
3160 	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
3161 
3162 	tasklet_disable(&ac->task);
3163 	devm_free_irq(&pdev->dev, irq, ac);
3164 	tasklet_kill(&ac->task);
3165 	del_timer_sync(&ac->timer);
3166 
3167 	artpec6_crypto_disable_hw(ac);
3168 
3169 	kmem_cache_destroy(ac->dma_cache);
3170 #ifdef CONFIG_DEBUG_FS
3171 	artpec6_crypto_free_debugfs();
3172 #endif
3173 	return 0;
3174 }
3175 
3176 static struct platform_driver artpec6_crypto_driver = {
3177 	.probe   = artpec6_crypto_probe,
3178 	.remove  = artpec6_crypto_remove,
3179 	.driver  = {
3180 		.name  = "artpec6-crypto",
3181 		.owner = THIS_MODULE,
3182 		.of_match_table = artpec6_crypto_of_match,
3183 	},
3184 };
3185 
3186 module_platform_driver(artpec6_crypto_driver);
3187 
3188 MODULE_AUTHOR("Axis Communications AB");
3189 MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
3190 MODULE_LICENSE("GPL");
3191