1 /*
2  *   Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
3  *
4  *    Copyright (C) 2014-2017  Axis Communications AB
5  */
6 #define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
7 
8 #include <linux/bitfield.h>
9 #include <linux/crypto.h>
10 #include <linux/debugfs.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/fault-inject.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/platform_device.h>
21 #include <linux/scatterlist.h>
22 #include <linux/slab.h>
23 
24 #include <crypto/aes.h>
25 #include <crypto/gcm.h>
26 #include <crypto/internal/aead.h>
27 #include <crypto/internal/hash.h>
28 #include <crypto/internal/skcipher.h>
29 #include <crypto/scatterwalk.h>
30 #include <crypto/sha.h>
31 #include <crypto/xts.h>
32 
33 /* Max length of a line in all cache levels for Artpec SoCs. */
34 #define ARTPEC_CACHE_LINE_MAX	32
35 
36 #define PDMA_OUT_CFG		0x0000
37 #define PDMA_OUT_BUF_CFG	0x0004
38 #define PDMA_OUT_CMD		0x0008
39 #define PDMA_OUT_DESCRQ_PUSH	0x0010
40 #define PDMA_OUT_DESCRQ_STAT	0x0014
41 
42 #define A6_PDMA_IN_CFG		0x0028
43 #define A6_PDMA_IN_BUF_CFG	0x002c
44 #define A6_PDMA_IN_CMD		0x0030
45 #define A6_PDMA_IN_STATQ_PUSH	0x0038
46 #define A6_PDMA_IN_DESCRQ_PUSH	0x0044
47 #define A6_PDMA_IN_DESCRQ_STAT	0x0048
48 #define A6_PDMA_INTR_MASK	0x0068
49 #define A6_PDMA_ACK_INTR	0x006c
50 #define A6_PDMA_MASKED_INTR	0x0074
51 
52 #define A7_PDMA_IN_CFG		0x002c
53 #define A7_PDMA_IN_BUF_CFG	0x0030
54 #define A7_PDMA_IN_CMD		0x0034
55 #define A7_PDMA_IN_STATQ_PUSH	0x003c
56 #define A7_PDMA_IN_DESCRQ_PUSH	0x0048
57 #define A7_PDMA_IN_DESCRQ_STAT	0x004C
58 #define A7_PDMA_INTR_MASK	0x006c
59 #define A7_PDMA_ACK_INTR	0x0070
60 #define A7_PDMA_MASKED_INTR	0x0078
61 
62 #define PDMA_OUT_CFG_EN				BIT(0)
63 
64 #define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
65 #define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
66 
67 #define PDMA_OUT_CMD_START			BIT(0)
68 #define A6_PDMA_OUT_CMD_STOP			BIT(3)
69 #define A7_PDMA_OUT_CMD_STOP			BIT(2)
70 
71 #define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
72 #define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)
73 
74 #define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
75 #define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)
76 
77 #define PDMA_IN_CFG_EN				BIT(0)
78 
79 #define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
80 #define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
81 #define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)
82 
83 #define PDMA_IN_CMD_START			BIT(0)
84 #define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
85 #define A6_PDMA_IN_CMD_STOP			BIT(3)
86 #define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
87 #define A7_PDMA_IN_CMD_STOP			BIT(2)
88 
89 #define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
90 #define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)
91 
92 #define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
93 #define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)
94 
95 #define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
96 #define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)
97 
98 #define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
99 #define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
100 #define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)
101 
102 #define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
103 #define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
104 #define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)
105 
106 #define A6_CRY_MD_OPER		GENMASK(19, 16)
107 
108 #define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
109 #define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)
110 
111 #define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
112 #define A6_CRY_MD_CIPHER_DECR	BIT(22)
113 #define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
114 #define A6_CRY_MD_CIPHER_DSEQ	BIT(24)
115 
116 #define A7_CRY_MD_OPER		GENMASK(11, 8)
117 
118 #define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
119 #define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)
120 
121 #define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
122 #define A7_CRY_MD_CIPHER_DECR	BIT(14)
123 #define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
124 #define A7_CRY_MD_CIPHER_DSEQ	BIT(16)
125 
126 /* DMA metadata constants */
127 #define regk_crypto_aes_cbc     0x00000002
128 #define regk_crypto_aes_ctr     0x00000003
129 #define regk_crypto_aes_ecb     0x00000001
130 #define regk_crypto_aes_gcm     0x00000004
131 #define regk_crypto_aes_xts     0x00000005
132 #define regk_crypto_cache       0x00000002
133 #define a6_regk_crypto_dlkey    0x0000000a
134 #define a7_regk_crypto_dlkey    0x0000000e
135 #define regk_crypto_ext         0x00000001
136 #define regk_crypto_hmac_sha1   0x00000007
137 #define regk_crypto_hmac_sha256 0x00000009
138 #define regk_crypto_hmac_sha384 0x0000000b
139 #define regk_crypto_hmac_sha512 0x0000000d
140 #define regk_crypto_init        0x00000000
141 #define regk_crypto_key_128     0x00000000
142 #define regk_crypto_key_192     0x00000001
143 #define regk_crypto_key_256     0x00000002
144 #define regk_crypto_null        0x00000000
145 #define regk_crypto_sha1        0x00000006
146 #define regk_crypto_sha256      0x00000008
147 #define regk_crypto_sha384      0x0000000a
148 #define regk_crypto_sha512      0x0000000c
149 
150 /* DMA descriptor structures */
151 struct pdma_descr_ctrl  {
152 	unsigned char short_descr : 1;
153 	unsigned char pad1        : 1;
154 	unsigned char eop         : 1;
155 	unsigned char intr        : 1;
156 	unsigned char short_len   : 3;
157 	unsigned char pad2        : 1;
158 } __packed;
159 
160 struct pdma_data_descr {
161 	unsigned int len : 24;
162 	unsigned int buf : 32;
163 } __packed;
164 
165 struct pdma_short_descr {
166 	unsigned char data[7];
167 } __packed;
168 
169 struct pdma_descr {
170 	struct pdma_descr_ctrl ctrl;
171 	union {
172 		struct pdma_data_descr   data;
173 		struct pdma_short_descr  shrt;
174 	};
175 };
176 
177 struct pdma_stat_descr {
178 	unsigned char pad1        : 1;
179 	unsigned char pad2        : 1;
180 	unsigned char eop         : 1;
181 	unsigned char pad3        : 5;
182 	unsigned int  len         : 24;
183 };
184 
185 /* Each descriptor array can hold max 64 entries */
186 #define PDMA_DESCR_COUNT	64
187 
188 #define MODULE_NAME   "Artpec-6 CA"
189 
190 /* Hash modes (including HMAC variants) */
191 #define ARTPEC6_CRYPTO_HASH_SHA1	1
192 #define ARTPEC6_CRYPTO_HASH_SHA256	2
193 #define ARTPEC6_CRYPTO_HASH_SHA384	3
194 #define ARTPEC6_CRYPTO_HASH_SHA512	4
195 
196 /* Crypto modes */
197 #define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
198 #define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
199 #define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
200 #define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5
201 
202 /* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
203  * It operates on a descriptor array with up to 64 descriptor entries.
204  * The arrays must be 64 byte aligned in memory.
205  *
 * The ciphering unit has no registers and is completely controlled by
 * the 4 bytes of metadata inserted at the beginning of each DMA packet.
208  *
209  * A dma packet is a sequence of descriptors terminated by setting the .eop
210  * field in the final descriptor of the packet.
211  *
212  * Multiple packets are used for providing context data, key data and
213  * the plain/ciphertext.
214  *
215  *   PDMA Descriptors (Array)
216  *  +------+------+------+~~+-------+------+----
217  *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
218  *  +--+---+--+---+----+-+~~+-------+----+-+----
219  *     |      |        |       |         |
220  *     |      |        |       |         |
221  *   __|__  +-------++-------++-------+ +----+
222  *  | MD  | |Payload||Payload||Payload| | MD |
223  *  +-----+ +-------++-------++-------+ +----+
224  */
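
/*
 * Rough illustration of a typical flow (see artpec6_crypto_prepare_crypto()
 * for the authoritative layout): an AES cipher request is issued as a key
 * packet (metadata word + key material, terminated with .eop) followed by a
 * data packet (metadata word, IV if any, then the payload). On the in
 * channel the first 4 bytes received are the echoed metadata, which the
 * driver discards into a scratch buffer, followed by the result data; the
 * .intr flag is set on the last in descriptor.
 */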
225 
226 struct artpec6_crypto_bounce_buffer {
227 	struct list_head list;
228 	size_t length;
229 	struct scatterlist *sg;
230 	size_t offset;
231 	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
233 	 */
234 	void *buf;
235 };
236 
237 struct artpec6_crypto_dma_map {
238 	dma_addr_t dma_addr;
239 	size_t size;
240 	enum dma_data_direction dir;
241 };
242 
243 struct artpec6_crypto_dma_descriptors {
244 	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
245 	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
246 	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
247 	struct list_head bounce_buffers;
248 	/* Enough maps for all out/in buffers, and all three descr. arrays */
249 	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
250 	dma_addr_t out_dma_addr;
251 	dma_addr_t in_dma_addr;
252 	dma_addr_t stat_dma_addr;
253 	size_t out_cnt;
254 	size_t in_cnt;
255 	size_t map_count;
256 };
257 
258 enum artpec6_crypto_variant {
259 	ARTPEC6_CRYPTO,
260 	ARTPEC7_CRYPTO,
261 };
262 
263 struct artpec6_crypto {
264 	void __iomem *base;
265 	spinlock_t queue_lock;
266 	struct list_head queue; /* waiting for pdma fifo space */
267 	struct list_head pending; /* submitted to pdma fifo */
268 	struct tasklet_struct task;
269 	struct kmem_cache *dma_cache;
270 	int pending_count;
271 	struct timer_list timer;
272 	enum artpec6_crypto_variant variant;
273 	void *pad_buffer; /* cache-aligned block padding buffer */
274 	void *zero_buffer;
275 };
276 
277 enum artpec6_crypto_hash_flags {
278 	HASH_FLAG_INIT_CTX = 2,
279 	HASH_FLAG_UPDATE = 4,
280 	HASH_FLAG_FINALIZE = 8,
281 	HASH_FLAG_HMAC = 16,
282 	HASH_FLAG_UPDATE_KEY = 32,
283 };
284 
285 struct artpec6_crypto_req_common {
286 	struct list_head list;
287 	struct artpec6_crypto_dma_descriptors *dma;
288 	struct crypto_async_request *req;
289 	void (*complete)(struct crypto_async_request *req);
290 	gfp_t gfp_flags;
291 };
292 
293 struct artpec6_hash_request_context {
294 	char partial_buffer[SHA512_BLOCK_SIZE];
295 	char partial_buffer_out[SHA512_BLOCK_SIZE];
296 	char key_buffer[SHA512_BLOCK_SIZE];
297 	char pad_buffer[SHA512_BLOCK_SIZE + 32];
298 	unsigned char digeststate[SHA512_DIGEST_SIZE];
299 	size_t partial_bytes;
300 	u64 digcnt;
301 	u32 key_md;
302 	u32 hash_md;
303 	enum artpec6_crypto_hash_flags hash_flags;
304 	struct artpec6_crypto_req_common common;
305 };
306 
307 struct artpec6_hash_export_state {
308 	char partial_buffer[SHA512_BLOCK_SIZE];
309 	unsigned char digeststate[SHA512_DIGEST_SIZE];
310 	size_t partial_bytes;
311 	u64 digcnt;
312 	int oper;
313 	unsigned int hash_flags;
314 };
315 
316 struct artpec6_hashalg_context {
317 	char hmac_key[SHA512_BLOCK_SIZE];
318 	size_t hmac_key_length;
319 	struct crypto_shash *child_hash;
320 };
321 
322 struct artpec6_crypto_request_context {
323 	u32 cipher_md;
324 	bool decrypt;
325 	struct artpec6_crypto_req_common common;
326 };
327 
328 struct artpec6_cryptotfm_context {
329 	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
330 	size_t key_length;
331 	u32 key_md;
332 	int crypto_type;
333 	struct crypto_sync_skcipher *fallback;
334 };
335 
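/*
 * Per-request GCM context that is sent to the hardware ahead of the data
 * (see artpec6_crypto_prepare_aead()): the AAD and text lengths in bits and
 * the initial counter block J0.
 */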
336 struct artpec6_crypto_aead_hw_ctx {
337 	__be64	aad_length_bits;
338 	__be64  text_length_bits;
339 	__u8	J0[AES_BLOCK_SIZE];
340 };
341 
342 struct artpec6_crypto_aead_req_ctx {
343 	struct artpec6_crypto_aead_hw_ctx hw_ctx;
344 	u32 cipher_md;
345 	bool decrypt;
346 	struct artpec6_crypto_req_common common;
347 	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
348 };
349 
350 /* The crypto framework makes it hard to avoid this global. */
351 static struct device *artpec6_crypto_dev;
352 
353 #ifdef CONFIG_FAULT_INJECTION
354 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
355 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
356 #endif
357 
358 enum {
359 	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
360 	ARTPEC6_CRYPTO_PREPARE_HASH_START,
361 };
362 
363 static int artpec6_crypto_prepare_aead(struct aead_request *areq);
364 static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
365 static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
366 
367 static void
368 artpec6_crypto_complete_crypto(struct crypto_async_request *req);
369 static void
370 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
371 static void
372 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
373 static void
374 artpec6_crypto_complete_aead(struct crypto_async_request *req);
375 static void
376 artpec6_crypto_complete_hash(struct crypto_async_request *req);
377 
378 static int
379 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);
380 
381 static void
382 artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
383 
384 struct artpec6_crypto_walk {
385 	struct scatterlist *sg;
386 	size_t offset;
387 };
388 
389 static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
390 				     struct scatterlist *sg)
391 {
392 	awalk->sg = sg;
393 	awalk->offset = 0;
394 }
395 
396 static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
397 					  size_t nbytes)
398 {
399 	while (nbytes && awalk->sg) {
400 		size_t piece;
401 
402 		WARN_ON(awalk->offset > awalk->sg->length);
403 
404 		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
405 		nbytes -= piece;
406 		awalk->offset += piece;
407 		if (awalk->offset == awalk->sg->length) {
408 			awalk->sg = sg_next(awalk->sg);
409 			awalk->offset = 0;
410 		}
411 
412 	}
413 
414 	return nbytes;
415 }
416 
417 static size_t
418 artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
419 {
420 	WARN_ON(awalk->sg->length == awalk->offset);
421 
422 	return awalk->sg->length - awalk->offset;
423 }
424 
425 static dma_addr_t
426 artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
427 {
428 	return sg_phys(awalk->sg) + awalk->offset;
429 }
430 
431 static void
432 artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
433 {
434 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
435 	struct artpec6_crypto_bounce_buffer *b;
436 	struct artpec6_crypto_bounce_buffer *next;
437 
438 	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
439 		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
440 			 b, b->length, b->offset, b->buf);
441 		sg_pcopy_from_buffer(b->sg,
442 				   1,
443 				   b->buf,
444 				   b->length,
445 				   b->offset);
446 
447 		list_del(&b->list);
448 		kfree(b);
449 	}
450 }
451 
452 static inline bool artpec6_crypto_busy(void)
453 {
454 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
455 	int fifo_count = ac->pending_count;
456 
457 	return fifo_count > 6;
458 }
459 
460 static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
461 {
462 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
463 	int ret = -EBUSY;
464 
465 	spin_lock_bh(&ac->queue_lock);
466 
467 	if (!artpec6_crypto_busy()) {
468 		list_add_tail(&req->list, &ac->pending);
469 		artpec6_crypto_start_dma(req);
470 		ret = -EINPROGRESS;
471 	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
472 		list_add_tail(&req->list, &ac->queue);
473 	} else {
474 		artpec6_crypto_common_destroy(req);
475 	}
476 
477 	spin_unlock_bh(&ac->queue_lock);
478 
479 	return ret;
480 }
481 
482 static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
483 {
484 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
485 	enum artpec6_crypto_variant variant = ac->variant;
486 	void __iomem *base = ac->base;
487 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
488 	u32 ind, statd, outd;
489 
490 	/* Make descriptor content visible to the DMA before starting it. */
491 	wmb();
492 
493 	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
494 	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);
495 
496 	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
497 		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);
498 
499 	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
500 	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);
501 
502 	if (variant == ARTPEC6_CRYPTO) {
503 		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
504 		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
505 		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
506 	} else {
507 		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
508 		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
509 		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
510 	}
511 
512 	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
513 	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);
514 
515 	ac->pending_count++;
516 }
517 
518 static void
519 artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
520 {
521 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
522 
523 	dma->out_cnt = 0;
524 	dma->in_cnt = 0;
525 	dma->map_count = 0;
526 	INIT_LIST_HEAD(&dma->bounce_buffers);
527 }
528 
529 static bool fault_inject_dma_descr(void)
530 {
531 #ifdef CONFIG_FAULT_INJECTION
532 	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
533 #else
534 	return false;
535 #endif
536 }
537 
538 /** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
539  *                                        physical address
540  *
541  * @addr: The physical address of the data buffer
542  * @len:  The length of the data buffer
543  * @eop:  True if this is the last buffer in the packet
544  *
545  * @return 0 on success or -ENOSPC if there are no more descriptors available
546  */
547 static int
548 artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
549 				    dma_addr_t addr, size_t len, bool eop)
550 {
551 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
552 	struct pdma_descr *d;
553 
554 	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
555 	    fault_inject_dma_descr()) {
556 		pr_err("No free OUT DMA descriptors available!\n");
557 		return -ENOSPC;
558 	}
559 
560 	d = &dma->out[dma->out_cnt++];
561 	memset(d, 0, sizeof(*d));
562 
563 	d->ctrl.short_descr = 0;
564 	d->ctrl.eop = eop;
565 	d->data.len = len;
566 	d->data.buf = addr;
567 	return 0;
568 }
569 
570 /** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
571  *
572  * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
574  * @eop: True if this is the last buffer in the packet
575  *
576  * @return 0 on success
577  *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length is not between 1 and 7 bytes
579  */
580 static int
581 artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
582 				     void *dst, unsigned int len, bool eop)
583 {
584 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
585 	struct pdma_descr *d;
586 
587 	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
588 	    fault_inject_dma_descr()) {
589 		pr_err("No free OUT DMA descriptors available!\n");
590 		return -ENOSPC;
591 	} else if (len > 7 || len < 1) {
592 		return -EINVAL;
593 	}
594 	d = &dma->out[dma->out_cnt++];
595 	memset(d, 0, sizeof(*d));
596 
597 	d->ctrl.short_descr = 1;
598 	d->ctrl.short_len = len;
599 	d->ctrl.eop = eop;
600 	memcpy(d->shrt.data, dst, len);
601 	return 0;
602 }
603 
604 static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
605 				      struct page *page, size_t offset,
606 				      size_t size,
607 				      enum dma_data_direction dir,
608 				      dma_addr_t *dma_addr_out)
609 {
610 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
611 	struct device *dev = artpec6_crypto_dev;
612 	struct artpec6_crypto_dma_map *map;
613 	dma_addr_t dma_addr;
614 
615 	*dma_addr_out = 0;
616 
617 	if (dma->map_count >= ARRAY_SIZE(dma->maps))
618 		return -ENOMEM;
619 
620 	dma_addr = dma_map_page(dev, page, offset, size, dir);
621 	if (dma_mapping_error(dev, dma_addr))
622 		return -ENOMEM;
623 
624 	map = &dma->maps[dma->map_count++];
625 	map->size = size;
626 	map->dma_addr = dma_addr;
627 	map->dir = dir;
628 
629 	*dma_addr_out = dma_addr;
630 
631 	return 0;
632 }
633 
634 static int
635 artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
636 			      void *ptr, size_t size,
637 			      enum dma_data_direction dir,
638 			      dma_addr_t *dma_addr_out)
639 {
640 	struct page *page = virt_to_page(ptr);
641 	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;
642 
643 	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
644 					  dma_addr_out);
645 }
646 
647 static int
648 artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
649 {
650 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
651 	int ret;
652 
653 	ret = artpec6_crypto_dma_map_single(common, dma->in,
654 				sizeof(dma->in[0]) * dma->in_cnt,
655 				DMA_TO_DEVICE, &dma->in_dma_addr);
656 	if (ret)
657 		return ret;
658 
659 	ret = artpec6_crypto_dma_map_single(common, dma->out,
660 				sizeof(dma->out[0]) * dma->out_cnt,
661 				DMA_TO_DEVICE, &dma->out_dma_addr);
662 	if (ret)
663 		return ret;
664 
665 	/* We only read one stat descriptor */
666 	dma->stat[dma->in_cnt - 1] = 0;
667 
668 	/*
669 	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
670 	 * to be written.
671 	 */
672 	return artpec6_crypto_dma_map_single(common,
673 				dma->stat + dma->in_cnt - 1,
674 				sizeof(dma->stat[0]),
675 				DMA_BIDIRECTIONAL,
676 				&dma->stat_dma_addr);
677 }
678 
679 static void
680 artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
681 {
682 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
683 	struct device *dev = artpec6_crypto_dev;
684 	int i;
685 
686 	for (i = 0; i < dma->map_count; i++) {
687 		struct artpec6_crypto_dma_map *map = &dma->maps[i];
688 
689 		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
690 	}
691 
692 	dma->map_count = 0;
693 }
694 
695 /** artpec6_crypto_setup_out_descr - Setup an out descriptor
696  *
697  * @dst: The virtual address of the data
698  * @len: The length of the data
699  * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is less than 7 bytes then
 *	a short descriptor will be used
702  *
703  * @return 0 on success
704  *	Any errors from artpec6_crypto_setup_out_descr_short() or
705  *	setup_out_descr_phys()
706  */
707 static int
708 artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
709 			       void *dst, unsigned int len, bool eop,
710 			       bool use_short)
711 {
712 	if (use_short && len < 7) {
713 		return artpec6_crypto_setup_out_descr_short(common, dst, len,
714 							    eop);
715 	} else {
716 		int ret;
717 		dma_addr_t dma_addr;
718 
719 		ret = artpec6_crypto_dma_map_single(common, dst, len,
720 						   DMA_TO_DEVICE,
721 						   &dma_addr);
722 		if (ret)
723 			return ret;
724 
725 		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
726 							   len, eop);
727 	}
728 }
729 
730 /** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
731  *                                       physical address
732  *
733  * @addr: The physical address of the data buffer
734  * @len:  The length of the data buffer
735  * @intr: True if an interrupt should be fired after HW processing of this
736  *	  descriptor
737  *
738  */
739 static int
740 artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
741 			       dma_addr_t addr, unsigned int len, bool intr)
742 {
743 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
744 	struct pdma_descr *d;
745 
746 	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
747 	    fault_inject_dma_descr()) {
748 		pr_err("No free IN DMA descriptors available!\n");
749 		return -ENOSPC;
750 	}
751 	d = &dma->in[dma->in_cnt++];
752 	memset(d, 0, sizeof(*d));
753 
754 	d->ctrl.intr = intr;
755 	d->data.len = len;
756 	d->data.buf = addr;
757 	return 0;
758 }
759 
760 /** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
761  *
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
766  *
767  * Short descriptors are not used for the in channel
768  */
769 static int
770 artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
771 			  void *buffer, unsigned int len, bool last)
772 {
773 	dma_addr_t dma_addr;
774 	int ret;
775 
776 	ret = artpec6_crypto_dma_map_single(common, buffer, len,
777 					   DMA_FROM_DEVICE, &dma_addr);
778 	if (ret)
779 		return ret;
780 
781 	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
782 }
783 
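/*
 * Allocate the bounce buffer descriptor together with two extra cache lines
 * of storage, so that a cache-line aligned region of ARTPEC_CACHE_LINE_MAX
 * bytes can always be carved out with PTR_ALIGN() below.
 */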
784 static struct artpec6_crypto_bounce_buffer *
785 artpec6_crypto_alloc_bounce(gfp_t flags)
786 {
787 	void *base;
788 	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
789 			    2 * ARTPEC_CACHE_LINE_MAX;
790 	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);
791 
792 	if (!bbuf)
793 		return NULL;
794 
795 	base = bbuf + 1;
796 	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
797 	return bbuf;
798 }
799 
800 static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
801 				  struct artpec6_crypto_walk *walk, size_t size)
802 {
803 	struct artpec6_crypto_bounce_buffer *bbuf;
804 	int ret;
805 
806 	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
807 	if (!bbuf)
808 		return -ENOMEM;
809 
810 	bbuf->length = size;
811 	bbuf->sg = walk->sg;
812 	bbuf->offset = walk->offset;
813 
814 	ret =  artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
815 	if (ret) {
816 		kfree(bbuf);
817 		return ret;
818 	}
819 
820 	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
821 	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
822 	return 0;
823 }
824 
825 static int
826 artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
827 				  struct artpec6_crypto_walk *walk,
828 				  size_t count)
829 {
830 	size_t chunk;
831 	int ret;
832 	dma_addr_t addr;
833 
834 	while (walk->sg && count) {
835 		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
836 		addr = artpec6_crypto_walk_chunk_phys(walk);
837 
838 		/* When destination buffers are not aligned to the cache line
839 		 * size we need bounce buffers. The DMA-API requires that the
		 * entire cache line is owned by the DMA buffer, and this also
		 * holds when coherent DMA is used.
842 		 */
843 		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
844 			chunk = min_t(dma_addr_t, chunk,
845 				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
846 				      addr);
847 
848 			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
849 			ret = setup_bounce_buffer_in(common, walk, chunk);
850 		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
851 			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
852 			ret = setup_bounce_buffer_in(common, walk, chunk);
853 		} else {
854 			dma_addr_t dma_addr;
855 
856 			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
857 
858 			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
859 
860 			ret = artpec6_crypto_dma_map_page(common,
861 							 sg_page(walk->sg),
862 							 walk->sg->offset +
863 							 walk->offset,
864 							 chunk,
865 							 DMA_FROM_DEVICE,
866 							 &dma_addr);
867 			if (ret)
868 				return ret;
869 
870 			ret = artpec6_crypto_setup_in_descr_phys(common,
871 								 dma_addr,
872 								 chunk, false);
873 		}
874 
875 		if (ret)
876 			return ret;
877 
878 		count = count - chunk;
879 		artpec6_crypto_walk_advance(walk, chunk);
880 	}
881 
882 	if (count)
883 		pr_err("EOL unexpected %zu bytes left\n", count);
884 
885 	return count ? -EINVAL : 0;
886 }
887 
888 static int
889 artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
890 				   struct artpec6_crypto_walk *walk,
891 				   size_t count)
892 {
893 	size_t chunk;
894 	int ret;
895 	dma_addr_t addr;
896 
897 	while (walk->sg && count) {
898 		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
899 		addr = artpec6_crypto_walk_chunk_phys(walk);
900 
901 		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
902 
903 		if (addr & 3) {
904 			char buf[3];
905 
906 			chunk = min_t(size_t, chunk, (4-(addr&3)));
907 
908 			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
909 					   walk->offset);
910 
911 			ret = artpec6_crypto_setup_out_descr_short(common, buf,
912 								   chunk,
913 								   false);
914 		} else {
915 			dma_addr_t dma_addr;
916 
917 			ret = artpec6_crypto_dma_map_page(common,
918 							 sg_page(walk->sg),
919 							 walk->sg->offset +
920 							 walk->offset,
921 							 chunk,
922 							 DMA_TO_DEVICE,
923 							 &dma_addr);
924 			if (ret)
925 				return ret;
926 
927 			ret = artpec6_crypto_setup_out_descr_phys(common,
928 								 dma_addr,
929 								 chunk, false);
930 		}
931 
932 		if (ret)
933 			return ret;
934 
935 		count = count - chunk;
936 		artpec6_crypto_walk_advance(walk, chunk);
937 	}
938 
939 	if (count)
940 		pr_err("EOL unexpected %zu bytes left\n", count);
941 
942 	return count ? -EINVAL : 0;
943 }
944 
945 
946 /** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
947  *
948  * If the out descriptor list is non-empty, then the eop flag on the
949  * last used out descriptor will be set.
950  *
951  * @return  0 on success
 *	-EINVAL if the out descriptor list is empty or has overflowed
953  */
954 static int
955 artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
956 {
957 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
958 	struct pdma_descr *d;
959 
960 	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
961 		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
963 		return -EINVAL;
964 
965 	}
966 
967 	d = &dma->out[dma->out_cnt-1];
968 	d->ctrl.eop = 1;
969 
970 	return 0;
971 }
972 
973 /** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
974  *                                       in descriptor
975  *
976  * See artpec6_crypto_terminate_out_descrs() for return values
977  */
978 static int
979 artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
980 {
981 	struct artpec6_crypto_dma_descriptors *dma = common->dma;
982 	struct pdma_descr *d;
983 
984 	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
985 		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
987 		return -EINVAL;
988 	}
989 
990 	d = &dma->in[dma->in_cnt-1];
991 	d->ctrl.intr = 1;
992 	return 0;
993 }
994 
995 /** create_hash_pad - Create a Secure Hash conformant pad
996  *
 * @dst:      The destination buffer to write the pad to. Must be large
 *            enough to hold a full pad (one block plus the length field)
 * @dgstlen:  The total number of bytes hashed so far
 * @bitcount: The total number of bits to encode in the length field
1000  *
1001  * @return The total number of padding bytes written to @dst
1002  */
1003 static size_t
1004 create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
1005 {
1006 	unsigned int mod, target, diff, pad_bytes, size_bytes;
1007 	__be64 bits = __cpu_to_be64(bitcount);
1008 
1009 	switch (oper) {
1010 	case regk_crypto_sha1:
1011 	case regk_crypto_sha256:
1012 	case regk_crypto_hmac_sha1:
1013 	case regk_crypto_hmac_sha256:
1014 		target = 448 / 8;
1015 		mod = 512 / 8;
1016 		size_bytes = 8;
1017 		break;
1018 	default:
1019 		target = 896 / 8;
1020 		mod = 1024 / 8;
1021 		size_bytes = 16;
1022 		break;
1023 	}
1024 
1025 	target -= 1;
1026 	diff = dgstlen & (mod - 1);
1027 	pad_bytes = diff > target ? target + mod - diff : target - diff;
1028 
1029 	memset(dst + 1, 0, pad_bytes);
1030 	dst[0] = 0x80;
1031 
1032 	if (size_bytes == 16) {
1033 		memset(dst + 1 + pad_bytes, 0, 8);
1034 		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
1035 	} else {
1036 		memcpy(dst + 1 + pad_bytes, &bits, 8);
1037 	}
1038 
1039 	return pad_bytes + size_bytes + 1;
1040 }
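
/*
 * Worked example of the padding arithmetic above (illustrative only): for
 * SHA-256 (mod = 64, size_bytes = 8) with dgstlen = 3 bytes hashed, diff = 3
 * and pad_bytes = 55 - 3 = 52, so the pad consists of the 0x80 byte, 52 zero
 * bytes and the 8-byte big-endian bit count, 61 bytes in total, rounding the
 * 3 message bytes up to one full 64-byte block.
 */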
1041 
1042 static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
1043 		struct crypto_async_request *parent,
1044 		void (*complete)(struct crypto_async_request *req),
1045 		struct scatterlist *dstsg, unsigned int nbytes)
1046 {
1047 	gfp_t flags;
1048 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1049 
1050 	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1051 		 GFP_KERNEL : GFP_ATOMIC;
1052 
1053 	common->gfp_flags = flags;
1054 	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
1055 	if (!common->dma)
1056 		return -ENOMEM;
1057 
1058 	common->req = parent;
1059 	common->complete = complete;
1060 	return 0;
1061 }
1062 
1063 static void
1064 artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
1065 {
1066 	struct artpec6_crypto_bounce_buffer *b;
1067 	struct artpec6_crypto_bounce_buffer *next;
1068 
1069 	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
1070 		kfree(b);
1071 	}
1072 }
1073 
1074 static int
1075 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
1076 {
1077 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1078 
1079 	artpec6_crypto_dma_unmap_all(common);
1080 	artpec6_crypto_bounce_destroy(common->dma);
1081 	kmem_cache_free(ac->dma_cache, common->dma);
1082 	common->dma = NULL;
1083 	return 0;
1084 }
1085 
1086 /*
1087  * Ciphering functions.
1088  */
1089 static int artpec6_crypto_encrypt(struct skcipher_request *req)
1090 {
1091 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
1092 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
1093 	struct artpec6_crypto_request_context *req_ctx = NULL;
1094 	void (*complete)(struct crypto_async_request *req);
1095 	int ret;
1096 
1097 	req_ctx = skcipher_request_ctx(req);
1098 
1099 	switch (ctx->crypto_type) {
1100 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1101 	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
1102 	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
1103 		req_ctx->decrypt = 0;
1104 		break;
1105 	default:
1106 		break;
1107 	}
1108 
1109 	switch (ctx->crypto_type) {
1110 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1111 		complete = artpec6_crypto_complete_cbc_encrypt;
1112 		break;
1113 	default:
1114 		complete = artpec6_crypto_complete_crypto;
1115 		break;
1116 	}
1117 
1118 	ret = artpec6_crypto_common_init(&req_ctx->common,
1119 				  &req->base,
1120 				  complete,
1121 				  req->dst, req->cryptlen);
1122 	if (ret)
1123 		return ret;
1124 
1125 	ret = artpec6_crypto_prepare_crypto(req);
1126 	if (ret) {
1127 		artpec6_crypto_common_destroy(&req_ctx->common);
1128 		return ret;
1129 	}
1130 
1131 	return artpec6_crypto_submit(&req_ctx->common);
1132 }
1133 
1134 static int artpec6_crypto_decrypt(struct skcipher_request *req)
1135 {
1136 	int ret;
1137 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
1138 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
1139 	struct artpec6_crypto_request_context *req_ctx = NULL;
1140 	void (*complete)(struct crypto_async_request *req);
1141 
1142 	req_ctx = skcipher_request_ctx(req);
1143 
1144 	switch (ctx->crypto_type) {
1145 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1146 	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
1147 	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
1148 		req_ctx->decrypt = 1;
1149 		break;
1150 	default:
1151 		break;
1152 	}
1153 
1154 
1155 	switch (ctx->crypto_type) {
1156 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1157 		complete = artpec6_crypto_complete_cbc_decrypt;
1158 		break;
1159 	default:
1160 		complete = artpec6_crypto_complete_crypto;
1161 		break;
1162 	}
1163 
1164 	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
1165 				  complete,
1166 				  req->dst, req->cryptlen);
1167 	if (ret)
1168 		return ret;
1169 
1170 	ret = artpec6_crypto_prepare_crypto(req);
1171 	if (ret) {
1172 		artpec6_crypto_common_destroy(&req_ctx->common);
1173 		return ret;
1174 	}
1175 
1176 	return artpec6_crypto_submit(&req_ctx->common);
1177 }
1178 
1179 static int
1180 artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
1181 {
1182 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
1183 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
1184 	size_t iv_len = crypto_skcipher_ivsize(cipher);
1185 	unsigned int counter = be32_to_cpup((__be32 *)
1186 					    (req->iv + iv_len - 4));
1187 	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
1188 			     AES_BLOCK_SIZE;
1189 
1190 	/*
	 * The hardware uses only the last 32 bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter.  So fall back if the counter is going to
	 * overflow.
1195 	 */
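	/*
	 * Illustrative example: a 16-block request whose IV ends in
	 * 0xfffffff8 would wrap the 32-bit counter past 0xffffffff and is
	 * therefore handed to the fallback.
	 */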
1196 	if (counter + nblks < counter) {
1197 		int ret;
1198 
1199 		pr_debug("counter %x will overflow (nblks %u), falling back\n",
1200 			 counter, counter + nblks);
1201 
1202 		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
1203 						  ctx->key_length);
1204 		if (ret)
1205 			return ret;
1206 
1207 		{
1208 			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
1209 
1210 			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
1211 			skcipher_request_set_callback(subreq, req->base.flags,
1212 						      NULL, NULL);
1213 			skcipher_request_set_crypt(subreq, req->src, req->dst,
1214 						   req->cryptlen, req->iv);
1215 			ret = encrypt ? crypto_skcipher_encrypt(subreq)
1216 				      : crypto_skcipher_decrypt(subreq);
1217 			skcipher_request_zero(subreq);
1218 		}
1219 		return ret;
1220 	}
1221 
1222 	return encrypt ? artpec6_crypto_encrypt(req)
1223 		       : artpec6_crypto_decrypt(req);
1224 }
1225 
1226 static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
1227 {
1228 	return artpec6_crypto_ctr_crypt(req, true);
1229 }
1230 
1231 static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
1232 {
1233 	return artpec6_crypto_ctr_crypt(req, false);
1234 }
1235 
1236 /*
1237  * AEAD functions
1238  */
1239 static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
1240 {
1241 	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);
1242 
1243 	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
1244 
1245 	crypto_aead_set_reqsize(tfm,
1246 				sizeof(struct artpec6_crypto_aead_req_ctx));
1247 
1248 	return 0;
1249 }
1250 
1251 static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
1252 			       unsigned int len)
1253 {
1254 	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
1255 
1256 	if (len != 16 && len != 24 && len != 32) {
1257 		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
1259 	}
1260 
1261 	ctx->key_length = len;
1262 
1263 	memcpy(ctx->aes_key, key, len);
1264 	return 0;
1265 }
1266 
1267 static int artpec6_crypto_aead_encrypt(struct aead_request *req)
1268 {
1269 	int ret;
1270 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
1271 
1272 	req_ctx->decrypt = false;
1273 	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
1274 				  artpec6_crypto_complete_aead,
1275 				  NULL, 0);
1276 	if (ret)
1277 		return ret;
1278 
1279 	ret = artpec6_crypto_prepare_aead(req);
1280 	if (ret) {
1281 		artpec6_crypto_common_destroy(&req_ctx->common);
1282 		return ret;
1283 	}
1284 
1285 	return artpec6_crypto_submit(&req_ctx->common);
1286 }
1287 
1288 static int artpec6_crypto_aead_decrypt(struct aead_request *req)
1289 {
1290 	int ret;
1291 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
1292 
1293 	req_ctx->decrypt = true;
1294 	if (req->cryptlen < AES_BLOCK_SIZE)
1295 		return -EINVAL;
1296 
1297 	ret = artpec6_crypto_common_init(&req_ctx->common,
1298 				  &req->base,
1299 				  artpec6_crypto_complete_aead,
1300 				  NULL, 0);
1301 	if (ret)
1302 		return ret;
1303 
1304 	ret = artpec6_crypto_prepare_aead(req);
1305 	if (ret) {
1306 		artpec6_crypto_common_destroy(&req_ctx->common);
1307 		return ret;
1308 	}
1309 
1310 	return artpec6_crypto_submit(&req_ctx->common);
1311 }
1312 
1313 static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
1314 {
1315 	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
1316 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
1317 	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
1318 	size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
1319 		SHA512_DIGEST_SIZE : digestsize;
1320 	size_t blocksize = crypto_tfm_alg_blocksize(
1321 		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
1322 	struct artpec6_crypto_req_common *common = &req_ctx->common;
1323 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1324 	enum artpec6_crypto_variant variant = ac->variant;
1325 	u32 sel_ctx;
1326 	bool ext_ctx = false;
1327 	bool run_hw = false;
1328 	int error = 0;
1329 
1330 	artpec6_crypto_init_dma_operation(common);
1331 
	/* Upload HMAC key; it must be the first packet */
1333 	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
1334 		if (variant == ARTPEC6_CRYPTO) {
1335 			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
1336 						     a6_regk_crypto_dlkey);
1337 		} else {
1338 			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
1339 						     a7_regk_crypto_dlkey);
1340 		}
1341 
1342 		/* Copy and pad up the key */
1343 		memcpy(req_ctx->key_buffer, ctx->hmac_key,
1344 		       ctx->hmac_key_length);
1345 		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
1346 		       blocksize - ctx->hmac_key_length);
1347 
1348 		error = artpec6_crypto_setup_out_descr(common,
1349 					(void *)&req_ctx->key_md,
1350 					sizeof(req_ctx->key_md), false, false);
1351 		if (error)
1352 			return error;
1353 
1354 		error = artpec6_crypto_setup_out_descr(common,
1355 					req_ctx->key_buffer, blocksize,
1356 					true, false);
1357 		if (error)
1358 			return error;
1359 	}
1360 
1361 	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
1362 		/* Restore context */
1363 		sel_ctx = regk_crypto_ext;
1364 		ext_ctx = true;
1365 	} else {
1366 		sel_ctx = regk_crypto_init;
1367 	}
1368 
1369 	if (variant == ARTPEC6_CRYPTO) {
1370 		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
1371 		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);
1372 
1373 		/* If this is the final round, set the final flag */
1374 		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
1375 			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
1376 	} else {
1377 		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
1378 		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
1379 
1380 		/* If this is the final round, set the final flag */
1381 		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
1382 			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
1383 	}
1384 
	/* Set up metadata descriptors */
1386 	error = artpec6_crypto_setup_out_descr(common,
1387 				(void *)&req_ctx->hash_md,
1388 				sizeof(req_ctx->hash_md), false, false);
1389 	if (error)
1390 		return error;
1391 
1392 	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
1393 	if (error)
1394 		return error;
1395 
1396 	if (ext_ctx) {
1397 		error = artpec6_crypto_setup_out_descr(common,
1398 					req_ctx->digeststate,
1399 					contextsize, false, false);
1400 
1401 		if (error)
1402 			return error;
1403 	}
1404 
1405 	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
1406 		size_t done_bytes = 0;
1407 		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
1408 		size_t ready_bytes = round_down(total_bytes, blocksize);
1409 		struct artpec6_crypto_walk walk;
1410 
1411 		run_hw = ready_bytes > 0;
1412 		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least some
			 * bytes to the HW. Empty this partial buffer before
			 * tackling the SG lists.
1416 			 */
1417 			memcpy(req_ctx->partial_buffer_out,
1418 				req_ctx->partial_buffer,
1419 				req_ctx->partial_bytes);
1420 
1421 			error = artpec6_crypto_setup_out_descr(common,
1422 						req_ctx->partial_buffer_out,
1423 						req_ctx->partial_bytes,
1424 						false, true);
1425 			if (error)
1426 				return error;
1427 
1428 			/* Reset partial buffer */
1429 			done_bytes += req_ctx->partial_bytes;
1430 			req_ctx->partial_bytes = 0;
1431 		}
1432 
1433 		artpec6_crypto_walk_init(&walk, areq->src);
1434 
1435 		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
1436 							   ready_bytes -
1437 							   done_bytes);
1438 		if (error)
1439 			return error;
1440 
1441 		if (walk.sg) {
1442 			size_t sg_skip = ready_bytes - done_bytes;
1443 			size_t sg_rem = areq->nbytes - sg_skip;
1444 
1445 			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
1446 					   req_ctx->partial_buffer +
1447 					   req_ctx->partial_bytes,
1448 					   sg_rem, sg_skip);
1449 
1450 			req_ctx->partial_bytes += sg_rem;
1451 		}
1452 
1453 		req_ctx->digcnt += ready_bytes;
1454 		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
1455 	}
1456 
1457 	/* Finalize */
1458 	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
1459 		bool needtrim = contextsize != digestsize;
1460 		size_t hash_pad_len;
1461 		u64 digest_bits;
1462 		u32 oper;
1463 
1464 		if (variant == ARTPEC6_CRYPTO)
1465 			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
1466 		else
1467 			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
1468 
1469 		/* Write out the partial buffer if present */
1470 		if (req_ctx->partial_bytes) {
1471 			memcpy(req_ctx->partial_buffer_out,
1472 			       req_ctx->partial_buffer,
1473 			       req_ctx->partial_bytes);
1474 			error = artpec6_crypto_setup_out_descr(common,
1475 						req_ctx->partial_buffer_out,
1476 						req_ctx->partial_bytes,
1477 						false, true);
1478 			if (error)
1479 				return error;
1480 
1481 			req_ctx->digcnt += req_ctx->partial_bytes;
1482 			req_ctx->partial_bytes = 0;
1483 		}
1484 
1485 		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
1486 			digest_bits = 8 * (req_ctx->digcnt + blocksize);
1487 		else
1488 			digest_bits = 8 * req_ctx->digcnt;
1489 
1490 		/* Add the hash pad */
1491 		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
1492 					       req_ctx->digcnt, digest_bits);
1493 		error = artpec6_crypto_setup_out_descr(common,
1494 						      req_ctx->pad_buffer,
1495 						      hash_pad_len, false,
1496 						      true);
1497 		req_ctx->digcnt = 0;
1498 
1499 		if (error)
1500 			return error;
1501 
1502 		/* Descriptor for the final result */
1503 		error = artpec6_crypto_setup_in_descr(common, areq->result,
1504 						      digestsize,
1505 						      !needtrim);
1506 		if (error)
1507 			return error;
1508 
1509 		if (needtrim) {
1510 			/* Discard the extra context bytes for SHA-384 */
1511 			error = artpec6_crypto_setup_in_descr(common,
1512 					req_ctx->partial_buffer,
1513 					digestsize - contextsize, true);
1514 			if (error)
1515 				return error;
1516 		}
1517 
1518 	} else { /* This is not the final operation for this request */
1519 		if (!run_hw)
1520 			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
1521 
1522 		/* Save the result to the context */
1523 		error = artpec6_crypto_setup_in_descr(common,
1524 						      req_ctx->digeststate,
1525 						      contextsize, false);
1526 		if (error)
1527 			return error;
1528 		/* fall through */
1529 	}
1530 
1531 	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
1532 				 HASH_FLAG_FINALIZE);
1533 
1534 	error = artpec6_crypto_terminate_in_descrs(common);
1535 	if (error)
1536 		return error;
1537 
1538 	error = artpec6_crypto_terminate_out_descrs(common);
1539 	if (error)
1540 		return error;
1541 
1542 	error = artpec6_crypto_dma_map_descs(common);
1543 	if (error)
1544 		return error;
1545 
1546 	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
1547 }
1548 
1549 
1550 static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
1551 {
1552 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1553 
1554 	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
1555 	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
1556 
1557 	return 0;
1558 }
1559 
1560 static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
1561 {
1562 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1563 
1564 	ctx->fallback =
1565 		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
1566 					   0, CRYPTO_ALG_NEED_FALLBACK);
1567 	if (IS_ERR(ctx->fallback))
1568 		return PTR_ERR(ctx->fallback);
1569 
1570 	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
1571 	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
1572 
1573 	return 0;
1574 }
1575 
1576 static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
1577 {
1578 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1579 
1580 	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
1581 	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
1582 
1583 	return 0;
1584 }
1585 
1586 static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
1587 {
1588 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1589 
1590 	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
1591 	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
1592 
1593 	return 0;
1594 }
1595 
1596 static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
1597 {
1598 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1599 
1600 	memset(ctx, 0, sizeof(*ctx));
1601 }
1602 
1603 static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
1604 {
1605 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1606 
1607 	crypto_free_sync_skcipher(ctx->fallback);
1608 	artpec6_crypto_aes_exit(tfm);
1609 }
1610 
1611 static int
1612 artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
1613 			      unsigned int keylen)
1614 {
1615 	struct artpec6_cryptotfm_context *ctx =
1616 		crypto_skcipher_ctx(cipher);
1617 
1618 	switch (keylen) {
1619 	case 16:
1620 	case 24:
1621 	case 32:
1622 		break;
1623 	default:
1624 		crypto_skcipher_set_flags(cipher,
1625 					  CRYPTO_TFM_RES_BAD_KEY_LEN);
1626 		return -EINVAL;
1627 	}
1628 
1629 	memcpy(ctx->aes_key, key, keylen);
1630 	ctx->key_length = keylen;
1631 	return 0;
1632 }
1633 
1634 static int
1635 artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
1636 			      unsigned int keylen)
1637 {
1638 	struct artpec6_cryptotfm_context *ctx =
1639 		crypto_skcipher_ctx(cipher);
1640 	int ret;
1641 
1642 	ret = xts_check_key(&cipher->base, key, keylen);
1643 	if (ret)
1644 		return ret;
1645 
1646 	switch (keylen) {
1647 	case 32:
1648 	case 48:
1649 	case 64:
1650 		break;
1651 	default:
1652 		crypto_skcipher_set_flags(cipher,
1653 					  CRYPTO_TFM_RES_BAD_KEY_LEN);
1654 		return -EINVAL;
1655 	}
1656 
1657 	memcpy(ctx->aes_key, key, keylen);
1658 	ctx->key_length = keylen;
1659 	return 0;
1660 }
1661 
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @areq: The asynchronous request to process
1665  *
1666  * @return 0 if the dma job was successfully prepared
1667  *	  <0 on error
1668  *
1669  * This function sets up the PDMA descriptors for a block cipher request.
1670  *
1671  * The required padding is added for AES-CTR using a statically defined
1672  * buffer.
1673  *
1674  * The PDMA descriptor list will be as follows:
1675  *
1676  * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
1677  * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
1678  *
1679  */
1680 static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
1681 {
1682 	int ret;
1683 	struct artpec6_crypto_walk walk;
1684 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1685 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
1686 	struct artpec6_crypto_request_context *req_ctx = NULL;
1687 	size_t iv_len = crypto_skcipher_ivsize(cipher);
1688 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1689 	enum artpec6_crypto_variant variant = ac->variant;
1690 	struct artpec6_crypto_req_common *common;
1691 	bool cipher_decr = false;
1692 	size_t cipher_klen;
1693 	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
1694 	u32 oper;
1695 
1696 	req_ctx = skcipher_request_ctx(areq);
1697 	common = &req_ctx->common;
1698 
1699 	artpec6_crypto_init_dma_operation(common);
1700 
1701 	if (variant == ARTPEC6_CRYPTO)
1702 		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
1703 	else
1704 		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
1705 
1706 	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
1707 					     sizeof(ctx->key_md), false, false);
1708 	if (ret)
1709 		return ret;
1710 
1711 	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
1712 					      ctx->key_length, true, false);
1713 	if (ret)
1714 		return ret;
1715 
1716 	req_ctx->cipher_md = 0;
1717 
1718 	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
1719 		cipher_klen = ctx->key_length/2;
1720 	else
1721 		cipher_klen =  ctx->key_length;
1722 
1723 	/* Metadata */
1724 	switch (cipher_klen) {
1725 	case 16:
1726 		cipher_len = regk_crypto_key_128;
1727 		break;
1728 	case 24:
1729 		cipher_len = regk_crypto_key_192;
1730 		break;
1731 	case 32:
1732 		cipher_len = regk_crypto_key_256;
1733 		break;
1734 	default:
		pr_err("%s: Invalid key length %zu!\n",
1736 			MODULE_NAME, ctx->key_length);
1737 		return -EINVAL;
1738 	}
1739 
1740 	switch (ctx->crypto_type) {
1741 	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
1742 		oper = regk_crypto_aes_ecb;
1743 		cipher_decr = req_ctx->decrypt;
1744 		break;
1745 
1746 	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
1747 		oper = regk_crypto_aes_cbc;
1748 		cipher_decr = req_ctx->decrypt;
1749 		break;
1750 
1751 	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
1752 		oper = regk_crypto_aes_ctr;
1753 		cipher_decr = false;
1754 		break;
1755 
1756 	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
1757 		oper = regk_crypto_aes_xts;
1758 		cipher_decr = req_ctx->decrypt;
1759 
1760 		if (variant == ARTPEC6_CRYPTO)
1761 			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
1762 		else
1763 			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
1764 		break;
1765 
1766 	default:
1767 		pr_err("%s: Invalid cipher mode %d!\n",
1768 			MODULE_NAME, ctx->crypto_type);
1769 		return -EINVAL;
1770 	}
1771 
1772 	if (variant == ARTPEC6_CRYPTO) {
1773 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
1774 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
1775 						 cipher_len);
1776 		if (cipher_decr)
1777 			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
1778 	} else {
1779 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
1780 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
1781 						 cipher_len);
1782 		if (cipher_decr)
1783 			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
1784 	}
1785 
1786 	ret = artpec6_crypto_setup_out_descr(common,
1787 					    &req_ctx->cipher_md,
1788 					    sizeof(req_ctx->cipher_md),
1789 					    false, false);
1790 	if (ret)
1791 		return ret;
1792 
1793 	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
1794 	if (ret)
1795 		return ret;
1796 
1797 	if (iv_len) {
1798 		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
1799 						     false, false);
1800 		if (ret)
1801 			return ret;
1802 	}
1803 	/* Data out */
1804 	artpec6_crypto_walk_init(&walk, areq->src);
1805 	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
1806 	if (ret)
1807 		return ret;
1808 
1809 	/* Data in */
1810 	artpec6_crypto_walk_init(&walk, areq->dst);
1811 	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
1812 	if (ret)
1813 		return ret;
1814 
1815 	/* CTR-mode padding required by the HW. */
1816 	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
1817 	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
1818 		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
1819 			     areq->cryptlen;
1820 
1821 		if (pad) {
1822 			ret = artpec6_crypto_setup_out_descr(common,
1823 							     ac->pad_buffer,
1824 							     pad, false, false);
1825 			if (ret)
1826 				return ret;
1827 
1828 			ret = artpec6_crypto_setup_in_descr(common,
1829 							    ac->pad_buffer, pad,
1830 							    false);
1831 			if (ret)
1832 				return ret;
1833 		}
1834 	}
1835 
1836 	ret = artpec6_crypto_terminate_out_descrs(common);
1837 	if (ret)
1838 		return ret;
1839 
1840 	ret = artpec6_crypto_terminate_in_descrs(common);
1841 	if (ret)
1842 		return ret;
1843 
1844 	return artpec6_crypto_dma_map_descs(common);
1845 }
1846 
1847 static int artpec6_crypto_prepare_aead(struct aead_request *areq)
1848 {
1849 	size_t count;
1850 	int ret;
1851 	size_t input_length;
1852 	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
1853 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
1854 	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
1855 	struct artpec6_crypto_req_common *common = &req_ctx->common;
1856 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1857 	enum artpec6_crypto_variant variant = ac->variant;
1858 	u32 md_cipher_len;
1859 
1860 	artpec6_crypto_init_dma_operation(common);
1861 
1862 	/* Key */
1863 	if (variant == ARTPEC6_CRYPTO) {
1864 		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
1865 					 a6_regk_crypto_dlkey);
1866 	} else {
1867 		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
1868 					 a7_regk_crypto_dlkey);
1869 	}
1870 	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
1871 					     sizeof(ctx->key_md), false, false);
1872 	if (ret)
1873 		return ret;
1874 
1875 	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
1876 					     ctx->key_length, true, false);
1877 	if (ret)
1878 		return ret;
1879 
1880 	req_ctx->cipher_md = 0;
1881 
1882 	switch (ctx->key_length) {
1883 	case 16:
1884 		md_cipher_len = regk_crypto_key_128;
1885 		break;
1886 	case 24:
1887 		md_cipher_len = regk_crypto_key_192;
1888 		break;
1889 	case 32:
1890 		md_cipher_len = regk_crypto_key_256;
1891 		break;
1892 	default:
1893 		return -EINVAL;
1894 	}
1895 
1896 	if (variant == ARTPEC6_CRYPTO) {
1897 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
1898 						 regk_crypto_aes_gcm);
1899 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
1900 						 md_cipher_len);
1901 		if (req_ctx->decrypt)
1902 			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
1903 	} else {
1904 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
1905 						 regk_crypto_aes_gcm);
1906 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
1907 						 md_cipher_len);
1908 		if (req_ctx->decrypt)
1909 			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
1910 	}
1911 
1912 	ret = artpec6_crypto_setup_out_descr(common,
1913 					    (void *) &req_ctx->cipher_md,
1914 					    sizeof(req_ctx->cipher_md), false,
1915 					    false);
1916 	if (ret)
1917 		return ret;
1918 
1919 	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
1920 	if (ret)
1921 		return ret;
1922 
1923 	/* For the decryption, cryptlen includes the tag. */
1924 	input_length = areq->cryptlen;
1925 	if (req_ctx->decrypt)
1926 		input_length -= AES_BLOCK_SIZE;
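	/*
	 * The code below assumes the full AES_BLOCK_SIZE (16 byte) GCM tag;
	 * for decryption it occupies the final 16 bytes of the cryptlen
	 * region in the source scatterlist and is handled separately.
	 */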
1927 
1928 	/* Prepare the context buffer */
1929 	req_ctx->hw_ctx.aad_length_bits =
1930 		__cpu_to_be64(8*areq->assoclen);
1931 
1932 	req_ctx->hw_ctx.text_length_bits =
1933 		__cpu_to_be64(8*input_length);
1934 
1935 	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
	/* The HW omits the initial increment of the counter field. */
1937 	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
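	/*
	 * For a 96-bit IV, GCM defines J0 as IV || 0x00000001, i.e. a
	 * 32-bit big-endian block counter starting at 1.  The two memcpy
	 * calls above build exactly that layout: a 12-byte IV followed by
	 * the bytes 00 00 00 01.
	 */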
1938 
1939 	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
1940 		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
1941 	if (ret)
1942 		return ret;
1943 
1944 	{
1945 		struct artpec6_crypto_walk walk;
1946 
1947 		artpec6_crypto_walk_init(&walk, areq->src);
1948 
1949 		/* Associated data */
1950 		count = areq->assoclen;
1951 		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1952 		if (ret)
1953 			return ret;
1954 
1955 		if (!IS_ALIGNED(areq->assoclen, 16)) {
1956 			size_t assoc_pad = 16 - (areq->assoclen % 16);
1957 			/* The HW mandates zero padding here */
1958 			ret = artpec6_crypto_setup_out_descr(common,
1959 							     ac->zero_buffer,
1960 							     assoc_pad, false,
1961 							     false);
1962 			if (ret)
1963 				return ret;
1964 		}
1965 
1966 		/* Data to crypto */
1967 		count = input_length;
1968 		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1969 		if (ret)
1970 			return ret;
1971 
1972 		if (!IS_ALIGNED(input_length, 16)) {
1973 			size_t crypto_pad = 16 - (input_length % 16);
1974 			/* The HW mandates zero padding here */
1975 			ret = artpec6_crypto_setup_out_descr(common,
1976 							     ac->zero_buffer,
1977 							     crypto_pad,
1978 							     false,
1979 							     false);
1980 			if (ret)
1981 				return ret;
1982 		}
1983 	}
1984 
1985 	/* Data from crypto */
1986 	{
1987 		struct artpec6_crypto_walk walk;
1988 		size_t output_len = areq->cryptlen;
1989 
1990 		if (req_ctx->decrypt)
1991 			output_len -= AES_BLOCK_SIZE;
1992 
1993 		artpec6_crypto_walk_init(&walk, areq->dst);
1994 
1995 		/* skip associated data in the output */
1996 		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
1997 		if (count)
1998 			return -EINVAL;
1999 
2000 		count = output_len;
2001 		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
2002 		if (ret)
2003 			return ret;
2004 
2005 		/* Put padding between the cryptotext and the auth tag */
2006 		if (!IS_ALIGNED(output_len, 16)) {
2007 			size_t crypto_pad = 16 - (output_len % 16);
2008 
2009 			ret = artpec6_crypto_setup_in_descr(common,
2010 							    ac->pad_buffer,
2011 							    crypto_pad, false);
2012 			if (ret)
2013 				return ret;
2014 		}
2015 
		/* The authentication tag shall follow immediately after
		 * the output ciphertext. For decryption it is put in a context
		 * buffer for a later comparison against the input tag.
		 */
2020 		count = AES_BLOCK_SIZE;
2021 
2022 		if (req_ctx->decrypt) {
2023 			ret = artpec6_crypto_setup_in_descr(common,
2024 				req_ctx->decryption_tag, count, false);
2025 			if (ret)
2026 				return ret;
2027 
2028 		} else {
2029 			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
2030 								count);
2031 			if (ret)
2032 				return ret;
2033 		}
2034 
2035 	}
2036 
2037 	ret = artpec6_crypto_terminate_in_descrs(common);
2038 	if (ret)
2039 		return ret;
2040 
2041 	ret = artpec6_crypto_terminate_out_descrs(common);
2042 	if (ret)
2043 		return ret;
2044 
2045 	return artpec6_crypto_dma_map_descs(common);
2046 }
2047 
2048 static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
2049 {
2050 	struct artpec6_crypto_req_common *req;
2051 
2052 	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
2053 		req = list_first_entry(&ac->queue,
2054 				       struct artpec6_crypto_req_common,
2055 				       list);
2056 		list_move_tail(&req->list, &ac->pending);
2057 		artpec6_crypto_start_dma(req);
2058 
2059 		req->req->complete(req->req, -EINPROGRESS);
2060 	}
2061 
2062 	/*
2063 	 * In some cases, the hardware can raise an in_eop_flush interrupt
	 * before actually updating the status, so we have a timer which will
2065 	 * recheck the status on timeout.  Since the cases are expected to be
2066 	 * very rare, we use a relatively large timeout value.  There should be
2067 	 * no noticeable negative effect if we timeout spuriously.
2068 	 */
2069 	if (ac->pending_count)
2070 		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
2071 	else
2072 		del_timer(&ac->timer);
2073 }
2074 
2075 static void artpec6_crypto_timeout(struct timer_list *t)
2076 {
2077 	struct artpec6_crypto *ac = from_timer(ac, t, timer);
2078 
2079 	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
2080 
2081 	tasklet_schedule(&ac->task);
2082 }
2083 
2084 static void artpec6_crypto_task(unsigned long data)
2085 {
2086 	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
2087 	struct artpec6_crypto_req_common *req;
2088 	struct artpec6_crypto_req_common *n;
2089 
2090 	if (list_empty(&ac->pending)) {
2091 		pr_debug("Spurious IRQ\n");
2092 		return;
2093 	}
2094 
2095 	spin_lock_bh(&ac->queue_lock);
2096 
2097 	list_for_each_entry_safe(req, n, &ac->pending, list) {
2098 		struct artpec6_crypto_dma_descriptors *dma = req->dma;
2099 		u32 stat;
2100 
2101 		dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
2102 					sizeof(dma->stat[0]),
2103 					DMA_BIDIRECTIONAL);
2104 
2105 		stat = req->dma->stat[req->dma->in_cnt-1];
2106 
2107 		/* A non-zero final status descriptor indicates
2108 		 * this job has finished.
2109 		 */
2110 		pr_debug("Request %p status is %X\n", req, stat);
2111 		if (!stat)
2112 			break;
2113 
2114 		/* Allow testing of timeout handling with fault injection */
2115 #ifdef CONFIG_FAULT_INJECTION
2116 		if (should_fail(&artpec6_crypto_fail_status_read, 1))
2117 			continue;
2118 #endif
2119 
2120 		pr_debug("Completing request %p\n", req);
2121 
2122 		list_del(&req->list);
2123 
2124 		artpec6_crypto_dma_unmap_all(req);
2125 		artpec6_crypto_copy_bounce_buffers(req);
2126 
2127 		ac->pending_count--;
2128 		artpec6_crypto_common_destroy(req);
2129 		req->complete(req->req);
2130 	}
2131 
2132 	artpec6_crypto_process_queue(ac);
2133 
2134 	spin_unlock_bh(&ac->queue_lock);
2135 }
2136 
2137 static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
2138 {
2139 	req->complete(req, 0);
2140 }
2141 
2142 static void
2143 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
2144 {
2145 	struct skcipher_request *cipher_req = container_of(req,
2146 		struct skcipher_request, base);
2147 
2148 	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
2149 				 cipher_req->cryptlen - AES_BLOCK_SIZE,
2150 				 AES_BLOCK_SIZE, 0);
2151 	req->complete(req, 0);
2152 }
2153 
2154 static void
2155 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
2156 {
2157 	struct skcipher_request *cipher_req = container_of(req,
2158 		struct skcipher_request, base);
2159 
2160 	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
2161 				 cipher_req->cryptlen - AES_BLOCK_SIZE,
2162 				 AES_BLOCK_SIZE, 0);
2163 	req->complete(req, 0);
2164 }
2165 
2166 static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
2167 {
2168 	int result = 0;
2169 
2170 	/* Verify GCM hashtag. */
2171 	struct aead_request *areq = container_of(req,
2172 		struct aead_request, base);
2173 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
2174 
2175 	if (req_ctx->decrypt) {
2176 		u8 input_tag[AES_BLOCK_SIZE];
2177 
2178 		sg_pcopy_to_buffer(areq->src,
2179 				   sg_nents(areq->src),
2180 				   input_tag,
2181 				   AES_BLOCK_SIZE,
2182 				   areq->assoclen + areq->cryptlen -
2183 				   AES_BLOCK_SIZE);
2184 
2185 		if (memcmp(req_ctx->decryption_tag,
2186 			   input_tag,
2187 			   AES_BLOCK_SIZE)) {
2188 			pr_debug("***EBADMSG:\n");
2189 			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
2190 					     input_tag, AES_BLOCK_SIZE, true);
2191 			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
2192 					     req_ctx->decryption_tag,
2193 					     AES_BLOCK_SIZE, true);
2194 
2195 			result = -EBADMSG;
2196 		}
2197 	}
2198 
2199 	req->complete(req, result);
2200 }
2201 
2202 static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
2203 {
2204 	req->complete(req, 0);
2205 }
2206 
2207 
2208 /*------------------- Hash functions -----------------------------------------*/
2209 static int
2210 artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
2211 		    const u8 *key, unsigned int keylen)
2212 {
2213 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
2214 	size_t blocksize;
2215 	int ret;
2216 
2217 	if (!keylen) {
2218 		pr_err("Invalid length (%d) of HMAC key\n",
2219 			keylen);
2220 		return -EINVAL;
2221 	}
2222 
2223 	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2224 
2225 	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2226 
2227 	if (keylen > blocksize) {
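		/*
		 * Per RFC 2104, an HMAC key longer than the block size is
		 * first hashed; the digest (zero-padded up to the block
		 * size by the memset above) is then used as the key.
		 */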
2228 		SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
2229 
2230 		hdesc->tfm = tfm_ctx->child_hash;
2231 		hdesc->flags = crypto_ahash_get_flags(tfm) &
2232 			       CRYPTO_TFM_REQ_MAY_SLEEP;
2233 
2234 		tfm_ctx->hmac_key_length = blocksize;
2235 		ret = crypto_shash_digest(hdesc, key, keylen,
2236 					  tfm_ctx->hmac_key);
2237 		if (ret)
2238 			return ret;
2239 
2240 	} else {
2241 		memcpy(tfm_ctx->hmac_key, key, keylen);
2242 		tfm_ctx->hmac_key_length = keylen;
2243 	}
2244 
2245 	return 0;
2246 }
2247 
2248 static int
2249 artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
2250 {
2251 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2252 	enum artpec6_crypto_variant variant = ac->variant;
2253 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2254 	u32 oper;
2255 
2256 	memset(req_ctx, 0, sizeof(*req_ctx));
2257 
2258 	req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
2259 	if (hmac)
2260 		req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
2261 
2262 	switch (type) {
2263 	case ARTPEC6_CRYPTO_HASH_SHA1:
2264 		oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
2265 		break;
2266 	case ARTPEC6_CRYPTO_HASH_SHA256:
2267 		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
2268 		break;
2269 	case ARTPEC6_CRYPTO_HASH_SHA384:
2270 		oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
2271 		break;
2272 	case ARTPEC6_CRYPTO_HASH_SHA512:
2273 		oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
2274 		break;
2275 
2276 	default:
2277 		pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
2278 		return -EINVAL;
2279 	}
2280 
2281 	if (variant == ARTPEC6_CRYPTO)
2282 		req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
2283 	else
2284 		req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
2285 
2286 	return 0;
2287 }
2288 
2289 static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
2290 {
2291 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2292 	int ret;
2293 
2294 	if (!req_ctx->common.dma) {
2295 		ret = artpec6_crypto_common_init(&req_ctx->common,
2296 					  &req->base,
2297 					  artpec6_crypto_complete_hash,
2298 					  NULL, 0);
2299 
2300 		if (ret)
2301 			return ret;
2302 	}
2303 
2304 	ret = artpec6_crypto_prepare_hash(req);
2305 	switch (ret) {
2306 	case ARTPEC6_CRYPTO_PREPARE_HASH_START:
2307 		ret = artpec6_crypto_submit(&req_ctx->common);
2308 		break;
2309 
2310 	case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
2311 		ret = 0;
2312 		/* Fallthrough */
2313 
2314 	default:
2315 		artpec6_crypto_common_destroy(&req_ctx->common);
2316 		break;
2317 	}
2318 
2319 	return ret;
2320 }
2321 
2322 static int artpec6_crypto_hash_final(struct ahash_request *req)
2323 {
2324 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2325 
2326 	req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
2327 
2328 	return artpec6_crypto_prepare_submit_hash(req);
2329 }
2330 
2331 static int artpec6_crypto_hash_update(struct ahash_request *req)
2332 {
2333 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2334 
2335 	req_ctx->hash_flags |= HASH_FLAG_UPDATE;
2336 
2337 	return artpec6_crypto_prepare_submit_hash(req);
2338 }
2339 
2340 static int artpec6_crypto_sha1_init(struct ahash_request *req)
2341 {
2342 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2343 }
2344 
2345 static int artpec6_crypto_sha1_digest(struct ahash_request *req)
2346 {
2347 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2348 
2349 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2350 
2351 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2352 
2353 	return artpec6_crypto_prepare_submit_hash(req);
2354 }
2355 
2356 static int artpec6_crypto_sha256_init(struct ahash_request *req)
2357 {
2358 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2359 }
2360 
2361 static int artpec6_crypto_sha256_digest(struct ahash_request *req)
2362 {
2363 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2364 
2365 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2366 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2367 
2368 	return artpec6_crypto_prepare_submit_hash(req);
2369 }
2370 
2371 static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
2372 {
2373 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
2374 }
2375 
2376 static int __maybe_unused
2377 artpec6_crypto_sha384_digest(struct ahash_request *req)
2378 {
2379 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2380 
2381 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
2382 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2383 
2384 	return artpec6_crypto_prepare_submit_hash(req);
2385 }
2386 
2387 static int artpec6_crypto_sha512_init(struct ahash_request *req)
2388 {
2389 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
2390 }
2391 
2392 static int artpec6_crypto_sha512_digest(struct ahash_request *req)
2393 {
2394 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2395 
2396 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
2397 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2398 
2399 	return artpec6_crypto_prepare_submit_hash(req);
2400 }
2401 
2402 static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
2403 {
2404 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2405 }
2406 
2407 static int __maybe_unused
2408 artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
2409 {
2410 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
2411 }
2412 
2413 static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
2414 {
2415 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
2416 }
2417 
2418 static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
2419 {
2420 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2421 
2422 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2423 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2424 
2425 	return artpec6_crypto_prepare_submit_hash(req);
2426 }
2427 
2428 static int __maybe_unused
2429 artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
2430 {
2431 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2432 
2433 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
2434 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2435 
2436 	return artpec6_crypto_prepare_submit_hash(req);
2437 }
2438 
2439 static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
2440 {
2441 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2442 
2443 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
2444 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2445 
2446 	return artpec6_crypto_prepare_submit_hash(req);
2447 }
2448 
2449 static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
2450 				    const char *base_hash_name)
2451 {
2452 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2453 
2454 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2455 				 sizeof(struct artpec6_hash_request_context));
2456 	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
2457 
2458 	if (base_hash_name) {
2459 		struct crypto_shash *child;
2460 
2461 		child = crypto_alloc_shash(base_hash_name, 0,
2462 					   CRYPTO_ALG_NEED_FALLBACK);
2463 
2464 		if (IS_ERR(child))
2465 			return PTR_ERR(child);
2466 
2467 		tfm_ctx->child_hash = child;
2468 	}
2469 
2470 	return 0;
2471 }
2472 
2473 static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
2474 {
2475 	return artpec6_crypto_ahash_init_common(tfm, NULL);
2476 }
2477 
2478 static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
2479 {
2480 	return artpec6_crypto_ahash_init_common(tfm, "sha256");
2481 }
2482 
2483 static int __maybe_unused
2484 artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
2485 {
2486 	return artpec6_crypto_ahash_init_common(tfm, "sha384");
2487 }
2488 
2489 static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
2490 {
2491 	return artpec6_crypto_ahash_init_common(tfm, "sha512");
2492 }
2493 
2494 static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
2495 {
2496 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2497 
2498 	if (tfm_ctx->child_hash)
2499 		crypto_free_shash(tfm_ctx->child_hash);
2500 
2501 	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2502 	tfm_ctx->hmac_key_length = 0;
2503 }
2504 
2505 static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
2506 {
2507 	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2508 	struct artpec6_hash_export_state *state = out;
2509 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2510 	enum artpec6_crypto_variant variant = ac->variant;
2511 
2512 	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
2513 		     sizeof(ctx->partial_buffer));
2514 	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
2515 
2516 	state->digcnt = ctx->digcnt;
2517 	state->partial_bytes = ctx->partial_bytes;
2518 	state->hash_flags = ctx->hash_flags;
2519 
2520 	if (variant == ARTPEC6_CRYPTO)
2521 		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
2522 	else
2523 		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
2524 
2525 	memcpy(state->partial_buffer, ctx->partial_buffer,
2526 	       sizeof(state->partial_buffer));
2527 	memcpy(state->digeststate, ctx->digeststate,
2528 	       sizeof(state->digeststate));
2529 
2530 	return 0;
2531 }
2532 
2533 static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
2534 {
2535 	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2536 	const struct artpec6_hash_export_state *state = in;
2537 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2538 	enum artpec6_crypto_variant variant = ac->variant;
2539 
2540 	memset(ctx, 0, sizeof(*ctx));
2541 
2542 	ctx->digcnt = state->digcnt;
2543 	ctx->partial_bytes = state->partial_bytes;
2544 	ctx->hash_flags = state->hash_flags;
2545 
2546 	if (variant == ARTPEC6_CRYPTO)
2547 		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
2548 	else
2549 		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
2550 
2551 	memcpy(ctx->partial_buffer, state->partial_buffer,
2552 	       sizeof(state->partial_buffer));
2553 	memcpy(ctx->digeststate, state->digeststate,
2554 	       sizeof(state->digeststate));
2555 
2556 	return 0;
2557 }
2558 
2559 static int init_crypto_hw(struct artpec6_crypto *ac)
2560 {
2561 	enum artpec6_crypto_variant variant = ac->variant;
2562 	void __iomem *base = ac->base;
2563 	u32 out_descr_buf_size;
2564 	u32 out_data_buf_size;
2565 	u32 in_data_buf_size;
2566 	u32 in_descr_buf_size;
2567 	u32 in_stat_buf_size;
2568 	u32 in, out;
2569 
2570 	/*
2571 	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
2572 	 * channels and 1024 bytes for the IN channel. This is an elastic
2573 	 * memory used to internally store the descriptors and data. The values
	 * are specified in 64-byte increments.  Trustzone buffers are not
2575 	 * used at this stage.
2576 	 */
2577 	out_data_buf_size = 16;  /* 1024 bytes for data */
2578 	out_descr_buf_size = 15; /* 960 bytes for descriptors */
2579 	in_data_buf_size = 8;    /* 512 bytes for data */
2580 	in_descr_buf_size = 4;   /* 256 bytes for descriptors */
2581 	in_stat_buf_size = 4;   /* 256 bytes for stat descrs */
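	/*
	 * Illustrative arithmetic for the values above: the OUT side uses
	 * (16 + 15) * 64 = 1984 bytes and the IN side uses
	 * (8 + 4 + 4) * 64 = 1024 bytes, so both elastic memories are used
	 * in full.  The BUILD_BUG_ONs below enforce the same limits.
	 */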
2582 
2583 	BUILD_BUG_ON_MSG((out_data_buf_size
2584 				+ out_descr_buf_size) * 64 > 1984,
2585 			  "Invalid OUT configuration");
2586 
2587 	BUILD_BUG_ON_MSG((in_data_buf_size
2588 				+ in_descr_buf_size
2589 				+ in_stat_buf_size) * 64 > 1024,
2590 			  "Invalid IN configuration");
2591 
2592 	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
2593 	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
2594 	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
2595 
2596 	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
2597 	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
2598 
2599 	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
2600 	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
2601 
2602 	if (variant == ARTPEC6_CRYPTO) {
2603 		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
2604 		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
2605 		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
2606 			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
2607 			       base + A6_PDMA_INTR_MASK);
2608 	} else {
2609 		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
2610 		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
2611 		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
2612 			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
2613 			       base + A7_PDMA_INTR_MASK);
2614 	}
2615 
2616 	return 0;
2617 }
2618 
2619 static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
2620 {
2621 	enum artpec6_crypto_variant variant = ac->variant;
2622 	void __iomem *base = ac->base;
2623 
2624 	if (variant == ARTPEC6_CRYPTO) {
2625 		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
2626 		writel_relaxed(0, base + A6_PDMA_IN_CFG);
2627 		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2628 	} else {
2629 		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
2630 		writel_relaxed(0, base + A7_PDMA_IN_CFG);
2631 		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2632 	}
2633 
	writel_relaxed(0, base + PDMA_OUT_CFG);
}
2637 
2638 static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
2639 {
2640 	struct artpec6_crypto *ac = dev_id;
2641 	enum artpec6_crypto_variant variant = ac->variant;
2642 	void __iomem *base = ac->base;
2643 	u32 mask_in_data, mask_in_eop_flush;
2644 	u32 in_cmd_flush_stat, in_cmd_reg;
2645 	u32 ack_intr_reg;
2646 	u32 ack = 0;
2647 	u32 intr;
2648 
2649 	if (variant == ARTPEC6_CRYPTO) {
2650 		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
2651 		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
2652 		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
2653 		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
2654 		in_cmd_reg = A6_PDMA_IN_CMD;
2655 		ack_intr_reg = A6_PDMA_ACK_INTR;
2656 	} else {
2657 		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
2658 		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
2659 		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
2660 		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
2661 		in_cmd_reg = A7_PDMA_IN_CMD;
2662 		ack_intr_reg = A7_PDMA_ACK_INTR;
2663 	}
2664 
2665 	/* We get two interrupt notifications from each job.
2666 	 * The in_data means all data was sent to memory and then
2667 	 * we request a status flush command to write the per-job
2668 	 * status to its status vector. This ensures that the
2669 	 * tasklet can detect exactly how many submitted jobs
	 * have finished.
2671 	 */
2672 	if (intr & mask_in_data)
2673 		ack |= mask_in_data;
2674 
2675 	if (intr & mask_in_eop_flush)
2676 		ack |= mask_in_eop_flush;
2677 	else
2678 		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
2679 
2680 	writel_relaxed(ack, base + ack_intr_reg);
2681 
2682 	if (intr & mask_in_eop_flush)
2683 		tasklet_schedule(&ac->task);
2684 
2685 	return IRQ_HANDLED;
2686 }
2687 
2688 /*------------------- Algorithm definitions ----------------------------------*/
2689 
2690 /* Hashes */
2691 static struct ahash_alg hash_algos[] = {
2692 	/* SHA-1 */
2693 	{
2694 		.init = artpec6_crypto_sha1_init,
2695 		.update = artpec6_crypto_hash_update,
2696 		.final = artpec6_crypto_hash_final,
2697 		.digest = artpec6_crypto_sha1_digest,
2698 		.import = artpec6_crypto_hash_import,
2699 		.export = artpec6_crypto_hash_export,
2700 		.halg.digestsize = SHA1_DIGEST_SIZE,
2701 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2702 		.halg.base = {
2703 			.cra_name = "sha1",
2704 			.cra_driver_name = "artpec-sha1",
2705 			.cra_priority = 300,
2706 			.cra_flags = CRYPTO_ALG_ASYNC,
2707 			.cra_blocksize = SHA1_BLOCK_SIZE,
2708 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2709 			.cra_alignmask = 3,
2710 			.cra_module = THIS_MODULE,
2711 			.cra_init = artpec6_crypto_ahash_init,
2712 			.cra_exit = artpec6_crypto_ahash_exit,
2713 		}
2714 	},
2715 	/* SHA-256 */
2716 	{
2717 		.init = artpec6_crypto_sha256_init,
2718 		.update = artpec6_crypto_hash_update,
2719 		.final = artpec6_crypto_hash_final,
2720 		.digest = artpec6_crypto_sha256_digest,
2721 		.import = artpec6_crypto_hash_import,
2722 		.export = artpec6_crypto_hash_export,
2723 		.halg.digestsize = SHA256_DIGEST_SIZE,
2724 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2725 		.halg.base = {
2726 			.cra_name = "sha256",
2727 			.cra_driver_name = "artpec-sha256",
2728 			.cra_priority = 300,
2729 			.cra_flags = CRYPTO_ALG_ASYNC,
2730 			.cra_blocksize = SHA256_BLOCK_SIZE,
2731 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2732 			.cra_alignmask = 3,
2733 			.cra_module = THIS_MODULE,
2734 			.cra_init = artpec6_crypto_ahash_init,
2735 			.cra_exit = artpec6_crypto_ahash_exit,
2736 		}
2737 	},
2738 	/* HMAC SHA-256 */
2739 	{
2740 		.init = artpec6_crypto_hmac_sha256_init,
2741 		.update = artpec6_crypto_hash_update,
2742 		.final = artpec6_crypto_hash_final,
2743 		.digest = artpec6_crypto_hmac_sha256_digest,
2744 		.import = artpec6_crypto_hash_import,
2745 		.export = artpec6_crypto_hash_export,
2746 		.setkey = artpec6_crypto_hash_set_key,
2747 		.halg.digestsize = SHA256_DIGEST_SIZE,
2748 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2749 		.halg.base = {
2750 			.cra_name = "hmac(sha256)",
2751 			.cra_driver_name = "artpec-hmac-sha256",
2752 			.cra_priority = 300,
2753 			.cra_flags = CRYPTO_ALG_ASYNC,
2754 			.cra_blocksize = SHA256_BLOCK_SIZE,
2755 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2756 			.cra_alignmask = 3,
2757 			.cra_module = THIS_MODULE,
2758 			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
2759 			.cra_exit = artpec6_crypto_ahash_exit,
2760 		}
2761 	},
2762 };
2763 
2764 static struct ahash_alg artpec7_hash_algos[] = {
2765 	/* SHA-384 */
2766 	{
2767 		.init = artpec6_crypto_sha384_init,
2768 		.update = artpec6_crypto_hash_update,
2769 		.final = artpec6_crypto_hash_final,
2770 		.digest = artpec6_crypto_sha384_digest,
2771 		.import = artpec6_crypto_hash_import,
2772 		.export = artpec6_crypto_hash_export,
2773 		.halg.digestsize = SHA384_DIGEST_SIZE,
2774 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2775 		.halg.base = {
2776 			.cra_name = "sha384",
2777 			.cra_driver_name = "artpec-sha384",
2778 			.cra_priority = 300,
2779 			.cra_flags = CRYPTO_ALG_ASYNC,
2780 			.cra_blocksize = SHA384_BLOCK_SIZE,
2781 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2782 			.cra_alignmask = 3,
2783 			.cra_module = THIS_MODULE,
2784 			.cra_init = artpec6_crypto_ahash_init,
2785 			.cra_exit = artpec6_crypto_ahash_exit,
2786 		}
2787 	},
2788 	/* HMAC SHA-384 */
2789 	{
2790 		.init = artpec6_crypto_hmac_sha384_init,
2791 		.update = artpec6_crypto_hash_update,
2792 		.final = artpec6_crypto_hash_final,
2793 		.digest = artpec6_crypto_hmac_sha384_digest,
2794 		.import = artpec6_crypto_hash_import,
2795 		.export = artpec6_crypto_hash_export,
2796 		.setkey = artpec6_crypto_hash_set_key,
2797 		.halg.digestsize = SHA384_DIGEST_SIZE,
2798 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2799 		.halg.base = {
2800 			.cra_name = "hmac(sha384)",
2801 			.cra_driver_name = "artpec-hmac-sha384",
2802 			.cra_priority = 300,
2803 			.cra_flags = CRYPTO_ALG_ASYNC,
2804 			.cra_blocksize = SHA384_BLOCK_SIZE,
2805 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2806 			.cra_alignmask = 3,
2807 			.cra_module = THIS_MODULE,
2808 			.cra_init = artpec6_crypto_ahash_init_hmac_sha384,
2809 			.cra_exit = artpec6_crypto_ahash_exit,
2810 		}
2811 	},
2812 	/* SHA-512 */
2813 	{
2814 		.init = artpec6_crypto_sha512_init,
2815 		.update = artpec6_crypto_hash_update,
2816 		.final = artpec6_crypto_hash_final,
2817 		.digest = artpec6_crypto_sha512_digest,
2818 		.import = artpec6_crypto_hash_import,
2819 		.export = artpec6_crypto_hash_export,
2820 		.halg.digestsize = SHA512_DIGEST_SIZE,
2821 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2822 		.halg.base = {
2823 			.cra_name = "sha512",
2824 			.cra_driver_name = "artpec-sha512",
2825 			.cra_priority = 300,
2826 			.cra_flags = CRYPTO_ALG_ASYNC,
2827 			.cra_blocksize = SHA512_BLOCK_SIZE,
2828 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2829 			.cra_alignmask = 3,
2830 			.cra_module = THIS_MODULE,
2831 			.cra_init = artpec6_crypto_ahash_init,
2832 			.cra_exit = artpec6_crypto_ahash_exit,
2833 		}
2834 	},
2835 	/* HMAC SHA-512 */
2836 	{
2837 		.init = artpec6_crypto_hmac_sha512_init,
2838 		.update = artpec6_crypto_hash_update,
2839 		.final = artpec6_crypto_hash_final,
2840 		.digest = artpec6_crypto_hmac_sha512_digest,
2841 		.import = artpec6_crypto_hash_import,
2842 		.export = artpec6_crypto_hash_export,
2843 		.setkey = artpec6_crypto_hash_set_key,
2844 		.halg.digestsize = SHA512_DIGEST_SIZE,
2845 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2846 		.halg.base = {
2847 			.cra_name = "hmac(sha512)",
2848 			.cra_driver_name = "artpec-hmac-sha512",
2849 			.cra_priority = 300,
2850 			.cra_flags = CRYPTO_ALG_ASYNC,
2851 			.cra_blocksize = SHA512_BLOCK_SIZE,
2852 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2853 			.cra_alignmask = 3,
2854 			.cra_module = THIS_MODULE,
2855 			.cra_init = artpec6_crypto_ahash_init_hmac_sha512,
2856 			.cra_exit = artpec6_crypto_ahash_exit,
2857 		}
2858 	},
2859 };
2860 
2861 /* Crypto */
2862 static struct skcipher_alg crypto_algos[] = {
2863 	/* AES - ECB */
2864 	{
2865 		.base = {
2866 			.cra_name = "ecb(aes)",
2867 			.cra_driver_name = "artpec6-ecb-aes",
2868 			.cra_priority = 300,
2869 			.cra_flags = CRYPTO_ALG_ASYNC,
2870 			.cra_blocksize = AES_BLOCK_SIZE,
2871 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2872 			.cra_alignmask = 3,
2873 			.cra_module = THIS_MODULE,
2874 		},
2875 		.min_keysize = AES_MIN_KEY_SIZE,
2876 		.max_keysize = AES_MAX_KEY_SIZE,
2877 		.setkey = artpec6_crypto_cipher_set_key,
2878 		.encrypt = artpec6_crypto_encrypt,
2879 		.decrypt = artpec6_crypto_decrypt,
2880 		.init = artpec6_crypto_aes_ecb_init,
2881 		.exit = artpec6_crypto_aes_exit,
2882 	},
2883 	/* AES - CTR */
2884 	{
2885 		.base = {
2886 			.cra_name = "ctr(aes)",
2887 			.cra_driver_name = "artpec6-ctr-aes",
2888 			.cra_priority = 300,
2889 			.cra_flags = CRYPTO_ALG_ASYNC |
2890 				     CRYPTO_ALG_NEED_FALLBACK,
2891 			.cra_blocksize = 1,
2892 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2893 			.cra_alignmask = 3,
2894 			.cra_module = THIS_MODULE,
2895 		},
2896 		.min_keysize = AES_MIN_KEY_SIZE,
2897 		.max_keysize = AES_MAX_KEY_SIZE,
2898 		.ivsize = AES_BLOCK_SIZE,
2899 		.setkey = artpec6_crypto_cipher_set_key,
2900 		.encrypt = artpec6_crypto_ctr_encrypt,
2901 		.decrypt = artpec6_crypto_ctr_decrypt,
2902 		.init = artpec6_crypto_aes_ctr_init,
2903 		.exit = artpec6_crypto_aes_ctr_exit,
2904 	},
2905 	/* AES - CBC */
2906 	{
2907 		.base = {
2908 			.cra_name = "cbc(aes)",
2909 			.cra_driver_name = "artpec6-cbc-aes",
2910 			.cra_priority = 300,
2911 			.cra_flags = CRYPTO_ALG_ASYNC,
2912 			.cra_blocksize = AES_BLOCK_SIZE,
2913 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2914 			.cra_alignmask = 3,
2915 			.cra_module = THIS_MODULE,
2916 		},
2917 		.min_keysize = AES_MIN_KEY_SIZE,
2918 		.max_keysize = AES_MAX_KEY_SIZE,
2919 		.ivsize = AES_BLOCK_SIZE,
2920 		.setkey = artpec6_crypto_cipher_set_key,
2921 		.encrypt = artpec6_crypto_encrypt,
2922 		.decrypt = artpec6_crypto_decrypt,
2923 		.init = artpec6_crypto_aes_cbc_init,
2924 		.exit = artpec6_crypto_aes_exit
2925 	},
2926 	/* AES - XTS */
2927 	{
2928 		.base = {
2929 			.cra_name = "xts(aes)",
2930 			.cra_driver_name = "artpec6-xts-aes",
2931 			.cra_priority = 300,
2932 			.cra_flags = CRYPTO_ALG_ASYNC,
2933 			.cra_blocksize = 1,
2934 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2935 			.cra_alignmask = 3,
2936 			.cra_module = THIS_MODULE,
2937 		},
2938 		.min_keysize = 2*AES_MIN_KEY_SIZE,
2939 		.max_keysize = 2*AES_MAX_KEY_SIZE,
2940 		.ivsize = 16,
2941 		.setkey = artpec6_crypto_xts_set_key,
2942 		.encrypt = artpec6_crypto_encrypt,
2943 		.decrypt = artpec6_crypto_decrypt,
2944 		.init = artpec6_crypto_aes_xts_init,
2945 		.exit = artpec6_crypto_aes_exit,
2946 	},
2947 };
2948 
2949 static struct aead_alg aead_algos[] = {
2950 	{
2951 		.init   = artpec6_crypto_aead_init,
2952 		.setkey = artpec6_crypto_aead_set_key,
2953 		.encrypt = artpec6_crypto_aead_encrypt,
2954 		.decrypt = artpec6_crypto_aead_decrypt,
2955 		.ivsize = GCM_AES_IV_SIZE,
2956 		.maxauthsize = AES_BLOCK_SIZE,
2957 
2958 		.base = {
2959 			.cra_name = "gcm(aes)",
2960 			.cra_driver_name = "artpec-gcm-aes",
2961 			.cra_priority = 300,
2962 			.cra_flags = CRYPTO_ALG_ASYNC |
2963 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
2964 			.cra_blocksize = 1,
2965 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2966 			.cra_alignmask = 3,
2967 			.cra_module = THIS_MODULE,
2968 		},
2969 	}
2970 };
2971 
2972 #ifdef CONFIG_DEBUG_FS
2973 
2974 struct dbgfs_u32 {
2975 	char *name;
2976 	mode_t mode;
2977 	u32 *flag;
2978 	char *desc;
2979 };
2980 
2981 static struct dentry *dbgfs_root;
2982 
2983 static void artpec6_crypto_init_debugfs(void)
2984 {
2985 	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
2986 
2987 	if (!dbgfs_root || IS_ERR(dbgfs_root)) {
2988 		dbgfs_root = NULL;
2989 		pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
2990 		return;
2991 	}
2992 
2993 #ifdef CONFIG_FAULT_INJECTION
2994 	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
2995 				  &artpec6_crypto_fail_status_read);
2996 
2997 	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
2998 				  &artpec6_crypto_fail_dma_array_full);
2999 #endif
3000 }
3001 
3002 static void artpec6_crypto_free_debugfs(void)
3003 {
3004 	if (!dbgfs_root)
3005 		return;
3006 
3007 	debugfs_remove_recursive(dbgfs_root);
3008 	dbgfs_root = NULL;
3009 }
3010 #endif
3011 
3012 static const struct of_device_id artpec6_crypto_of_match[] = {
3013 	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
3014 	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
3015 	{}
3016 };
3017 MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
3018 
3019 static int artpec6_crypto_probe(struct platform_device *pdev)
3020 {
3021 	const struct of_device_id *match;
3022 	enum artpec6_crypto_variant variant;
3023 	struct artpec6_crypto *ac;
3024 	struct device *dev = &pdev->dev;
3025 	void __iomem *base;
3026 	struct resource *res;
3027 	int irq;
3028 	int err;
3029 
3030 	if (artpec6_crypto_dev)
3031 		return -ENODEV;
3032 
3033 	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
3034 	if (!match)
3035 		return -EINVAL;
3036 
3037 	variant = (enum artpec6_crypto_variant)match->data;
3038 
3039 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3040 	base = devm_ioremap_resource(&pdev->dev, res);
3041 	if (IS_ERR(base))
3042 		return PTR_ERR(base);
3043 
3044 	irq = platform_get_irq(pdev, 0);
3045 	if (irq < 0)
3046 		return -ENODEV;
3047 
3048 	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
3049 			  GFP_KERNEL);
3050 	if (!ac)
3051 		return -ENOMEM;
3052 
3053 	platform_set_drvdata(pdev, ac);
3054 	ac->variant = variant;
3055 
3056 	spin_lock_init(&ac->queue_lock);
3057 	INIT_LIST_HEAD(&ac->queue);
3058 	INIT_LIST_HEAD(&ac->pending);
3059 	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
3060 
3061 	ac->base = base;
3062 
3063 	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
3064 		sizeof(struct artpec6_crypto_dma_descriptors),
3065 		64,
3066 		0,
3067 		NULL);
3068 	if (!ac->dma_cache)
3069 		return -ENOMEM;
3070 
3071 #ifdef CONFIG_DEBUG_FS
3072 	artpec6_crypto_init_debugfs();
3073 #endif
3074 
3075 	tasklet_init(&ac->task, artpec6_crypto_task,
3076 		     (unsigned long)ac);
3077 
3078 	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
3079 				      GFP_KERNEL);
3080 	if (!ac->pad_buffer)
3081 		return -ENOMEM;
3082 	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
3083 
3084 	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
3085 				      GFP_KERNEL);
3086 	if (!ac->zero_buffer)
3087 		return -ENOMEM;
3088 	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
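	/*
	 * Note on the two buffers above: each is allocated at twice
	 * ARTPEC_CACHE_LINE_MAX (2 * 32 bytes) so that PTR_ALIGN() can
	 * round the pointer up to a cache-line boundary while still
	 * leaving at least one full 32-byte line usable.
	 */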
3089 
3090 	err = init_crypto_hw(ac);
3091 	if (err)
3092 		goto free_cache;
3093 
3094 	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
3095 			       "artpec6-crypto", ac);
3096 	if (err)
3097 		goto disable_hw;
3098 
3099 	artpec6_crypto_dev = &pdev->dev;
3100 
3101 	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
3102 	if (err) {
3103 		dev_err(dev, "Failed to register ahashes\n");
3104 		goto disable_hw;
3105 	}
3106 
3107 	if (variant != ARTPEC6_CRYPTO) {
3108 		err = crypto_register_ahashes(artpec7_hash_algos,
3109 					      ARRAY_SIZE(artpec7_hash_algos));
3110 		if (err) {
3111 			dev_err(dev, "Failed to register ahashes\n");
3112 			goto unregister_ahashes;
3113 		}
3114 	}
3115 
3116 	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
3117 	if (err) {
3118 		dev_err(dev, "Failed to register ciphers\n");
3119 		goto unregister_a7_ahashes;
3120 	}
3121 
3122 	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
3123 	if (err) {
3124 		dev_err(dev, "Failed to register aeads\n");
3125 		goto unregister_algs;
3126 	}
3127 
3128 	return 0;
3129 
3130 unregister_algs:
3131 	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
3132 unregister_a7_ahashes:
3133 	if (variant != ARTPEC6_CRYPTO)
3134 		crypto_unregister_ahashes(artpec7_hash_algos,
3135 					  ARRAY_SIZE(artpec7_hash_algos));
3136 unregister_ahashes:
3137 	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
3138 disable_hw:
3139 	artpec6_crypto_disable_hw(ac);
3140 free_cache:
3141 	kmem_cache_destroy(ac->dma_cache);
3142 	return err;
3143 }
3144 
3145 static int artpec6_crypto_remove(struct platform_device *pdev)
3146 {
3147 	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
3148 	int irq = platform_get_irq(pdev, 0);
3149 
3150 	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
3151 	if (ac->variant != ARTPEC6_CRYPTO)
3152 		crypto_unregister_ahashes(artpec7_hash_algos,
3153 					  ARRAY_SIZE(artpec7_hash_algos));
3154 	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
3155 	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
3156 
3157 	tasklet_disable(&ac->task);
3158 	devm_free_irq(&pdev->dev, irq, ac);
3159 	tasklet_kill(&ac->task);
3160 	del_timer_sync(&ac->timer);
3161 
3162 	artpec6_crypto_disable_hw(ac);
3163 
3164 	kmem_cache_destroy(ac->dma_cache);
3165 #ifdef CONFIG_DEBUG_FS
3166 	artpec6_crypto_free_debugfs();
3167 #endif
3168 	return 0;
3169 }
3170 
3171 static struct platform_driver artpec6_crypto_driver = {
3172 	.probe   = artpec6_crypto_probe,
3173 	.remove  = artpec6_crypto_remove,
3174 	.driver  = {
3175 		.name  = "artpec6-crypto",
3176 		.of_match_table = artpec6_crypto_of_match,
3177 	},
3178 };
3179 
3180 module_platform_driver(artpec6_crypto_driver);
3181 
3182 MODULE_AUTHOR("Axis Communications AB");
3183 MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
3184 MODULE_LICENSE("GPL");
3185