/*
 *   Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 *    Copyright (C) 2014-2017  Axis Communications AB
 */
#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/xts.h>

/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX	32

#define PDMA_OUT_CFG		0x0000
#define PDMA_OUT_BUF_CFG	0x0004
#define PDMA_OUT_CMD		0x0008
#define PDMA_OUT_DESCRQ_PUSH	0x0010
#define PDMA_OUT_DESCRQ_STAT	0x0014

#define A6_PDMA_IN_CFG		0x0028
#define A6_PDMA_IN_BUF_CFG	0x002c
#define A6_PDMA_IN_CMD		0x0030
#define A6_PDMA_IN_STATQ_PUSH	0x0038
#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
#define A6_PDMA_IN_DESCRQ_STAT	0x0048
#define A6_PDMA_INTR_MASK	0x0068
#define A6_PDMA_ACK_INTR	0x006c
#define A6_PDMA_MASKED_INTR	0x0074

#define A7_PDMA_IN_CFG		0x002c
#define A7_PDMA_IN_BUF_CFG	0x0030
#define A7_PDMA_IN_CMD		0x0034
#define A7_PDMA_IN_STATQ_PUSH	0x003c
#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
#define A7_PDMA_IN_DESCRQ_STAT	0x004C
#define A7_PDMA_INTR_MASK	0x006c
#define A7_PDMA_ACK_INTR	0x0070
#define A7_PDMA_MASKED_INTR	0x0078

#define PDMA_OUT_CFG_EN				BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)

#define PDMA_OUT_CMD_START			BIT(0)
#define A6_PDMA_OUT_CMD_STOP			BIT(3)
#define A7_PDMA_OUT_CMD_STOP			BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define PDMA_IN_CFG_EN				BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)

#define PDMA_IN_CMD_START			BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
#define A6_PDMA_IN_CMD_STOP			BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
#define A7_PDMA_IN_CMD_STOP			BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)

#define A6_CRY_MD_OPER		GENMASK(19, 16)

#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)

#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR	BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)

#define A7_CRY_MD_OPER		GENMASK(11, 8)

#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)

#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR	BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)

/* DMA metadata constants */
#define regk_crypto_aes_cbc     0x00000002
#define regk_crypto_aes_ctr     0x00000003
#define regk_crypto_aes_ecb     0x00000001
#define regk_crypto_aes_gcm     0x00000004
#define regk_crypto_aes_xts     0x00000005
#define regk_crypto_cache       0x00000002
#define a6_regk_crypto_dlkey    0x0000000a
#define a7_regk_crypto_dlkey    0x0000000e
#define regk_crypto_ext         0x00000001
#define regk_crypto_hmac_sha1   0x00000007
#define regk_crypto_hmac_sha256 0x00000009
#define regk_crypto_init        0x00000000
#define regk_crypto_key_128     0x00000000
#define regk_crypto_key_192     0x00000001
#define regk_crypto_key_256     0x00000002
#define regk_crypto_null        0x00000000
#define regk_crypto_sha1        0x00000006
#define regk_crypto_sha256      0x00000008

/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1        : 1;
	unsigned char eop         : 1;
	unsigned char intr        : 1;
	unsigned char short_len   : 3;
	unsigned char pad2        : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr   data;
		struct pdma_short_descr  shrt;
	};
};

struct pdma_stat_descr {
	unsigned char pad1        : 1;
	unsigned char pad2        : 1;
	unsigned char eop         : 1;
	unsigned char pad3        : 5;
	unsigned int  len         : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME   "Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5

/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64-byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata block that is inserted at the beginning of each
 * dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |       |         |
 *     |      |        |       |         |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */

struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};

enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};

struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2 * AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};

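/* In-memory layout of the GCM context that is sent to the hardware at
 * the start of each AEAD operation; see artpec6_crypto_prepare_aead().
 */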
struct artpec6_crypto_aead_hw_ctx {
	__be64	aad_length_bits;
	__be64	text_length_bits;
	__u8	J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};

/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};

static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);

struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}
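
/*
 * Typical usage, as in the prepare functions below: initialise the walk
 * on a request scatterlist and then consume it chunk by chunk, e.g.
 *
 *	struct artpec6_crypto_walk walk;
 *
 *	artpec6_crypto_walk_init(&walk, areq->src);
 *	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
 */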

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}

static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg, 1, b->buf, b->length, b->offset);

		list_del(&b->list);
		kfree(b);
	}
}

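/*
 * Heuristic back-pressure check: treat the PDMA as busy once more than
 * six requests are in flight; artpec6_crypto_submit() then backlogs or
 * rejects new requests.
 */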
static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}

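/*
 * Queue a prepared request for execution. Returns -EINPROGRESS if the
 * request was submitted to the hardware, or -EBUSY if it was either
 * backlogged (CRYPTO_TFM_REQ_MAY_BACKLOG set) or dropped after its
 * resources were released.
 */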
static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}

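/*
 * Push the in, stat and out descriptor queues to the PDMA and start
 * both channels; called under queue_lock from artpec6_crypto_submit().
 */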
static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}

static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

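/* Simulate a full descriptor array when fault injection is enabled. */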
static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}

/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 *
 * @common: The request context
 * @addr:   The physical address of the data buffer
 * @len:    The length of the data buffer
 * @eop:    True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 *
 * @common: The request context
 * @dst:    The virtual address of the data
 * @len:    The length of the data, must be between 1 and 7 bytes
 * @eop:    True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length is zero or exceeds 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}
	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);
	return 0;
}

static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				      struct page *page, size_t offset,
				      size_t size,
				      enum dma_data_direction dir,
				      dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	*dma_addr_out = 0;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}

static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					  dma_addr_out);
}

static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat,
				sizeof(dma->stat[0]) * dma->in_cnt,
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}

static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}

/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 *
 * @common:    The request context
 * @dst:       The virtual address of the data
 * @len:       The length of the data
 * @eop:       True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is less than 7 bytes then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						   DMA_TO_DEVICE,
						   &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}

/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 *
 * @common: The request context
 * @addr:   The physical address of the data buffer
 * @len:    The length of the data buffer
 * @intr:   True if an interrupt should be fired after HW processing of this
 *	    descriptor
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
			       dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 *
 * @common: The request context
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			  void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					   DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}

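/*
 * Allocate a bounce buffer descriptor together with its data area. The
 * data area is over-allocated by two cache lines so that bbuf->buf can
 * be aligned to ARTPEC_CACHE_LINE_MAX.
 */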
static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}

static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}

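/** artpec6_crypto_setup_sg_descrs_in - Create in descriptors for a sg walk
 *
 * @common: The request context
 * @walk:   The destination scatterlist walk, advanced past the consumed bytes
 * @count:  The number of bytes to consume
 *
 * Chunks that are not aligned to a full cache line go through bounce
 * buffers; aligned chunks are DMA-mapped directly.
 *
 * @return 0 on success or -EINVAL if the scatterlist is exhausted early
 */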
static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire cache line is owned by the DMA buffer; this also
		 * holds when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX - 1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							 sg_page(walk->sg),
							 walk->sg->offset +
							 walk->offset,
							 chunk,
							 DMA_FROM_DEVICE,
							 &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

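/** artpec6_crypto_setup_sg_descrs_out - Create out descriptors for a sg walk
 *
 * @common: The request context
 * @walk:   The source scatterlist walk, advanced past the consumed bytes
 * @count:  The number of bytes to consume
 *
 * Chunks at addresses that are not 4-byte aligned are copied into short
 * descriptors; the rest are DMA-mapped directly.
 *
 * @return 0 on success or -EINVAL if the scatterlist is exhausted early
 */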
static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4 - (addr & 3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							 sg_page(walk->sg),
							 walk->sg->offset +
							 walk->offset,
							 chunk,
							 DMA_TO_DEVICE,
							 &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * @common: The request context
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return  0 on success
 *	-EINVAL if the out descriptor list is empty or has overflowed
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt - 1];
	d->ctrl.eop = 1;

	return 0;
}

/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * @common: The request context
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt - 1];
	d->ctrl.intr = 1;
	return 0;
}

/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @oper:     The hash operation, used to select the pad block size
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total number of message bytes hashed so far
 * @bitcount: The total message length in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
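
/*
 * Worked example for create_hash_pad(): hashing 3 bytes with SHA-256
 * gives mod = 64 and target = 55 (after the leading 0x80 byte), so
 * diff = 3, pad_bytes = 52 and the return value is 1 + 52 + 8 = 61,
 * which pads the message to exactly one 64-byte block.
 */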

static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
		struct crypto_async_request *parent,
		void (*complete)(struct crypto_async_request *req),
		struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}

static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}

/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
				  &req->base,
				  complete,
				  req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
				  complete,
				  req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

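/*
 * CTR requests are normally handled by the hardware, but requests whose
 * 32-bit counter would wrap around are diverted to the software
 * fallback tfm allocated in artpec6_crypto_aes_ctr_init().
 */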
static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32 bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV acts as a counter, so fall back if the counter is
	 * going to overflow.
	 */
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, nblks);

		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
						  ctx->key_length);
		if (ret)
			return ret;

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}

static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}

/*
 * AEAD functions
 */
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
			       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32) {
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}

static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
				  artpec6_crypto_complete_aead,
				  NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
				  &req->base,
				  artpec6_crypto_complete_aead,
				  NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

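/*
 * Set up the PDMA descriptor lists for one hash operation. Returns
 * ARTPEC6_CRYPTO_PREPARE_HASH_START if the hardware should be started,
 * ARTPEC6_CRYPTO_PREPARE_HASH_NO_START if all new data fitted in the
 * partial block buffer, or a negative error code on failure.
 */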
static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);

	/* Upload the HMAC key, which must be the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}

	/* Set up the metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);

		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will write at least
			 * some bytes to the HW. Empty this partial buffer
			 * before tackling the SG lists
			 */
			memcpy(req_ctx->partial_buffer_out,
				req_ctx->partial_buffer,
				req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		artpec6_crypto_walk_init(&walk, areq->src);

		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
							   ready_bytes -
							   done_bytes);
		if (error)
			return error;

		if (walk.sg) {
			size_t sg_skip = ready_bytes - done_bytes;
			size_t sg_rem = areq->nbytes - sg_skip;

			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req_ctx->partial_buffer +
					   req_ctx->partial_bytes,
					   sg_rem, sg_skip);

			req_ctx->partial_bytes += sg_rem;
		}

		req_ctx->digcnt += ready_bytes;
		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
	}

	/* Finalize */
	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
		size_t hash_pad_len;
		u64 digest_bits;
		u32 oper;

		if (variant == ARTPEC6_CRYPTO)
			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
		else
			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);

		/* Write out the partial buffer if present */
		if (req_ctx->partial_bytes) {
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);
			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			req_ctx->digcnt += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
			digest_bits = 8 * (req_ctx->digcnt + blocksize);
		else
			digest_bits = 8 * req_ctx->digcnt;

		/* Add the hash pad */
		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
					       req_ctx->digcnt, digest_bits);
		error = artpec6_crypto_setup_out_descr(common,
						      req_ctx->pad_buffer,
						      hash_pad_len, false,
						      true);
		req_ctx->digcnt = 0;

		if (error)
			return error;

		/* Descriptor for the final result */
		error = artpec6_crypto_setup_in_descr(common, areq->result,
						      digestsize,
						      true);
		if (error)
			return error;

	} else { /* This is not the final operation for this request */
		if (!run_hw)
			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;

		/* Save the result to the context */
		error = artpec6_crypto_setup_in_descr(common,
						      req_ctx->digeststate,
						      contextsize, false);
		if (error)
			return error;
	}

	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
				 HASH_FLAG_FINALIZE);

	error = artpec6_crypto_terminate_in_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_terminate_out_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_dma_map_descs(common);
	if (error)
		return error;

	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
}

static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;

	return 0;
}

static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback =
		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
					   0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;

	return 0;
}

static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;

	return 0;
}

static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;

	return 0;
}

static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
	artpec6_crypto_aes_exit(tfm);
}

static int
artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		break;
	default:
		crypto_skcipher_set_flags(cipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

static int
artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);
	int ret;

	ret = xts_check_key(&cipher->base, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case 32:
	case 48:
	case 64:
		break;
	default:
		crypto_skcipher_set_flags(cipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @areq: The async request to process
 *
 * @return 0 if the dma job was successfully prepared
 *	  <0 on error
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
 */
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
{
	int ret;
	struct artpec6_crypto_walk walk;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_crypto_req_common *common;
	bool cipher_decr = false;
	size_t cipher_klen;
	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
	u32 oper;

	req_ctx = skcipher_request_ctx(areq);
	common = &req_ctx->common;

	artpec6_crypto_init_dma_operation(common);

	if (variant == ARTPEC6_CRYPTO)
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
	else
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);

	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					      ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
		cipher_klen = ctx->key_length / 2;
	else
		cipher_klen = ctx->key_length;

	/* Metadata */
	switch (cipher_klen) {
	case 16:
		cipher_len = regk_crypto_key_128;
		break;
	case 24:
		cipher_len = regk_crypto_key_192;
		break;
	case 32:
		cipher_len = regk_crypto_key_256;
		break;
	default:
		pr_err("%s: Invalid key length %zu!\n",
			MODULE_NAME, ctx->key_length);
		return -EINVAL;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
		oper = regk_crypto_aes_ecb;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		oper = regk_crypto_aes_cbc;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
		oper = regk_crypto_aes_ctr;
		cipher_decr = false;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		oper = regk_crypto_aes_xts;
		cipher_decr = req_ctx->decrypt;

		if (variant == ARTPEC6_CRYPTO)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
		else
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
		break;

	default:
		pr_err("%s: Invalid cipher mode %d!\n",
			MODULE_NAME, ctx->crypto_type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					    &req_ctx->cipher_md,
					    sizeof(req_ctx->cipher_md),
					    false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	if (iv_len) {
		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
						     false, false);
		if (ret)
			return ret;
	}
	/* Data out */
	artpec6_crypto_walk_init(&walk, areq->src);
	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* Data in */
	artpec6_crypto_walk_init(&walk, areq->dst);
	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* The HW needs the input padded to a full AES block for the CTR
	 * and XTS modes.
	 */
	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
			     areq->cryptlen;

		if (pad) {
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->pad_buffer,
							     pad, false, false);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer, pad,
							    false);
			if (ret)
				return ret;
		}
	}

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}

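/** artpec6_crypto_prepare_aead - Prepare an async AES-GCM AEAD request
 *
 * @areq: The AEAD request to process
 *
 * @return 0 if the dma job was successfully prepared
 *	  <0 on error
 *
 * The layout mirrors artpec6_crypto_prepare_crypto(): a key packet is
 * followed by the cipher metadata, the GCM context (J0 plus the AAD and
 * text lengths in bits) and the zero-padded associated data and
 * plain/ciphertext.
 */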
1831 static int artpec6_crypto_prepare_aead(struct aead_request *areq)
1832 {
1833 	size_t count;
1834 	int ret;
1835 	size_t input_length;
1836 	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
1837 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
1838 	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
1839 	struct artpec6_crypto_req_common *common = &req_ctx->common;
1840 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
1841 	enum artpec6_crypto_variant variant = ac->variant;
1842 	u32 md_cipher_len;
1843 
1844 	artpec6_crypto_init_dma_operation(common);
1845 
1846 	/* Key */
1847 	if (variant == ARTPEC6_CRYPTO) {
1848 		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
1849 					 a6_regk_crypto_dlkey);
1850 	} else {
1851 		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
1852 					 a7_regk_crypto_dlkey);
1853 	}
1854 	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
1855 					     sizeof(ctx->key_md), false, false);
1856 	if (ret)
1857 		return ret;
1858 
1859 	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
1860 					     ctx->key_length, true, false);
1861 	if (ret)
1862 		return ret;
1863 
1864 	req_ctx->cipher_md = 0;
1865 
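	/* Map the AES key length to the HW key-size encoding. */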
1866 	switch (ctx->key_length) {
1867 	case 16:
1868 		md_cipher_len = regk_crypto_key_128;
1869 		break;
1870 	case 24:
1871 		md_cipher_len = regk_crypto_key_192;
1872 		break;
1873 	case 32:
1874 		md_cipher_len = regk_crypto_key_256;
1875 		break;
1876 	default:
1877 		return -EINVAL;
1878 	}
1879 
1880 	if (variant == ARTPEC6_CRYPTO) {
1881 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
1882 						 regk_crypto_aes_gcm);
1883 		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
1884 						 md_cipher_len);
1885 		if (req_ctx->decrypt)
1886 			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
1887 	} else {
1888 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
1889 						 regk_crypto_aes_gcm);
1890 		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
1891 						 md_cipher_len);
1892 		if (req_ctx->decrypt)
1893 			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
1894 	}
1895 
	ret = artpec6_crypto_setup_out_descr(common,
					    &req_ctx->cipher_md,
					    sizeof(req_ctx->cipher_md), false,
					    false);
1900 	if (ret)
1901 		return ret;
1902 
1903 	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
1904 	if (ret)
1905 		return ret;
1906 
	/* For decryption, cryptlen includes the authentication tag. */
1908 	input_length = areq->cryptlen;
1909 	if (req_ctx->decrypt)
1910 		input_length -= crypto_aead_authsize(cipher);
1911 
	/* Prepare the GCM context buffer: AAD and text lengths in bits. */
	req_ctx->hw_ctx.aad_length_bits = __cpu_to_be64(8 * areq->assoclen);
	req_ctx->hw_ctx.text_length_bits = __cpu_to_be64(8 * input_length);
1918 
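	/*
	 * Build J0 as specified for 96-bit IVs in NIST SP 800-38D:
	 * the IV followed by a 32-bit block counter initialised to one.
	 */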
1919 	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
	/* The HW omits the initial increment of the counter field. */
1921 	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
1922 
1923 	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
1924 		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
1925 	if (ret)
1926 		return ret;
1927 
1928 	{
1929 		struct artpec6_crypto_walk walk;
1930 
1931 		artpec6_crypto_walk_init(&walk, areq->src);
1932 
1933 		/* Associated data */
1934 		count = areq->assoclen;
1935 		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1936 		if (ret)
1937 			return ret;
1938 
1939 		if (!IS_ALIGNED(areq->assoclen, 16)) {
1940 			size_t assoc_pad = 16 - (areq->assoclen % 16);
1941 			/* The HW mandates zero padding here */
1942 			ret = artpec6_crypto_setup_out_descr(common,
1943 							     ac->zero_buffer,
1944 							     assoc_pad, false,
1945 							     false);
1946 			if (ret)
1947 				return ret;
1948 		}
1949 
1950 		/* Data to crypto */
1951 		count = input_length;
1952 		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
1953 		if (ret)
1954 			return ret;
1955 
1956 		if (!IS_ALIGNED(input_length, 16)) {
1957 			size_t crypto_pad = 16 - (input_length % 16);
1958 			/* The HW mandates zero padding here */
1959 			ret = artpec6_crypto_setup_out_descr(common,
1960 							     ac->zero_buffer,
1961 							     crypto_pad,
1962 							     false,
1963 							     false);
1964 			if (ret)
1965 				return ret;
1966 		}
1967 	}
1968 
1969 	/* Data from crypto */
1970 	{
1971 		struct artpec6_crypto_walk walk;
1972 		size_t output_len = areq->cryptlen;
1973 
1974 		if (req_ctx->decrypt)
1975 			output_len -= crypto_aead_authsize(cipher);
1976 
1977 		artpec6_crypto_walk_init(&walk, areq->dst);
1978 
1979 		/* skip associated data in the output */
1980 		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
1981 		if (count)
1982 			return -EINVAL;
1983 
1984 		count = output_len;
1985 		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
1986 		if (ret)
1987 			return ret;
1988 
1989 		/* Put padding between the cryptotext and the auth tag */
1990 		if (!IS_ALIGNED(output_len, 16)) {
1991 			size_t crypto_pad = 16 - (output_len % 16);
1992 
1993 			ret = artpec6_crypto_setup_in_descr(common,
1994 							    ac->pad_buffer,
1995 							    crypto_pad, false);
1996 			if (ret)
1997 				return ret;
1998 		}
1999 
		/* The authentication tag shall follow immediately after
		 * the output ciphertext. For decryption it is written to a
		 * context buffer for later comparison with the input tag.
		 */
2004 
2005 		if (req_ctx->decrypt) {
2006 			ret = artpec6_crypto_setup_in_descr(common,
2007 				req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
2008 			if (ret)
2009 				return ret;
2010 
2011 		} else {
			/* For encryption the requested tag size may be
			 * smaller than the tag generated by the hardware.
			 */
2015 			size_t authsize = crypto_aead_authsize(cipher);
2016 
2017 			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
2018 								authsize);
2019 			if (ret)
2020 				return ret;
2021 
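			/* Route the unused remainder of the 16-byte HW tag
			 * into the scratch pad buffer.
			 */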
2022 			if (authsize < AES_BLOCK_SIZE) {
2023 				count = AES_BLOCK_SIZE - authsize;
2024 				ret = artpec6_crypto_setup_in_descr(common,
2025 					ac->pad_buffer,
2026 					count, false);
2027 				if (ret)
2028 					return ret;
2029 			}
2030 		}
2031 
2032 	}
2033 
2034 	ret = artpec6_crypto_terminate_in_descrs(common);
2035 	if (ret)
2036 		return ret;
2037 
2038 	ret = artpec6_crypto_terminate_out_descrs(common);
2039 	if (ret)
2040 		return ret;
2041 
2042 	return artpec6_crypto_dma_map_descs(common);
2043 }
2044 
2045 static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
2046 	    struct list_head *completions)
2047 {
2048 	struct artpec6_crypto_req_common *req;
2049 
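	/*
	 * Drain the backlog while the HW has room: move each request to
	 * the pending list and start its DMA job.  The requests are
	 * collected on the completions list so that -EINPROGRESS can be
	 * reported to their callers once the queue lock has been dropped.
	 */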
2050 	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
2051 		req = list_first_entry(&ac->queue,
2052 				       struct artpec6_crypto_req_common,
2053 				       list);
2054 		list_move_tail(&req->list, &ac->pending);
2055 		artpec6_crypto_start_dma(req);
2056 
2057 		list_add_tail(&req->complete_in_progress, completions);
2058 	}
2059 
2060 	/*
2061 	 * In some cases, the hardware can raise an in_eop_flush interrupt
2062 	 * before actually updating the status, so we have an timer which will
2063 	 * recheck the status on timeout.  Since the cases are expected to be
2064 	 * very rare, we use a relatively large timeout value.  There should be
2065 	 * no noticeable negative effect if we timeout spuriously.
2066 	 */
2067 	if (ac->pending_count)
2068 		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
2069 	else
2070 		del_timer(&ac->timer);
2071 }
2072 
2073 static void artpec6_crypto_timeout(struct timer_list *t)
2074 {
2075 	struct artpec6_crypto *ac = from_timer(ac, t, timer);
2076 
2077 	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
2078 
2079 	tasklet_schedule(&ac->task);
2080 }
2081 
2082 static void artpec6_crypto_task(unsigned long data)
2083 {
2084 	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
2085 	struct artpec6_crypto_req_common *req;
2086 	struct artpec6_crypto_req_common *n;
2087 	struct list_head complete_done;
2088 	struct list_head complete_in_progress;
2089 
2090 	INIT_LIST_HEAD(&complete_done);
2091 	INIT_LIST_HEAD(&complete_in_progress);
2092 
2093 	if (list_empty(&ac->pending)) {
2094 		pr_debug("Spurious IRQ\n");
2095 		return;
2096 	}
2097 
2098 	spin_lock_bh(&ac->queue_lock);
2099 
2100 	list_for_each_entry_safe(req, n, &ac->pending, list) {
2101 		struct artpec6_crypto_dma_descriptors *dma = req->dma;
2102 		u32 stat;
2103 		dma_addr_t stataddr;
2104 
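		/* Sync the job's final status word from DMA before
		 * inspecting it.
		 */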
		stataddr = dma->stat_dma_addr + 4 * (dma->in_cnt - 1);
		dma_sync_single_for_cpu(artpec6_crypto_dev,
					stataddr,
					4,
					DMA_BIDIRECTIONAL);

		stat = dma->stat[dma->in_cnt - 1];
2112 
2113 		/* A non-zero final status descriptor indicates
2114 		 * this job has finished.
2115 		 */
2116 		pr_debug("Request %p status is %X\n", req, stat);
2117 		if (!stat)
2118 			break;
2119 
2120 		/* Allow testing of timeout handling with fault injection */
2121 #ifdef CONFIG_FAULT_INJECTION
2122 		if (should_fail(&artpec6_crypto_fail_status_read, 1))
2123 			continue;
2124 #endif
2125 
2126 		pr_debug("Completing request %p\n", req);
2127 
2128 		list_move_tail(&req->list, &complete_done);
2129 
2130 		ac->pending_count--;
2131 	}
2132 
2133 	artpec6_crypto_process_queue(ac, &complete_in_progress);
2134 
2135 	spin_unlock_bh(&ac->queue_lock);
2136 
2137 	/* Perform the completion callbacks without holding the queue lock
2138 	 * to allow new request submissions from the callbacks.
2139 	 */
2140 	list_for_each_entry_safe(req, n, &complete_done, list) {
2141 		artpec6_crypto_dma_unmap_all(req);
2142 		artpec6_crypto_copy_bounce_buffers(req);
2143 		artpec6_crypto_common_destroy(req);
2144 
2145 		req->complete(req->req);
2146 	}
2147 
2148 	list_for_each_entry_safe(req, n, &complete_in_progress,
2149 				 complete_in_progress) {
2150 		req->req->complete(req->req, -EINPROGRESS);
2151 	}
2152 }
2153 
2154 static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
2155 {
2156 	req->complete(req, 0);
2157 }
2158 
2159 static void
2160 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
2161 {
2162 	struct skcipher_request *cipher_req = container_of(req,
2163 		struct skcipher_request, base);
2164 
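	/* For CBC chaining the next IV is the last ciphertext block.
	 * After decryption it must be taken from src, as dst now holds
	 * the plaintext.
	 */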
2165 	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
2166 				 cipher_req->cryptlen - AES_BLOCK_SIZE,
2167 				 AES_BLOCK_SIZE, 0);
2168 	req->complete(req, 0);
2169 }
2170 
2171 static void
2172 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
2173 {
2174 	struct skcipher_request *cipher_req = container_of(req,
2175 		struct skcipher_request, base);
2176 
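	/* As for CBC decryption above, save the last ciphertext block as
	 * the chaining IV; after encryption it sits at the tail of dst.
	 */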
2177 	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
2178 				 cipher_req->cryptlen - AES_BLOCK_SIZE,
2179 				 AES_BLOCK_SIZE, 0);
2180 	req->complete(req, 0);
2181 }
2182 
2183 static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
2184 {
2185 	int result = 0;
2186 
2187 	/* Verify GCM hashtag. */
2188 	struct aead_request *areq = container_of(req,
2189 		struct aead_request, base);
2190 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
2191 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
2192 
2193 	if (req_ctx->decrypt) {
2194 		u8 input_tag[AES_BLOCK_SIZE];
2195 		unsigned int authsize = crypto_aead_authsize(aead);
2196 
2197 		sg_pcopy_to_buffer(areq->src,
2198 				   sg_nents(areq->src),
2199 				   input_tag,
2200 				   authsize,
2201 				   areq->assoclen + areq->cryptlen -
2202 				   authsize);
2203 
2204 		if (crypto_memneq(req_ctx->decryption_tag,
2205 				  input_tag,
2206 				  authsize)) {
2207 			pr_debug("***EBADMSG:\n");
2208 			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
2209 					     input_tag, authsize, true);
2210 			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
2211 					     req_ctx->decryption_tag,
2212 					     authsize, true);
2213 
2214 			result = -EBADMSG;
2215 		}
2216 	}
2217 
2218 	req->complete(req, result);
2219 }
2220 
2221 static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
2222 {
2223 	req->complete(req, 0);
2224 }
2225 
2226 
2227 /*------------------- Hash functions -----------------------------------------*/
2228 static int
2229 artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
2230 		    const u8 *key, unsigned int keylen)
2231 {
2232 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
2233 	size_t blocksize;
2234 	int ret;
2235 
2236 	if (!keylen) {
2237 		pr_err("Invalid length (%d) of HMAC key\n",
2238 			keylen);
2239 		return -EINVAL;
2240 	}
2241 
2242 	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2243 
2244 	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2245 
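	/*
	 * Per HMAC (RFC 2104), a key longer than the block size is first
	 * hashed down to the digest size; shorter keys are used as-is.
	 */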
2246 	if (keylen > blocksize) {
2247 		SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
2248 
2249 		hdesc->tfm = tfm_ctx->child_hash;
2250 		hdesc->flags = crypto_ahash_get_flags(tfm) &
2251 			       CRYPTO_TFM_REQ_MAY_SLEEP;
2252 
2253 		tfm_ctx->hmac_key_length = blocksize;
2254 		ret = crypto_shash_digest(hdesc, key, keylen,
2255 					  tfm_ctx->hmac_key);
2256 		if (ret)
2257 			return ret;
2258 
2259 	} else {
2260 		memcpy(tfm_ctx->hmac_key, key, keylen);
2261 		tfm_ctx->hmac_key_length = keylen;
2262 	}
2263 
2264 	return 0;
2265 }
2266 
2267 static int
2268 artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
2269 {
2270 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2271 	enum artpec6_crypto_variant variant = ac->variant;
2272 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2273 	u32 oper;
2274 
2275 	memset(req_ctx, 0, sizeof(*req_ctx));
2276 
2277 	req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
2278 	if (hmac)
2279 		req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
2280 
2281 	switch (type) {
2282 	case ARTPEC6_CRYPTO_HASH_SHA1:
2283 		oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
2284 		break;
2285 	case ARTPEC6_CRYPTO_HASH_SHA256:
2286 		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
2287 		break;
2288 	default:
2289 		pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
2290 		return -EINVAL;
2291 	}
2292 
2293 	if (variant == ARTPEC6_CRYPTO)
2294 		req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
2295 	else
2296 		req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
2297 
2298 	return 0;
2299 }
2300 
2301 static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
2302 {
2303 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2304 	int ret;
2305 
2306 	if (!req_ctx->common.dma) {
2307 		ret = artpec6_crypto_common_init(&req_ctx->common,
2308 					  &req->base,
2309 					  artpec6_crypto_complete_hash,
2310 					  NULL, 0);
2311 
2312 		if (ret)
2313 			return ret;
2314 	}
2315 
2316 	ret = artpec6_crypto_prepare_hash(req);
2317 	switch (ret) {
2318 	case ARTPEC6_CRYPTO_PREPARE_HASH_START:
2319 		ret = artpec6_crypto_submit(&req_ctx->common);
2320 		break;
2321 
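	/* No HW job was started (e.g. the data was buffered for a later
	 * call); fall through to release the DMA descriptors and report
	 * success.
	 */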
2322 	case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
2323 		ret = 0;
2324 		/* Fallthrough */
2325 
2326 	default:
2327 		artpec6_crypto_common_destroy(&req_ctx->common);
2328 		break;
2329 	}
2330 
2331 	return ret;
2332 }
2333 
2334 static int artpec6_crypto_hash_final(struct ahash_request *req)
2335 {
2336 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2337 
2338 	req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
2339 
2340 	return artpec6_crypto_prepare_submit_hash(req);
2341 }
2342 
2343 static int artpec6_crypto_hash_update(struct ahash_request *req)
2344 {
2345 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2346 
2347 	req_ctx->hash_flags |= HASH_FLAG_UPDATE;
2348 
2349 	return artpec6_crypto_prepare_submit_hash(req);
2350 }
2351 
2352 static int artpec6_crypto_sha1_init(struct ahash_request *req)
2353 {
2354 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2355 }
2356 
2357 static int artpec6_crypto_sha1_digest(struct ahash_request *req)
2358 {
2359 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2360 
2361 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
2362 
2363 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2364 
2365 	return artpec6_crypto_prepare_submit_hash(req);
2366 }
2367 
2368 static int artpec6_crypto_sha256_init(struct ahash_request *req)
2369 {
2370 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2371 }
2372 
2373 static int artpec6_crypto_sha256_digest(struct ahash_request *req)
2374 {
2375 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2376 
2377 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
2378 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2379 
2380 	return artpec6_crypto_prepare_submit_hash(req);
2381 }
2382 
2383 static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
2384 {
2385 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2386 }
2387 
2388 static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
2389 {
2390 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
2391 
2392 	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
2393 	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
2394 
2395 	return artpec6_crypto_prepare_submit_hash(req);
2396 }
2397 
2398 static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
2399 				    const char *base_hash_name)
2400 {
2401 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2402 
2403 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2404 				 sizeof(struct artpec6_hash_request_context));
2405 	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
2406 
2407 	if (base_hash_name) {
2408 		struct crypto_shash *child;
2409 
2410 		child = crypto_alloc_shash(base_hash_name, 0,
2411 					   CRYPTO_ALG_NEED_FALLBACK);
2412 
2413 		if (IS_ERR(child))
2414 			return PTR_ERR(child);
2415 
2416 		tfm_ctx->child_hash = child;
2417 	}
2418 
2419 	return 0;
2420 }
2421 
2422 static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
2423 {
2424 	return artpec6_crypto_ahash_init_common(tfm, NULL);
2425 }
2426 
2427 static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
2428 {
2429 	return artpec6_crypto_ahash_init_common(tfm, "sha256");
2430 }
2431 
2432 static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
2433 {
2434 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2435 
2436 	if (tfm_ctx->child_hash)
2437 		crypto_free_shash(tfm_ctx->child_hash);
2438 
2439 	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
2440 	tfm_ctx->hmac_key_length = 0;
2441 }
2442 
2443 static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
2444 {
2445 	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2446 	struct artpec6_hash_export_state *state = out;
2447 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2448 	enum artpec6_crypto_variant variant = ac->variant;
2449 
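	/*
	 * The operation code is exported in variant-neutral form so the
	 * state does not depend on the A6/A7 metadata register layout.
	 */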
2450 	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
2451 		     sizeof(ctx->partial_buffer));
2452 	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
2453 
2454 	state->digcnt = ctx->digcnt;
2455 	state->partial_bytes = ctx->partial_bytes;
2456 	state->hash_flags = ctx->hash_flags;
2457 
2458 	if (variant == ARTPEC6_CRYPTO)
2459 		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
2460 	else
2461 		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
2462 
2463 	memcpy(state->partial_buffer, ctx->partial_buffer,
2464 	       sizeof(state->partial_buffer));
2465 	memcpy(state->digeststate, ctx->digeststate,
2466 	       sizeof(state->digeststate));
2467 
2468 	return 0;
2469 }
2470 
2471 static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
2472 {
2473 	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
2474 	const struct artpec6_hash_export_state *state = in;
2475 	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
2476 	enum artpec6_crypto_variant variant = ac->variant;
2477 
2478 	memset(ctx, 0, sizeof(*ctx));
2479 
2480 	ctx->digcnt = state->digcnt;
2481 	ctx->partial_bytes = state->partial_bytes;
2482 	ctx->hash_flags = state->hash_flags;
2483 
2484 	if (variant == ARTPEC6_CRYPTO)
2485 		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
2486 	else
2487 		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
2488 
2489 	memcpy(ctx->partial_buffer, state->partial_buffer,
2490 	       sizeof(state->partial_buffer));
2491 	memcpy(ctx->digeststate, state->digeststate,
2492 	       sizeof(state->digeststate));
2493 
2494 	return 0;
2495 }
2496 
2497 static int init_crypto_hw(struct artpec6_crypto *ac)
2498 {
2499 	enum artpec6_crypto_variant variant = ac->variant;
2500 	void __iomem *base = ac->base;
2501 	u32 out_descr_buf_size;
2502 	u32 out_data_buf_size;
2503 	u32 in_data_buf_size;
2504 	u32 in_descr_buf_size;
2505 	u32 in_stat_buf_size;
2506 	u32 in, out;
2507 
2508 	/*
2509 	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
2510 	 * channels and 1024 bytes for the IN channel. This is an elastic
2511 	 * memory used to internally store the descriptors and data. The values
2512 	 * ares specified in 64 byte incremements.  Trustzone buffers are not
2513 	 * used at this stage.
2514 	 */
2515 	out_data_buf_size = 16;  /* 1024 bytes for data */
2516 	out_descr_buf_size = 15; /* 960 bytes for descriptors */
2517 	in_data_buf_size = 8;    /* 512 bytes for data */
2518 	in_descr_buf_size = 4;   /* 256 bytes for descriptors */
	in_stat_buf_size = 4;   /* 256 bytes for stat descriptors */
2520 
2521 	BUILD_BUG_ON_MSG((out_data_buf_size
2522 				+ out_descr_buf_size) * 64 > 1984,
2523 			  "Invalid OUT configuration");
2524 
2525 	BUILD_BUG_ON_MSG((in_data_buf_size
2526 				+ in_descr_buf_size
2527 				+ in_stat_buf_size) * 64 > 1024,
2528 			  "Invalid IN configuration");
2529 
2530 	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
2531 	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
2532 	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
2533 
2534 	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
2535 	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
2536 
2537 	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
2538 	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
2539 
2540 	if (variant == ARTPEC6_CRYPTO) {
2541 		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
2542 		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
2543 		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
2544 			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
2545 			       base + A6_PDMA_INTR_MASK);
2546 	} else {
2547 		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
2548 		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
2549 		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
2550 			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
2551 			       base + A7_PDMA_INTR_MASK);
2552 	}
2553 
2554 	return 0;
2555 }
2556 
2557 static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
2558 {
2559 	enum artpec6_crypto_variant variant = ac->variant;
2560 	void __iomem *base = ac->base;
2561 
2562 	if (variant == ARTPEC6_CRYPTO) {
2563 		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
2564 		writel_relaxed(0, base + A6_PDMA_IN_CFG);
2565 		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2566 	} else {
2567 		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
2568 		writel_relaxed(0, base + A7_PDMA_IN_CFG);
2569 		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
2570 	}
2571 
	writel_relaxed(0, base + PDMA_OUT_CFG);
}
2575 
2576 static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
2577 {
2578 	struct artpec6_crypto *ac = dev_id;
2579 	enum artpec6_crypto_variant variant = ac->variant;
2580 	void __iomem *base = ac->base;
2581 	u32 mask_in_data, mask_in_eop_flush;
2582 	u32 in_cmd_flush_stat, in_cmd_reg;
2583 	u32 ack_intr_reg;
2584 	u32 ack = 0;
2585 	u32 intr;
2586 
2587 	if (variant == ARTPEC6_CRYPTO) {
2588 		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
2589 		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
2590 		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
2591 		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
2592 		in_cmd_reg = A6_PDMA_IN_CMD;
2593 		ack_intr_reg = A6_PDMA_ACK_INTR;
2594 	} else {
2595 		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
2596 		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
2597 		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
2598 		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
2599 		in_cmd_reg = A7_PDMA_IN_CMD;
2600 		ack_intr_reg = A7_PDMA_ACK_INTR;
2601 	}
2602 
	/* We get two interrupt notifications from each job.
	 * The in_data interrupt means that all data has been written to
	 * memory; we then issue a status flush command so that the per-job
	 * status is written to its status vector.  This ensures that the
	 * tasklet can detect exactly how many of the submitted jobs have
	 * finished.
	 */
2610 	if (intr & mask_in_data)
2611 		ack |= mask_in_data;
2612 
2613 	if (intr & mask_in_eop_flush)
2614 		ack |= mask_in_eop_flush;
2615 	else
2616 		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
2617 
2618 	writel_relaxed(ack, base + ack_intr_reg);
2619 
2620 	if (intr & mask_in_eop_flush)
2621 		tasklet_schedule(&ac->task);
2622 
2623 	return IRQ_HANDLED;
2624 }
2625 
2626 /*------------------- Algorithm definitions ----------------------------------*/
2627 
2628 /* Hashes */
2629 static struct ahash_alg hash_algos[] = {
2630 	/* SHA-1 */
2631 	{
2632 		.init = artpec6_crypto_sha1_init,
2633 		.update = artpec6_crypto_hash_update,
2634 		.final = artpec6_crypto_hash_final,
2635 		.digest = artpec6_crypto_sha1_digest,
2636 		.import = artpec6_crypto_hash_import,
2637 		.export = artpec6_crypto_hash_export,
2638 		.halg.digestsize = SHA1_DIGEST_SIZE,
2639 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2640 		.halg.base = {
2641 			.cra_name = "sha1",
2642 			.cra_driver_name = "artpec-sha1",
2643 			.cra_priority = 300,
2644 			.cra_flags = CRYPTO_ALG_ASYNC,
2645 			.cra_blocksize = SHA1_BLOCK_SIZE,
2646 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2647 			.cra_alignmask = 3,
2648 			.cra_module = THIS_MODULE,
2649 			.cra_init = artpec6_crypto_ahash_init,
2650 			.cra_exit = artpec6_crypto_ahash_exit,
2651 		}
2652 	},
2653 	/* SHA-256 */
2654 	{
2655 		.init = artpec6_crypto_sha256_init,
2656 		.update = artpec6_crypto_hash_update,
2657 		.final = artpec6_crypto_hash_final,
2658 		.digest = artpec6_crypto_sha256_digest,
2659 		.import = artpec6_crypto_hash_import,
2660 		.export = artpec6_crypto_hash_export,
2661 		.halg.digestsize = SHA256_DIGEST_SIZE,
2662 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2663 		.halg.base = {
2664 			.cra_name = "sha256",
2665 			.cra_driver_name = "artpec-sha256",
2666 			.cra_priority = 300,
2667 			.cra_flags = CRYPTO_ALG_ASYNC,
2668 			.cra_blocksize = SHA256_BLOCK_SIZE,
2669 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2670 			.cra_alignmask = 3,
2671 			.cra_module = THIS_MODULE,
2672 			.cra_init = artpec6_crypto_ahash_init,
2673 			.cra_exit = artpec6_crypto_ahash_exit,
2674 		}
2675 	},
2676 	/* HMAC SHA-256 */
2677 	{
2678 		.init = artpec6_crypto_hmac_sha256_init,
2679 		.update = artpec6_crypto_hash_update,
2680 		.final = artpec6_crypto_hash_final,
2681 		.digest = artpec6_crypto_hmac_sha256_digest,
2682 		.import = artpec6_crypto_hash_import,
2683 		.export = artpec6_crypto_hash_export,
2684 		.setkey = artpec6_crypto_hash_set_key,
2685 		.halg.digestsize = SHA256_DIGEST_SIZE,
2686 		.halg.statesize = sizeof(struct artpec6_hash_export_state),
2687 		.halg.base = {
2688 			.cra_name = "hmac(sha256)",
2689 			.cra_driver_name = "artpec-hmac-sha256",
2690 			.cra_priority = 300,
2691 			.cra_flags = CRYPTO_ALG_ASYNC,
2692 			.cra_blocksize = SHA256_BLOCK_SIZE,
2693 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
2694 			.cra_alignmask = 3,
2695 			.cra_module = THIS_MODULE,
2696 			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
2697 			.cra_exit = artpec6_crypto_ahash_exit,
2698 		}
2699 	},
2700 };
2701 
2702 /* Crypto */
2703 static struct skcipher_alg crypto_algos[] = {
2704 	/* AES - ECB */
2705 	{
2706 		.base = {
2707 			.cra_name = "ecb(aes)",
2708 			.cra_driver_name = "artpec6-ecb-aes",
2709 			.cra_priority = 300,
2710 			.cra_flags = CRYPTO_ALG_ASYNC,
2711 			.cra_blocksize = AES_BLOCK_SIZE,
2712 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2713 			.cra_alignmask = 3,
2714 			.cra_module = THIS_MODULE,
2715 		},
2716 		.min_keysize = AES_MIN_KEY_SIZE,
2717 		.max_keysize = AES_MAX_KEY_SIZE,
2718 		.setkey = artpec6_crypto_cipher_set_key,
2719 		.encrypt = artpec6_crypto_encrypt,
2720 		.decrypt = artpec6_crypto_decrypt,
2721 		.init = artpec6_crypto_aes_ecb_init,
2722 		.exit = artpec6_crypto_aes_exit,
2723 	},
2724 	/* AES - CTR */
2725 	{
2726 		.base = {
2727 			.cra_name = "ctr(aes)",
2728 			.cra_driver_name = "artpec6-ctr-aes",
2729 			.cra_priority = 300,
2730 			.cra_flags = CRYPTO_ALG_ASYNC |
2731 				     CRYPTO_ALG_NEED_FALLBACK,
2732 			.cra_blocksize = 1,
2733 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2734 			.cra_alignmask = 3,
2735 			.cra_module = THIS_MODULE,
2736 		},
2737 		.min_keysize = AES_MIN_KEY_SIZE,
2738 		.max_keysize = AES_MAX_KEY_SIZE,
2739 		.ivsize = AES_BLOCK_SIZE,
2740 		.setkey = artpec6_crypto_cipher_set_key,
2741 		.encrypt = artpec6_crypto_ctr_encrypt,
2742 		.decrypt = artpec6_crypto_ctr_decrypt,
2743 		.init = artpec6_crypto_aes_ctr_init,
2744 		.exit = artpec6_crypto_aes_ctr_exit,
2745 	},
2746 	/* AES - CBC */
2747 	{
2748 		.base = {
2749 			.cra_name = "cbc(aes)",
2750 			.cra_driver_name = "artpec6-cbc-aes",
2751 			.cra_priority = 300,
2752 			.cra_flags = CRYPTO_ALG_ASYNC,
2753 			.cra_blocksize = AES_BLOCK_SIZE,
2754 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2755 			.cra_alignmask = 3,
2756 			.cra_module = THIS_MODULE,
2757 		},
2758 		.min_keysize = AES_MIN_KEY_SIZE,
2759 		.max_keysize = AES_MAX_KEY_SIZE,
2760 		.ivsize = AES_BLOCK_SIZE,
2761 		.setkey = artpec6_crypto_cipher_set_key,
2762 		.encrypt = artpec6_crypto_encrypt,
2763 		.decrypt = artpec6_crypto_decrypt,
2764 		.init = artpec6_crypto_aes_cbc_init,
2765 		.exit = artpec6_crypto_aes_exit
2766 	},
2767 	/* AES - XTS */
2768 	{
2769 		.base = {
2770 			.cra_name = "xts(aes)",
2771 			.cra_driver_name = "artpec6-xts-aes",
2772 			.cra_priority = 300,
2773 			.cra_flags = CRYPTO_ALG_ASYNC,
2774 			.cra_blocksize = 1,
2775 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2776 			.cra_alignmask = 3,
2777 			.cra_module = THIS_MODULE,
2778 		},
2779 		.min_keysize = 2*AES_MIN_KEY_SIZE,
2780 		.max_keysize = 2*AES_MAX_KEY_SIZE,
2781 		.ivsize = 16,
2782 		.setkey = artpec6_crypto_xts_set_key,
2783 		.encrypt = artpec6_crypto_encrypt,
2784 		.decrypt = artpec6_crypto_decrypt,
2785 		.init = artpec6_crypto_aes_xts_init,
2786 		.exit = artpec6_crypto_aes_exit,
2787 	},
2788 };
2789 
2790 static struct aead_alg aead_algos[] = {
2791 	{
2792 		.init   = artpec6_crypto_aead_init,
2793 		.setkey = artpec6_crypto_aead_set_key,
2794 		.encrypt = artpec6_crypto_aead_encrypt,
2795 		.decrypt = artpec6_crypto_aead_decrypt,
2796 		.ivsize = GCM_AES_IV_SIZE,
2797 		.maxauthsize = AES_BLOCK_SIZE,
2798 
2799 		.base = {
2800 			.cra_name = "gcm(aes)",
2801 			.cra_driver_name = "artpec-gcm-aes",
2802 			.cra_priority = 300,
2803 			.cra_flags = CRYPTO_ALG_ASYNC |
2804 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
2805 			.cra_blocksize = 1,
2806 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
2807 			.cra_alignmask = 3,
2808 			.cra_module = THIS_MODULE,
2809 		},
2810 	}
2811 };
2812 
2813 #ifdef CONFIG_DEBUG_FS
2814 
2815 struct dbgfs_u32 {
2816 	char *name;
2817 	mode_t mode;
2818 	u32 *flag;
2819 	char *desc;
2820 };
2821 
2822 static struct dentry *dbgfs_root;
2823 
2824 static void artpec6_crypto_init_debugfs(void)
2825 {
2826 	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
2827 
2828 #ifdef CONFIG_FAULT_INJECTION
2829 	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
2830 				  &artpec6_crypto_fail_status_read);
2831 
2832 	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
2833 				  &artpec6_crypto_fail_dma_array_full);
2834 #endif
2835 }
2836 
2837 static void artpec6_crypto_free_debugfs(void)
2838 {
2839 	debugfs_remove_recursive(dbgfs_root);
2840 	dbgfs_root = NULL;
2841 }
2842 #endif
2843 
2844 static const struct of_device_id artpec6_crypto_of_match[] = {
2845 	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
2846 	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
2847 	{}
2848 };
2849 MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
2850 
2851 static int artpec6_crypto_probe(struct platform_device *pdev)
2852 {
2853 	const struct of_device_id *match;
2854 	enum artpec6_crypto_variant variant;
2855 	struct artpec6_crypto *ac;
2856 	struct device *dev = &pdev->dev;
2857 	void __iomem *base;
2858 	struct resource *res;
2859 	int irq;
2860 	int err;
2861 
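	/* Only a single instance of the crypto block is supported; it is
	 * tracked through the global artpec6_crypto_dev pointer.
	 */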
2862 	if (artpec6_crypto_dev)
2863 		return -ENODEV;
2864 
2865 	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
2866 	if (!match)
2867 		return -EINVAL;
2868 
2869 	variant = (enum artpec6_crypto_variant)match->data;
2870 
2871 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2872 	base = devm_ioremap_resource(&pdev->dev, res);
2873 	if (IS_ERR(base))
2874 		return PTR_ERR(base);
2875 
2876 	irq = platform_get_irq(pdev, 0);
2877 	if (irq < 0)
2878 		return -ENODEV;
2879 
2880 	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
2881 			  GFP_KERNEL);
2882 	if (!ac)
2883 		return -ENOMEM;
2884 
2885 	platform_set_drvdata(pdev, ac);
2886 	ac->variant = variant;
2887 
2888 	spin_lock_init(&ac->queue_lock);
2889 	INIT_LIST_HEAD(&ac->queue);
2890 	INIT_LIST_HEAD(&ac->pending);
2891 	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
2892 
2893 	ac->base = base;
2894 
2895 	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
2896 		sizeof(struct artpec6_crypto_dma_descriptors),
2897 		64,
2898 		0,
2899 		NULL);
2900 	if (!ac->dma_cache)
2901 		return -ENOMEM;
2902 
2903 #ifdef CONFIG_DEBUG_FS
2904 	artpec6_crypto_init_debugfs();
2905 #endif
2906 
2907 	tasklet_init(&ac->task, artpec6_crypto_task,
2908 		     (unsigned long)ac);
2909 
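	/* Allocate twice the maximum cache line size so the scratch
	 * buffers can be cache line aligned and DMA never shares a line
	 * with unrelated data.
	 */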
2910 	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
2911 				      GFP_KERNEL);
2912 	if (!ac->pad_buffer)
2913 		return -ENOMEM;
2914 	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
2915 
2916 	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
2917 				      GFP_KERNEL);
2918 	if (!ac->zero_buffer)
2919 		return -ENOMEM;
2920 	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
2921 
2922 	err = init_crypto_hw(ac);
2923 	if (err)
2924 		goto free_cache;
2925 
2926 	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
2927 			       "artpec6-crypto", ac);
2928 	if (err)
2929 		goto disable_hw;
2930 
2931 	artpec6_crypto_dev = &pdev->dev;
2932 
2933 	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
2934 	if (err) {
2935 		dev_err(dev, "Failed to register ahashes\n");
2936 		goto disable_hw;
2937 	}
2938 
2939 	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
2940 	if (err) {
2941 		dev_err(dev, "Failed to register ciphers\n");
2942 		goto unregister_ahashes;
2943 	}
2944 
2945 	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
2946 	if (err) {
2947 		dev_err(dev, "Failed to register aeads\n");
2948 		goto unregister_algs;
2949 	}
2950 
2951 	return 0;
2952 
2953 unregister_algs:
2954 	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
2955 unregister_ahashes:
2956 	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
2957 disable_hw:
2958 	artpec6_crypto_disable_hw(ac);
2959 free_cache:
2960 	kmem_cache_destroy(ac->dma_cache);
2961 	return err;
2962 }
2963 
2964 static int artpec6_crypto_remove(struct platform_device *pdev)
2965 {
2966 	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
2967 	int irq = platform_get_irq(pdev, 0);
2968 
2969 	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
2970 	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
2971 	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
2972 
2973 	tasklet_disable(&ac->task);
2974 	devm_free_irq(&pdev->dev, irq, ac);
2975 	tasklet_kill(&ac->task);
2976 	del_timer_sync(&ac->timer);
2977 
2978 	artpec6_crypto_disable_hw(ac);
2979 
2980 	kmem_cache_destroy(ac->dma_cache);
2981 #ifdef CONFIG_DEBUG_FS
2982 	artpec6_crypto_free_debugfs();
2983 #endif
2984 	return 0;
2985 }
2986 
2987 static struct platform_driver artpec6_crypto_driver = {
2988 	.probe   = artpec6_crypto_probe,
2989 	.remove  = artpec6_crypto_remove,
2990 	.driver  = {
2991 		.name  = "artpec6-crypto",
2992 		.of_match_table = artpec6_crypto_of_match,
2993 	},
2994 };
2995 
2996 module_platform_driver(artpec6_crypto_driver);
2997 
2998 MODULE_AUTHOR("Axis Communications AB");
2999 MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
3000 MODULE_LICENSE("GPL");
3001