xref: /openbmc/linux/drivers/crypto/inside-secure/safexcel.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Copyright (C) 2017 Marvell
4   *
5   * Antoine Tenart <antoine.tenart@free-electrons.com>
6   */
7  
8  #include <linux/clk.h>
9  #include <linux/device.h>
10  #include <linux/dma-mapping.h>
11  #include <linux/dmapool.h>
12  #include <linux/firmware.h>
13  #include <linux/interrupt.h>
14  #include <linux/module.h>
15  #include <linux/of_platform.h>
16  #include <linux/of_irq.h>
17  #include <linux/pci.h>
18  #include <linux/platform_device.h>
19  #include <linux/workqueue.h>
20  
21  #include <crypto/internal/aead.h>
22  #include <crypto/internal/hash.h>
23  #include <crypto/internal/skcipher.h>
24  
25  #include "safexcel.h"
26  
27  static u32 max_rings = EIP197_MAX_RINGS;
28  module_param(max_rings, uint, 0644);
29  MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
30  
31  static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
32  {
33  	int i;
34  
35  	/*
36  	 * Map all interfaces/rings to register index 0
37  	 * so they can share contexts. Without this, the EIP197 will
38  	 * assume each interface/ring to be in its own memory domain
39  	 * i.e. have its own subset of UNIQUE memory addresses,
40  	 * which would cause records with the SAME memory address to
41  	 * use DIFFERENT cache buffers, causing both poor cache utilization
42  	 * AND serious coherence/invalidation issues.
43  	 */
44  	for (i = 0; i < 4; i++)
45  		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));
46  
47  	/*
48  	 * Initialize other virtualization regs for cache
49  	 * These may not be in their reset state ...
50  	 */
51  	for (i = 0; i < priv->config.rings; i++) {
52  		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
53  		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
54  		writel(EIP197_FLUE_CONFIG_MAGIC,
55  		       priv->base + EIP197_FLUE_CONFIG(i));
56  	}
57  	writel(0, priv->base + EIP197_FLUE_OFFSETS);
58  	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
59  }
60  
61  static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
62  				     u32 addrmid, int *actbank)
63  {
64  	u32 val;
65  	int curbank;
66  
67  	curbank = addrmid >> 16;
68  	if (curbank != *actbank) {
69  		val = readl(priv->base + EIP197_CS_RAM_CTRL);
70  		val = (val & ~EIP197_CS_BANKSEL_MASK) |
71  		      (curbank << EIP197_CS_BANKSEL_OFS);
72  		writel(val, priv->base + EIP197_CS_RAM_CTRL);
73  		*actbank = curbank;
74  	}
75  }
76  
77  static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
78  				  int maxbanks, u32 probemask, u32 stride)
79  {
80  	u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
81  	int actbank;
82  
83  	/*
84  	 * Probe the actual size of the physically attached cache data RAM,
85  	 * using a binary subdivision algorithm down to 32 byte cache lines.
86  	 */
87  	addrhi = 1 << (16 + maxbanks);
88  	addrlo = 0;
89  	actbank = min(maxbanks - 1, 0);
90  	while ((addrhi - addrlo) > stride) {
91  		/* write marker to lowest address in top half */
92  		addrmid = (addrhi + addrlo) >> 1;
93  		marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
94  		eip197_trc_cache_banksel(priv, addrmid, &actbank);
95  		writel(marker,
96  			priv->base + EIP197_CLASSIFICATION_RAMS +
97  			(addrmid & 0xffff));
98  
99  		/* write invalid markers to possible aliases */
100  		delta = 1 << __fls(addrmid);
101  		while (delta >= stride) {
102  			addralias = addrmid - delta;
103  			eip197_trc_cache_banksel(priv, addralias, &actbank);
104  			writel(~marker,
105  			       priv->base + EIP197_CLASSIFICATION_RAMS +
106  			       (addralias & 0xffff));
107  			delta >>= 1;
108  		}
109  
110  		/* read back marker from top half */
111  		eip197_trc_cache_banksel(priv, addrmid, &actbank);
112  		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
113  			    (addrmid & 0xffff));
114  
115  		if ((val & probemask) == marker)
116  			/* read back correct, continue with top half */
117  			addrlo = addrmid;
118  		else
119  			/* not read back correct, continue with bottom half */
120  			addrhi = addrmid;
121  	}
122  	return addrhi;
123  }
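
/*
 * Editorial illustration (not part of the driver): a minimal user-space
 * sketch of the binary-subdivision probe above. It assumes the attached
 * RAM simply wraps modulo its true power-of-two size, so writes past the
 * end alias onto lower addresses - exactly the effect the "invalid marker"
 * writes are designed to expose. probe_model(), fls32() and fake_ram_size
 * are made-up names used only for this illustration; the sketch is kept
 * out of the build with #if 0.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned int fls32(uint32_t x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;		/* position of the highest set bit, 0-based */
}

static uint32_t probe_model(uint32_t fake_ram_size, int maxbanks, uint32_t stride)
{
	uint32_t *ram = calloc(fake_ram_size, sizeof(*ram));
	uint32_t addrhi = 1 << (16 + maxbanks);
	uint32_t addrlo = 0;

	if (!ram)
		return 0;

	while ((addrhi - addrlo) > stride) {
		uint32_t addrmid = (addrhi + addrlo) >> 1;
		uint32_t marker = addrmid ^ 0xabadbabe;
		uint32_t delta;

		/* write marker to the lowest address in the top half */
		ram[addrmid % fake_ram_size] = marker;

		/* write invalid markers to every possible alias below it */
		for (delta = 1u << fls32(addrmid); delta >= stride; delta >>= 1)
			ram[(addrmid - delta) % fake_ram_size] = ~marker;

		/* the marker survives only if the RAM extends past addrmid */
		if (ram[addrmid % fake_ram_size] == marker)
			addrlo = addrmid;
		else
			addrhi = addrmid;
	}
	free(ram);
	return addrhi;
}

int main(void)
{
	/* e.g. a 2-bank device with 64 KiB of cache data RAM reports 65536 */
	printf("probed size: %u bytes\n", probe_model(64 * 1024, 2, 32));
	return 0;
}
#endif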
124  
125  static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
126  				   int cs_rc_max, int cs_ht_wc)
127  {
128  	int i;
129  	u32 htable_offset, val, offset;
130  
131  	/* Clear all records in administration RAM */
132  	for (i = 0; i < cs_rc_max; i++) {
133  		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
134  
135  		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
136  		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
137  		       priv->base + offset);
138  
139  		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
140  		if (i == 0)
141  			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
142  		else if (i == cs_rc_max - 1)
143  			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
144  		writel(val, priv->base + offset + 4);
145  		/* must also initialize the address key due to ECC! */
146  		writel(0, priv->base + offset + 8);
147  		writel(0, priv->base + offset + 12);
148  	}
149  
150  	/* Clear the hash table entries */
151  	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
152  	for (i = 0; i < cs_ht_wc; i++)
153  		writel(GENMASK(29, 0),
154  		       priv->base + EIP197_CLASSIFICATION_RAMS +
155  		       htable_offset + i * sizeof(u32));
156  }
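
/*
 * Editorial illustration (not part of the driver): with, say, cs_rc_max = 4,
 * the loop above leaves the admin RAM records chained into a single free
 * list via their second word, with the EIP197_RC_NULL sentinel closing
 * both ends:
 *
 *   record 0:  prev = NULL  next = 1
 *   record 1:  prev = 0     next = 2
 *   record 2:  prev = 1     next = 3
 *   record 3:  prev = 2     next = NULL
 *
 * Words 2 and 3 (the address key) are zeroed only to give the ECC logic a
 * defined starting value, and every hash table word after the records is
 * filled with GENMASK(29, 0), the pattern used here for an empty bucket.
 */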
157  
158  static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
159  {
160  	u32 val, dsize, asize;
161  	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
162  	int cs_rc_abs_max, cs_ht_sz;
163  	int maxbanks;
164  
165  	/* Setup (dummy) virtualization for cache */
166  	eip197_trc_cache_setupvirt(priv);
167  
168  	/*
169  	 * Enable the record cache memory access and
170  	 * probe the bank select width
171  	 */
172  	val = readl(priv->base + EIP197_CS_RAM_CTRL);
173  	val &= ~EIP197_TRC_ENABLE_MASK;
174  	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
175  	writel(val, priv->base + EIP197_CS_RAM_CTRL);
176  	val = readl(priv->base + EIP197_CS_RAM_CTRL);
177  	maxbanks = ((val&EIP197_CS_BANKSEL_MASK)>>EIP197_CS_BANKSEL_OFS) + 1;
178  
179  	/* Clear all ECC errors */
180  	writel(0, priv->base + EIP197_TRC_ECCCTRL);
181  
182  	/*
183  	 * Make sure the cache memory is accessible by taking record cache into
184  	 * reset. Need data memory access here, not admin access.
185  	 */
186  	val = readl(priv->base + EIP197_TRC_PARAMS);
187  	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
188  	writel(val, priv->base + EIP197_TRC_PARAMS);
189  
190  	/* Probed data RAM size in bytes */
191  	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);
192  
193  	/*
194  	 * Now probe the administration RAM size in pretty much the same way,
195  	 * except that only the lower 30 bits are writable and we don't need
196  	 * bank selects.
197  	 */
198  	val = readl(priv->base + EIP197_TRC_PARAMS);
199  	/* admin access now */
200  	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
201  	writel(val, priv->base + EIP197_TRC_PARAMS);
202  
203  	/* Probed admin RAM size in admin words */
204  	asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;
205  
206  	/* Clear any ECC errors detected while probing! */
207  	writel(0, priv->base + EIP197_TRC_ECCCTRL);
208  
209  	/* Sanity check probing results */
210  	if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
211  		dev_err(priv->dev, "Record cache probing failed (%d,%d).",
212  			dsize, asize);
213  		return -ENODEV;
214  	}
215  
216  	/*
217  	 * Determine optimal configuration from RAM sizes
218  	 * Note that we assume that the physical RAM configuration is sane;
219  	 * therefore, we don't do any parameter error checking here ...
220  	 */
221  
222  	/* For now, just use a single record format covering everything */
223  	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
224  	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;
225  
226  	/*
227  	 * Step #1: How many records will physically fit?
228  	 * Hard upper limit is 1023!
229  	 */
230  	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
231  	/* Step #2: Need at least 2 words in the admin RAM per record */
232  	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
233  	/* Step #3: Determine log2 of hash table size */
234  	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
235  	/* Step #4: determine current size of hash table in dwords */
236  	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
237  	/* Step #5: add back excess words and see if we can fit more records */
238  	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
239  
240  	/* Clear the cache RAMs */
241  	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
242  
243  	/* Disable the record cache memory access */
244  	val = readl(priv->base + EIP197_CS_RAM_CTRL);
245  	val &= ~EIP197_TRC_ENABLE_MASK;
246  	writel(val, priv->base + EIP197_CS_RAM_CTRL);
247  
248  	/* Write head and tail pointers of the record free chain */
249  	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
250  	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
251  	writel(val, priv->base + EIP197_TRC_FREECHAIN);
252  
253  	/* Configure the record cache #1 */
254  	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
255  	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
256  	writel(val, priv->base + EIP197_TRC_PARAMS2);
257  
258  	/* Configure the record cache #2 */
259  	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
260  	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
261  	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
262  	writel(val, priv->base + EIP197_TRC_PARAMS);
263  
264  	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
265  		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
266  	return 0;
267  }
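
/*
 * Editorial illustration (not part of the driver): a worked instance of the
 * five sizing steps above, assuming - purely for the sake of the example -
 * a probed dsize of 32768 bytes, an asize of 1024 admin words and a record
 * size (EIP197_CS_TRC_REC_WC) of 64 words:
 *
 *   #1: cs_rc_abs_max = min((32768 >> 2) / 64, 1023)   = 128
 *   #2: cs_rc_max     = min(128, 1024 >> 1)            = 128
 *   #3: cs_ht_sz      = __fls(1024 - 128) - 2 = 9 - 2  = 7
 *   #4: cs_ht_wc      = 16 << 7                        = 2048 dwords
 *   #5: cs_rc_max     = min(128, 1024 - (2048 >> 2))   = 128
 *
 * i.e. this hypothetical cache would be configured for 128 records and a
 * 2048-dword hash table.
 */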
268  
269  static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
270  {
271  	int pe, i;
272  	u32 val;
273  
274  	for (pe = 0; pe < priv->config.pes; pe++) {
275  		/* Configure the token FIFO's */
276  		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
277  		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));
278  
279  		/* Clear the ICE scratchpad memory */
280  		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
281  		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
282  		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
283  		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
284  		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
285  		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
286  
287  		/* clear the scratchpad RAM using 32 bit writes only */
288  		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
289  			writel(0, EIP197_PE(priv) +
290  				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));
291  
292  		/* Reset the IFPP engine to make its program mem accessible */
293  		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
294  		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
295  		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
296  		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
297  
298  		/* Reset the IPUE engine to make its program mem accessible */
299  		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
300  		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
301  		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
302  		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
303  
304  		/* Enable access to all IFPP program memories */
305  		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
306  		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
307  
308  		/* bypass the OCE, if present */
309  		if (priv->flags & EIP197_OCE)
310  			writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
311  							EIP197_PE_DEBUG(pe));
312  	}
313  
314  }
315  
316  static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
317  				  const struct firmware *fw)
318  {
319  	u32 val;
320  	int i;
321  
322  	/* Write the firmware */
323  	for (i = 0; i < fw->size / sizeof(u32); i++) {
324  		if (priv->data->fw_little_endian)
325  			val = le32_to_cpu(((const __le32 *)fw->data)[i]);
326  		else
327  			val = be32_to_cpu(((const __be32 *)fw->data)[i]);
328  
329  		writel(val,
330  		       priv->base + EIP197_CLASSIFICATION_RAMS +
331  		       i * sizeof(val));
332  	}
333  
334  	/* Exclude final 2 NOPs from size */
335  	return i - EIP197_FW_TERMINAL_NOPS;
336  }
337  
338  /*
339   * If FW is actual production firmware, then poll for its initialization
340   * to complete and check if it is good for the HW, otherwise just return OK.
341   */
342  static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
343  {
344  	int pe, pollcnt;
345  	u32 base, pollofs;
346  
347  	if (fpp)
348  		pollofs  = EIP197_FW_FPP_READY;
349  	else
350  		pollofs  = EIP197_FW_PUE_READY;
351  
352  	for (pe = 0; pe < priv->config.pes; pe++) {
353  		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
354  		pollcnt = EIP197_FW_START_POLLCNT;
355  		while (pollcnt &&
356  		       (readl_relaxed(EIP197_PE(priv) + base +
357  			      pollofs) != 1)) {
358  			pollcnt--;
359  		}
360  		if (!pollcnt) {
361  			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
362  				fpp, pe);
363  			return false;
364  		}
365  	}
366  	return true;
367  }
368  
369  static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
370  				  int ipuesz, int ifppsz, int minifw)
371  {
372  	int pe;
373  	u32 val;
374  
375  	for (pe = 0; pe < priv->config.pes; pe++) {
376  		/* Disable access to all program memory */
377  		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
378  
379  		/* Start IFPP microengines */
380  		if (minifw)
381  			val = 0;
382  		else
383  			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
384  					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
385  				EIP197_PE_ICE_UENG_DEBUG_RESET;
386  		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
387  
388  		/* Start IPUE microengines */
389  		if (minifw)
390  			val = 0;
391  		else
392  			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
393  					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
394  				EIP197_PE_ICE_UENG_DEBUG_RESET;
395  		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
396  	}
397  
398  	/* For miniFW startup, there is no initialization, so always succeed */
399  	if (minifw)
400  		return true;
401  
402  	/* Wait until all the firmwares have properly started up */
403  	if (!poll_fw_ready(priv, 1))
404  		return false;
405  	if (!poll_fw_ready(priv, 0))
406  		return false;
407  
408  	return true;
409  }
410  
411  static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
412  {
413  	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
414  	const struct firmware *fw[FW_NB];
415  	char fw_path[37], *dir = NULL;
416  	int i, j, ret = 0, pe;
417  	int ipuesz, ifppsz, minifw = 0;
418  
419  	if (priv->data->version == EIP197D_MRVL)
420  		dir = "eip197d";
421  	else if (priv->data->version == EIP197B_MRVL ||
422  		 priv->data->version == EIP197_DEVBRD)
423  		dir = "eip197b";
424  	else if (priv->data->version == EIP197C_MXL)
425  		dir = "eip197c";
426  	else
427  		return -ENODEV;
428  
429  retry_fw:
430  	for (i = 0; i < FW_NB; i++) {
431  		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
432  		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
433  		if (ret) {
434  			if (minifw || priv->data->version != EIP197B_MRVL)
435  				goto release_fw;
436  
437  			/* Fall back to the old firmware location for the
438  			 * EIP197b.
439  			 */
440  			ret = firmware_request_nowarn(&fw[i], fw_name[i],
441  						      priv->dev);
442  			if (ret)
443  				goto release_fw;
444  		}
445  	}
446  
447  	eip197_init_firmware(priv);
448  
449  	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);
450  
451  	/* Enable access to IPUE program memories */
452  	for (pe = 0; pe < priv->config.pes; pe++)
453  		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
454  		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
455  
456  	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);
457  
458  	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
459  		dev_dbg(priv->dev, "Firmware loaded successfully\n");
460  		return 0;
461  	}
462  
463  	ret = -ENODEV;
464  
465  release_fw:
466  	for (j = 0; j < i; j++)
467  		release_firmware(fw[j]);
468  
469  	if (!minifw) {
470  		/* Retry with minifw path */
471  		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
472  		dir = "eip197_minifw";
473  		minifw = 1;
474  		goto retry_fw;
475  	}
476  
477  	dev_err(priv->dev, "Firmware load failed.\n");
478  
479  	return ret;
480  }
481  
482  static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
483  {
484  	u32 cd_size_rnd, val;
485  	int i, cd_fetch_cnt;
486  
487  	cd_size_rnd  = (priv->config.cd_size +
488  			(BIT(priv->hwconfig.hwdataw) - 1)) >>
489  		       priv->hwconfig.hwdataw;
490  	/* determine number of CD's we can fetch into the CD FIFO as 1 block */
491  	if (priv->flags & SAFEXCEL_HW_EIP197) {
492  		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
493  		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
494  		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
495  				     (priv->config.pes * EIP197_FETCH_DEPTH));
496  	} else {
497  		/* for the EIP97, just fetch all that fits minus 1 */
498  		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
499  				cd_size_rnd) - 1;
500  	}
501  	/*
502  	 * Since we're using command desc's way larger than formally specified,
503  	 * we need to check whether we can fit even 1 for low-end EIP196's!
504  	 */
505  	if (!cd_fetch_cnt) {
506  		dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
507  		return -ENODEV;
508  	}
509  
510  	for (i = 0; i < priv->config.rings; i++) {
511  		/* ring base address */
512  		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
513  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
514  		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
515  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
516  
517  		writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
518  		       (priv->config.cd_offset << 14) | priv->config.cd_size,
519  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
520  		writel(((cd_fetch_cnt *
521  			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
522  		       (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
523  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
524  
525  		/* Configure DMA tx control */
526  		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
527  		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
528  		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
529  
530  		/* clear any pending interrupt */
531  		writel(GENMASK(5, 0),
532  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
533  	}
534  
535  	return 0;
536  }
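
/*
 * Editorial illustration (not part of the driver): a worked instance of the
 * fetch-count computation above, with hypothetical values of cd_size = 10
 * dwords, hwdataw = 2 (i.e. a 4-dword bus word) and hwcfsize = 4 (a 16
 * bus-word command FIFO):
 *
 *   cd_size_rnd  = (10 + 3) >> 2  = 3 bus words per descriptor
 *   cd_fetch_cnt = (1 << 4) / 3   = 5 descriptors per fetch
 *
 * On an EIP197 that count is then further capped at
 * priv->config.pes * EIP197_FETCH_DEPTH, while an EIP97 would instead use
 * all that fits minus one, i.e. 4.
 */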
537  
538  static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
539  {
540  	u32 rd_size_rnd, val;
541  	int i, rd_fetch_cnt;
542  
543  	/* determine number of RD's we can fetch into the FIFO as one block */
544  	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
545  		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
546  		      priv->hwconfig.hwdataw;
547  	if (priv->flags & SAFEXCEL_HW_EIP197) {
548  		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
549  		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
550  		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
551  				     (priv->config.pes * EIP197_FETCH_DEPTH));
552  	} else {
553  		/* for the EIP97, just fetch all that fits minus 1 */
554  		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
555  				rd_size_rnd) - 1;
556  	}
557  
558  	for (i = 0; i < priv->config.rings; i++) {
559  		/* ring base address */
560  		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
561  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
562  		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
563  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
564  
565  		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
566  		       priv->config.rd_size,
567  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
568  
569  		writel(((rd_fetch_cnt *
570  			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
571  		       (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
572  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
573  
574  		/* Configure DMA tx control */
575  		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
576  		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
577  		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
578  		writel(val,
579  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
580  
581  		/* clear any pending interrupt */
582  		writel(GENMASK(7, 0),
583  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
584  
585  		/* enable ring interrupt */
586  		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
587  		val |= EIP197_RDR_IRQ(i);
588  		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
589  	}
590  
591  	return 0;
592  }
593  
594  static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
595  {
596  	u32 val;
597  	int i, ret, pe, opbuflo, opbufhi;
598  
599  	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
600  		priv->config.pes, priv->config.rings);
601  
602  	/*
603  	 * For EIP197s only, set the maximum number of TX commands to 2^5 = 32.
604  	 * Skip for the EIP97 as it does not have this field.
605  	 */
606  	if (priv->flags & SAFEXCEL_HW_EIP197) {
607  		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
608  		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
609  		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
610  	}
611  
612  	/* Configure wr/rd cache values */
613  	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
614  	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
615  	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
616  
617  	/* Interrupts reset */
618  
619  	/* Disable all global interrupts */
620  	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
621  
622  	/* Clear any pending interrupt */
623  	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
624  
625  	/* Processing Engine configuration */
626  	for (pe = 0; pe < priv->config.pes; pe++) {
627  		/* Data Fetch Engine configuration */
628  
629  		/* Reset all DFE threads */
630  		writel(EIP197_DxE_THR_CTRL_RESET_PE,
631  		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
632  
633  		if (priv->flags & EIP197_PE_ARB)
634  			/* Reset HIA input interface arbiter (if present) */
635  			writel(EIP197_HIA_RA_PE_CTRL_RESET,
636  			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
637  
638  		/* DMA transfer size to use */
639  		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
640  		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
641  		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
642  		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
643  		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
644  		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
645  		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
646  		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
647  
648  		/* Take the DFE threads out of the reset state */
649  		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
650  
651  		/* Configure the processing engine thresholds */
652  		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
653  		       EIP197_PE_IN_xBUF_THRES_MAX(9),
654  		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
655  		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
656  		       EIP197_PE_IN_xBUF_THRES_MAX(7),
657  		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
658  
659  		if (priv->flags & SAFEXCEL_HW_EIP197)
660  			/* enable HIA input interface arbiter and rings */
661  			writel(EIP197_HIA_RA_PE_CTRL_EN |
662  			       GENMASK(priv->config.rings - 1, 0),
663  			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
664  
665  		/* Data Store Engine configuration */
666  
667  		/* Reset all DSE threads */
668  		writel(EIP197_DxE_THR_CTRL_RESET_PE,
669  		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
670  
671  		/* Wait for all DSE threads to complete */
672  		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
673  			GENMASK(15, 12)) != GENMASK(15, 12))
674  			;
675  
676  		/* DMA transfer size to use */
677  		if (priv->hwconfig.hwnumpes > 4) {
678  			opbuflo = 9;
679  			opbufhi = 10;
680  		} else {
681  			opbuflo = 7;
682  			opbufhi = 8;
683  		}
684  		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
685  		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
686  		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
687  		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
688  		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
689  		/* FIXME: instability issues can occur for EIP97 but disabling
690  		 * it impacts performance.
691  		 */
692  		if (priv->flags & SAFEXCEL_HW_EIP197)
693  			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
694  		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
695  
696  		/* Take the DSE threads out of the reset state */
697  		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
698  
699  		/* Configure the processing engine thresholds */
700  		writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
701  		       EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
702  		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
703  
704  		/* Processing Engine configuration */
705  
706  		/* Token & context configuration */
707  		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
708  		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
709  		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
710  		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
711  
712  		/* H/W capabilities selection: just enable everything */
713  		writel(EIP197_FUNCTION_ALL,
714  		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
715  		writel(EIP197_FUNCTION_ALL,
716  		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
717  	}
718  
719  	/* Command Descriptor Rings prepare */
720  	for (i = 0; i < priv->config.rings; i++) {
721  		/* Clear interrupts for this ring */
722  		writel(GENMASK(31, 0),
723  		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
724  
725  		/* Disable external triggering */
726  		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
727  
728  		/* Clear the pending prepared counter */
729  		writel(EIP197_xDR_PREP_CLR_COUNT,
730  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
731  
732  		/* Clear the pending processed counter */
733  		writel(EIP197_xDR_PROC_CLR_COUNT,
734  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
735  
736  		writel(0,
737  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
738  		writel(0,
739  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
740  
741  		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
742  		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
743  	}
744  
745  	/* Result Descriptor Ring prepare */
746  	for (i = 0; i < priv->config.rings; i++) {
747  		/* Disable external triggering */
748  		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
749  
750  		/* Clear the pending prepared counter */
751  		writel(EIP197_xDR_PREP_CLR_COUNT,
752  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
753  
754  		/* Clear the pending processed counter */
755  		writel(EIP197_xDR_PROC_CLR_COUNT,
756  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
757  
758  		writel(0,
759  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
760  		writel(0,
761  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
762  
763  		/* Ring size */
764  		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
765  		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
766  	}
767  
768  	for (pe = 0; pe < priv->config.pes; pe++) {
769  		/* Enable command descriptor rings */
770  		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
771  		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
772  
773  		/* Enable result descriptor rings */
774  		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
775  		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
776  	}
777  
778  	/* Clear any HIA interrupt */
779  	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
780  
781  	if (priv->flags & EIP197_SIMPLE_TRC) {
782  		writel(EIP197_STRC_CONFIG_INIT |
783  		       EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
784  		       EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
785  		       priv->base + EIP197_STRC_CONFIG);
786  		writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
787  		       EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
788  	} else if (priv->flags & SAFEXCEL_HW_EIP197) {
789  		ret = eip197_trc_cache_init(priv);
790  		if (ret)
791  			return ret;
792  	}
793  
794  	if (priv->flags & EIP197_ICE) {
795  		ret = eip197_load_firmwares(priv);
796  		if (ret)
797  			return ret;
798  	}
799  
800  	return safexcel_hw_setup_cdesc_rings(priv) ?:
801  	       safexcel_hw_setup_rdesc_rings(priv) ?:
802  	       0;
803  }
804  
805  /* Called with ring's lock taken */
806  static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
807  				       int ring)
808  {
809  	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
810  
811  	if (!coal)
812  		return;
813  
814  	/* Configure when we want an interrupt */
815  	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
816  	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
817  	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
818  }
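
/*
 * Editorial note (not part of the driver): the write above arms the result
 * ring in packet-counting mode, so with e.g. 3 requests outstanding the RDR
 * raises its threshold interrupt only after 3 result packets have been
 * processed (never more than EIP197_MAX_BATCH_SZ at a time), coalescing
 * completions into one interrupt per batch.
 */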
819  
820  void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
821  {
822  	struct crypto_async_request *req, *backlog;
823  	struct safexcel_context *ctx;
824  	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
825  
826  	/* If a request wasn't properly dequeued because of a lack of resources,
827  	 * process it first.
828  	 */
829  	req = priv->ring[ring].req;
830  	backlog = priv->ring[ring].backlog;
831  	if (req)
832  		goto handle_req;
833  
834  	while (true) {
835  		spin_lock_bh(&priv->ring[ring].queue_lock);
836  		backlog = crypto_get_backlog(&priv->ring[ring].queue);
837  		req = crypto_dequeue_request(&priv->ring[ring].queue);
838  		spin_unlock_bh(&priv->ring[ring].queue_lock);
839  
840  		if (!req) {
841  			priv->ring[ring].req = NULL;
842  			priv->ring[ring].backlog = NULL;
843  			goto finalize;
844  		}
845  
846  handle_req:
847  		ctx = crypto_tfm_ctx(req->tfm);
848  		ret = ctx->send(req, ring, &commands, &results);
849  		if (ret)
850  			goto request_failed;
851  
852  		if (backlog)
853  			crypto_request_complete(backlog, -EINPROGRESS);
854  
855  		/* In case the send() helper did not issue any command to push
856  		 * to the engine because the input data was cached, continue to
857  		 * dequeue other requests as this is valid and not an error.
858  		 */
859  		if (!commands && !results)
860  			continue;
861  
862  		cdesc += commands;
863  		rdesc += results;
864  		nreq++;
865  	}
866  
867  request_failed:
868  	/* Not enough resources to handle all the requests. Bail out and save
869  	 * the request and the backlog for the next dequeue call (per-ring).
870  	 */
871  	priv->ring[ring].req = req;
872  	priv->ring[ring].backlog = backlog;
873  
874  finalize:
875  	if (!nreq)
876  		return;
877  
878  	spin_lock_bh(&priv->ring[ring].lock);
879  
880  	priv->ring[ring].requests += nreq;
881  
882  	if (!priv->ring[ring].busy) {
883  		safexcel_try_push_requests(priv, ring);
884  		priv->ring[ring].busy = true;
885  	}
886  
887  	spin_unlock_bh(&priv->ring[ring].lock);
888  
889  	/* let the RDR know we have pending descriptors */
890  	writel((rdesc * priv->config.rd_offset),
891  	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
892  
893  	/* let the CDR know we have pending descriptors */
894  	writel((cdesc * priv->config.cd_offset),
895  	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
896  }
897  
898  inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
899  				       void *rdp)
900  {
901  	struct safexcel_result_desc *rdesc = rdp;
902  	struct result_data_desc *result_data = rdp + priv->config.res_offset;
903  
904  	if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
905  		   ((!rdesc->descriptor_overflow) &&
906  		    (!rdesc->buffer_overflow) &&
907  		    (!result_data->error_code))))
908  		return 0;
909  
910  	if (rdesc->descriptor_overflow)
911  		dev_err(priv->dev, "Descriptor overflow detected");
912  
913  	if (rdesc->buffer_overflow)
914  		dev_err(priv->dev, "Buffer overflow detected");
915  
916  	if (result_data->error_code & 0x4066) {
917  		/* Fatal error (bits 1,2,5,6 & 14) */
918  		dev_err(priv->dev,
919  			"result descriptor error (%x)",
920  			result_data->error_code);
921  
922  		return -EIO;
923  	} else if (result_data->error_code &
924  		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
925  		/*
926  		 * Give priority over authentication fails:
927  		 * Blocksize, length & overflow errors,
928  		 * something wrong with the input!
929  		 */
930  		return -EINVAL;
931  	} else if (result_data->error_code & BIT(9)) {
932  		/* Authentication failed */
933  		return -EBADMSG;
934  	}
935  
936  	/* All other non-fatal errors */
937  	return -EINVAL;
938  }
939  
940  inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
941  				 int ring,
942  				 struct safexcel_result_desc *rdesc,
943  				 struct crypto_async_request *req)
944  {
945  	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
946  
947  	priv->ring[ring].rdr_req[i] = req;
948  }
949  
950  inline struct crypto_async_request *
951  safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
952  {
953  	int i = safexcel_ring_first_rdr_index(priv, ring);
954  
955  	return priv->ring[ring].rdr_req[i];
956  }
957  
958  void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
959  {
960  	struct safexcel_command_desc *cdesc;
961  
962  	/* Acknowledge the command descriptors */
963  	do {
964  		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
965  		if (IS_ERR(cdesc)) {
966  			dev_err(priv->dev,
967  				"Could not retrieve the command descriptor\n");
968  			return;
969  		}
970  	} while (!cdesc->last_seg);
971  }
972  
973  int safexcel_invalidate_cache(struct crypto_async_request *async,
974  			      struct safexcel_crypto_priv *priv,
975  			      dma_addr_t ctxr_dma, int ring)
976  {
977  	struct safexcel_command_desc *cdesc;
978  	struct safexcel_result_desc *rdesc;
979  	struct safexcel_token  *dmmy;
980  	int ret = 0;
981  
982  	/* Prepare command descriptor */
983  	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
984  				   &dmmy);
985  	if (IS_ERR(cdesc))
986  		return PTR_ERR(cdesc);
987  
988  	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
989  	cdesc->control_data.options = 0;
990  	cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
991  	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
992  
993  	/* Prepare result descriptor */
994  	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
995  
996  	if (IS_ERR(rdesc)) {
997  		ret = PTR_ERR(rdesc);
998  		goto cdesc_rollback;
999  	}
1000  
1001  	safexcel_rdr_req_set(priv, ring, rdesc, async);
1002  
1003  	return ret;
1004  
1005  cdesc_rollback:
1006  	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
1007  
1008  	return ret;
1009  }
1010  
1011  static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
1012  						     int ring)
1013  {
1014  	struct crypto_async_request *req;
1015  	struct safexcel_context *ctx;
1016  	int ret, i, nreq, ndesc, tot_descs, handled = 0;
1017  	bool should_complete;
1018  
1019  handle_results:
1020  	tot_descs = 0;
1021  
1022  	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
1023  	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
1024  	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
1025  	if (!nreq)
1026  		goto requests_left;
1027  
1028  	for (i = 0; i < nreq; i++) {
1029  		req = safexcel_rdr_req_get(priv, ring);
1030  
1031  		ctx = crypto_tfm_ctx(req->tfm);
1032  		ndesc = ctx->handle_result(priv, ring, req,
1033  					   &should_complete, &ret);
1034  		if (ndesc < 0) {
1035  			dev_err(priv->dev, "failed to handle result (%d)\n",
1036  				ndesc);
1037  			goto acknowledge;
1038  		}
1039  
1040  		if (should_complete) {
1041  			local_bh_disable();
1042  			crypto_request_complete(req, ret);
1043  			local_bh_enable();
1044  		}
1045  
1046  		tot_descs += ndesc;
1047  		handled++;
1048  	}
1049  
1050  acknowledge:
1051  	if (i)
1052  		writel(EIP197_xDR_PROC_xD_PKT(i) |
1053  		       (tot_descs * priv->config.rd_offset),
1054  		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
1055  
1056  	/* If the number of requests overflowed the counter, try to process more
1057  	 * requests.
1058  	 */
1059  	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
1060  		goto handle_results;
1061  
1062  requests_left:
1063  	spin_lock_bh(&priv->ring[ring].lock);
1064  
1065  	priv->ring[ring].requests -= handled;
1066  	safexcel_try_push_requests(priv, ring);
1067  
1068  	if (!priv->ring[ring].requests)
1069  		priv->ring[ring].busy = false;
1070  
1071  	spin_unlock_bh(&priv->ring[ring].lock);
1072  }
1073  
1074  static void safexcel_dequeue_work(struct work_struct *work)
1075  {
1076  	struct safexcel_work_data *data =
1077  			container_of(work, struct safexcel_work_data, work);
1078  
1079  	safexcel_dequeue(data->priv, data->ring);
1080  }
1081  
1082  struct safexcel_ring_irq_data {
1083  	struct safexcel_crypto_priv *priv;
1084  	int ring;
1085  };
1086  
1087  static irqreturn_t safexcel_irq_ring(int irq, void *data)
1088  {
1089  	struct safexcel_ring_irq_data *irq_data = data;
1090  	struct safexcel_crypto_priv *priv = irq_data->priv;
1091  	int ring = irq_data->ring, rc = IRQ_NONE;
1092  	u32 status, stat;
1093  
1094  	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
1095  	if (!status)
1096  		return rc;
1097  
1098  	/* RDR interrupts */
1099  	if (status & EIP197_RDR_IRQ(ring)) {
1100  		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
1101  
1102  		if (unlikely(stat & EIP197_xDR_ERR)) {
1103  			/*
1104  			 * Fatal error, the RDR is unusable and must be
1105  			 * reinitialized. This should not happen under
1106  			 * normal circumstances.
1107  			 */
1108  			dev_err(priv->dev, "RDR: fatal error.\n");
1109  		} else if (likely(stat & EIP197_xDR_THRESH)) {
1110  			rc = IRQ_WAKE_THREAD;
1111  		}
1112  
1113  		/* ACK the interrupts */
1114  		writel(stat & 0xff,
1115  		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
1116  	}
1117  
1118  	/* ACK the interrupts */
1119  	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
1120  
1121  	return rc;
1122  }
1123  
1124  static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
1125  {
1126  	struct safexcel_ring_irq_data *irq_data = data;
1127  	struct safexcel_crypto_priv *priv = irq_data->priv;
1128  	int ring = irq_data->ring;
1129  
1130  	safexcel_handle_result_descriptor(priv, ring);
1131  
1132  	queue_work(priv->ring[ring].workqueue,
1133  		   &priv->ring[ring].work_data.work);
1134  
1135  	return IRQ_HANDLED;
1136  }
1137  
1138  static int safexcel_request_ring_irq(void *pdev, int irqid,
1139  				     int is_pci_dev,
1140  				     int ring_id,
1141  				     irq_handler_t handler,
1142  				     irq_handler_t threaded_handler,
1143  				     struct safexcel_ring_irq_data *ring_irq_priv)
1144  {
1145  	int ret, irq, cpu;
1146  	struct device *dev;
1147  
1148  	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
1149  		struct pci_dev *pci_pdev = pdev;
1150  
1151  		dev = &pci_pdev->dev;
1152  		irq = pci_irq_vector(pci_pdev, irqid);
1153  		if (irq < 0) {
1154  			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
1155  				irqid, irq);
1156  			return irq;
1157  		}
1158  	} else if (IS_ENABLED(CONFIG_OF)) {
1159  		struct platform_device *plf_pdev = pdev;
1160  		char irq_name[6] = {0}; /* "ringX\0" */
1161  
1162  		snprintf(irq_name, 6, "ring%d", irqid);
1163  		dev = &plf_pdev->dev;
1164  		irq = platform_get_irq_byname(plf_pdev, irq_name);
1165  
1166  		if (irq < 0)
1167  			return irq;
1168  	} else {
1169  		return -ENXIO;
1170  	}
1171  
1172  	ret = devm_request_threaded_irq(dev, irq, handler,
1173  					threaded_handler, IRQF_ONESHOT,
1174  					dev_name(dev), ring_irq_priv);
1175  	if (ret) {
1176  		dev_err(dev, "unable to request IRQ %d\n", irq);
1177  		return ret;
1178  	}
1179  
1180  	/* Set affinity */
1181  	cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
1182  	irq_set_affinity_hint(irq, get_cpu_mask(cpu));
1183  
1184  	return irq;
1185  }
1186  
1187  static struct safexcel_alg_template *safexcel_algs[] = {
1188  	&safexcel_alg_ecb_des,
1189  	&safexcel_alg_cbc_des,
1190  	&safexcel_alg_ecb_des3_ede,
1191  	&safexcel_alg_cbc_des3_ede,
1192  	&safexcel_alg_ecb_aes,
1193  	&safexcel_alg_cbc_aes,
1194  	&safexcel_alg_cfb_aes,
1195  	&safexcel_alg_ofb_aes,
1196  	&safexcel_alg_ctr_aes,
1197  	&safexcel_alg_md5,
1198  	&safexcel_alg_sha1,
1199  	&safexcel_alg_sha224,
1200  	&safexcel_alg_sha256,
1201  	&safexcel_alg_sha384,
1202  	&safexcel_alg_sha512,
1203  	&safexcel_alg_hmac_md5,
1204  	&safexcel_alg_hmac_sha1,
1205  	&safexcel_alg_hmac_sha224,
1206  	&safexcel_alg_hmac_sha256,
1207  	&safexcel_alg_hmac_sha384,
1208  	&safexcel_alg_hmac_sha512,
1209  	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
1210  	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
1211  	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
1212  	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
1213  	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
1214  	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
1215  	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
1216  	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
1217  	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
1218  	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
1219  	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
1220  	&safexcel_alg_xts_aes,
1221  	&safexcel_alg_gcm,
1222  	&safexcel_alg_ccm,
1223  	&safexcel_alg_crc32,
1224  	&safexcel_alg_cbcmac,
1225  	&safexcel_alg_xcbcmac,
1226  	&safexcel_alg_cmac,
1227  	&safexcel_alg_chacha20,
1228  	&safexcel_alg_chachapoly,
1229  	&safexcel_alg_chachapoly_esp,
1230  	&safexcel_alg_sm3,
1231  	&safexcel_alg_hmac_sm3,
1232  	&safexcel_alg_ecb_sm4,
1233  	&safexcel_alg_cbc_sm4,
1234  	&safexcel_alg_ofb_sm4,
1235  	&safexcel_alg_cfb_sm4,
1236  	&safexcel_alg_ctr_sm4,
1237  	&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
1238  	&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
1239  	&safexcel_alg_authenc_hmac_sha1_ctr_sm4,
1240  	&safexcel_alg_authenc_hmac_sm3_ctr_sm4,
1241  	&safexcel_alg_sha3_224,
1242  	&safexcel_alg_sha3_256,
1243  	&safexcel_alg_sha3_384,
1244  	&safexcel_alg_sha3_512,
1245  	&safexcel_alg_hmac_sha3_224,
1246  	&safexcel_alg_hmac_sha3_256,
1247  	&safexcel_alg_hmac_sha3_384,
1248  	&safexcel_alg_hmac_sha3_512,
1249  	&safexcel_alg_authenc_hmac_sha1_cbc_des,
1250  	&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
1251  	&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
1252  	&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
1253  	&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
1254  	&safexcel_alg_authenc_hmac_sha256_cbc_des,
1255  	&safexcel_alg_authenc_hmac_sha224_cbc_des,
1256  	&safexcel_alg_authenc_hmac_sha512_cbc_des,
1257  	&safexcel_alg_authenc_hmac_sha384_cbc_des,
1258  	&safexcel_alg_rfc4106_gcm,
1259  	&safexcel_alg_rfc4543_gcm,
1260  	&safexcel_alg_rfc4309_ccm,
1261  };
1262  
1263  static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
1264  {
1265  	int i, j, ret = 0;
1266  
1267  	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
1268  		safexcel_algs[i]->priv = priv;
1269  
1270  		/* Do we have all required base algorithms available? */
1271  		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1272  		    safexcel_algs[i]->algo_mask)
1273  			/* No, so don't register this ciphersuite */
1274  			continue;
1275  
1276  		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1277  			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
1278  		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
1279  			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
1280  		else
1281  			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
1282  
1283  		if (ret)
1284  			goto fail;
1285  	}
1286  
1287  	return 0;
1288  
1289  fail:
1290  	for (j = 0; j < i; j++) {
1291  		/* Do we have all required base algorithms available? */
1292  		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
1293  		    safexcel_algs[j]->algo_mask)
1294  			/* No, so don't unregister this ciphersuite */
1295  			continue;
1296  
1297  		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1298  			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
1299  		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
1300  			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
1301  		else
1302  			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
1303  	}
1304  
1305  	return ret;
1306  }
1307  
1308  static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
1309  {
1310  	int i;
1311  
1312  	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
1313  		/* Do we have all required base algorithms available? */
1314  		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1315  		    safexcel_algs[i]->algo_mask)
1316  			/* No, so don't unregister this ciphersuite */
1317  			continue;
1318  
1319  		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1320  			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
1321  		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
1322  			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
1323  		else
1324  			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
1325  	}
1326  }
1327  
1328  static void safexcel_configure(struct safexcel_crypto_priv *priv)
1329  {
1330  	u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
1331  
1332  	priv->config.pes = priv->hwconfig.hwnumpes;
1333  	priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
1334  	/* Cannot currently support more rings than we have ring AICs! */
1335  	priv->config.rings = min_t(u32, priv->config.rings,
1336  					priv->hwconfig.hwnumraic);
1337  
1338  	priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
1339  	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
1340  	priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
1341  
1342  	/* result token follows the descriptor; offset must be rounded to bus width */
1343  	priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
1344  	/* the full descriptor size is this first part plus the result struct */
1345  	priv->config.rd_size    = priv->config.res_offset +
1346  				  EIP197_RD64_RESULT_SIZE;
1347  	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
1348  
1349  	/* convert dwords to bytes */
1350  	priv->config.cd_offset *= sizeof(u32);
1351  	priv->config.cdsh_offset *= sizeof(u32);
1352  	priv->config.rd_offset *= sizeof(u32);
1353  	priv->config.res_offset *= sizeof(u32);
1354  }
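
/*
 * Editorial illustration (not part of the driver): with a hypothetical
 * hwdataw of 2, mask = BIT(2) - 1 = 3, so every offset is rounded up to a
 * whole number of 4-dword bus words before the dword-to-byte conversion.
 * If cd_size were, say, 10 dwords:
 *
 *   cd_offset = (10 + 3) & ~3 = 12 dwords  ->  12 * 4 = 48 bytes
 *
 * The same rounding is applied to cdsh_offset, res_offset and rd_offset,
 * so each descriptor (and its trailing result token) starts on a bus-word
 * boundary in ring memory.
 */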
1355  
1356  static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
1357  {
1358  	struct safexcel_register_offsets *offsets = &priv->offsets;
1359  
1360  	if (priv->flags & SAFEXCEL_HW_EIP197) {
1361  		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
1362  		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
1363  		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
1364  		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
1365  		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
1366  		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
1367  		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
1368  		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
1369  		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
1370  		offsets->pe		= EIP197_PE_BASE;
1371  		offsets->global		= EIP197_GLOBAL_BASE;
1372  	} else {
1373  		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
1374  		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
1375  		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
1376  		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
1377  		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
1378  		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
1379  		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
1380  		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
1381  		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
1382  		offsets->pe		= EIP97_PE_BASE;
1383  		offsets->global		= EIP97_GLOBAL_BASE;
1384  	}
1385  }
1386  
1387  /*
1388   * Generic part of probe routine, shared by platform and PCI driver
1389   *
1390   * Assumes IO resources have been mapped, private data mem has been allocated,
1391   * clocks have been enabled, device pointer has been assigned etc.
1392   *
1393   */
1394  static int safexcel_probe_generic(void *pdev,
1395  				  struct safexcel_crypto_priv *priv,
1396  				  int is_pci_dev)
1397  {
1398  	struct device *dev = priv->dev;
1399  	u32 peid, version, mask, val, hiaopt, hwopt, peopt;
1400  	int i, ret, hwctg;
1401  
1402  	priv->context_pool = dmam_pool_create("safexcel-context", dev,
1403  					      sizeof(struct safexcel_context_record),
1404  					      1, 0);
1405  	if (!priv->context_pool)
1406  		return -ENOMEM;
1407  
1408  	/*
1409  	 * First try the EIP97 HIA version regs
1410  	 * For the EIP197, this is guaranteed to NOT return any of the test
1411  	 * values
1412  	 */
1413  	version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
1414  
1415  	mask = 0;  /* do not swap */
1416  	if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
1417  		priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1418  	} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
1419  		/* read back byte-swapped, so complement byte swap bits */
1420  		mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
1421  		priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1422  	} else {
1423  		/* So it wasn't an EIP97 ... maybe it's an EIP197? */
1424  		version = readl(priv->base + EIP197_HIA_AIC_BASE +
1425  				EIP197_HIA_VERSION);
1426  		if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
1427  			priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1428  			priv->flags |= SAFEXCEL_HW_EIP197;
1429  		} else if (EIP197_REG_HI16(version) ==
1430  			   EIP197_HIA_VERSION_BE) {
1431  			/* read back byte-swapped, so complement swap bits */
1432  			mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
1433  			priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1434  			priv->flags |= SAFEXCEL_HW_EIP197;
1435  		} else {
1436  			return -ENODEV;
1437  		}
1438  	}
1439  
1440  	/* Now initialize the reg offsets based on the probing info so far */
1441  	safexcel_init_register_offsets(priv);
1442  
1443  	/*
1444  	 * If the version was read byte-swapped, we need to flip the device
1445  	 * swapping. Keep in mind here, though, that what we write will also be
1446  	 * byte-swapped ...
1447  	 */
1448  	if (mask) {
1449  		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
1450  		val = val ^ (mask >> 24); /* toggle byte swap bits */
1451  		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
1452  	}
1453  
1454  	/*
1455  	 * We're not done probing yet! We may fall through to here if no HIA
1456  	 * was found at all. So, with the endianness presumably correct now and
1457  	 * the offsets set up, *really* probe for the EIP97/EIP197.
1458  	 */
1459  	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
1460  	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
1461  	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
1462  	     (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
1463  	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
1464  	     (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
1465  		/*
1466  		 * We did not find the device that matched our initial probing
1467  	 * (or our initial probing failed). Report an appropriate error.
1468  		 */
1469  		dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
1470  			version);
1471  		return -ENODEV;
1472  	}
1473  
1474  	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
1475  	hwctg = version >> 28;
1476  	peid = version & 255;
1477  
1478  	/* Detect EIP206 processing pipe */
1479  	version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0));
1480  	if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
1481  		dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
1482  		return -ENODEV;
1483  	}
1484  	priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
1485  
1486  	/* Detect EIP96 packet engine and version */
1487  	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
1488  	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
1489  		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
1490  		return -ENODEV;
1491  	}
1492  	priv->hwconfig.pever = EIP197_VERSION_MASK(version);
1493  
1494  	hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
1495  	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
1496  
1497  	priv->hwconfig.icever = 0;
1498  	priv->hwconfig.ocever = 0;
1499  	priv->hwconfig.psever = 0;
1500  	if (priv->flags & SAFEXCEL_HW_EIP197) {
1501  		/* EIP197 */
1502  		peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
1503  
1504  		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1505  					  EIP197_HWDATAW_MASK;
1506  		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
1507  					   EIP197_CFSIZE_MASK) +
1508  					  EIP197_CFSIZE_ADJUST;
1509  		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
1510  					   EIP197_RFSIZE_MASK) +
1511  					  EIP197_RFSIZE_ADJUST;
1512  		priv->hwconfig.hwnumpes	= (hiaopt >> EIP197_N_PES_OFFSET) &
1513  					  EIP197_N_PES_MASK;
1514  		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
1515  					    EIP197_N_RINGS_MASK;
1516  		if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
1517  			priv->flags |= EIP197_PE_ARB;
1518  		if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
1519  			priv->flags |= EIP197_ICE;
1520  			/* Detect ICE EIP207 classification engine and version */
1521  			version = readl(EIP197_PE(priv) +
1522  				  EIP197_PE_ICE_VERSION(0));
1523  			if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
1524  				dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
1525  					peid);
1526  				return -ENODEV;
1527  			}
1528  			priv->hwconfig.icever = EIP197_VERSION_MASK(version);
1529  		}
1530  		if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
1531  			priv->flags |= EIP197_OCE;
1532  			/* Detect EIP96PP packet stream editor and version */
1533  			version = readl(EIP197_PE(priv) + EIP197_PE_PSE_VERSION(0));
1534  			if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
1535  				dev_err(dev, "EIP%d: EIP96PP not detected.\n", peid);
1536  				return -ENODEV;
1537  			}
1538  			priv->hwconfig.psever = EIP197_VERSION_MASK(version);
1539  			/* Detect OCE EIP207 classification engine and version */
1540  			version = readl(EIP197_PE(priv) +
1541  				  EIP197_PE_ICE_VERSION(0));
1542  			if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
1543  				dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
1544  					peid);
1545  				return -ENODEV;
1546  			}
1547  			priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
1548  		}
1549  		/* If not a full TRC, then assume simple TRC */
1550  		if (!(hwopt & EIP197_OPT_HAS_TRC))
1551  			priv->flags |= EIP197_SIMPLE_TRC;
1552  		/* EIP197 always has SOME form of TRC */
1553  		priv->flags |= EIP197_TRC_CACHE;
1554  	} else {
1555  		/* EIP97 */
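      		/* Decode the equivalent HIA parameters using the EIP97 option field layout */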
1556  		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1557  					  EIP97_HWDATAW_MASK;
1558  		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
1559  					  EIP97_CFSIZE_MASK;
1560  		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
1561  					  EIP97_RFSIZE_MASK;
1562  		priv->hwconfig.hwnumpes	= 1; /* by definition */
1563  		priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
1564  					    EIP197_N_RINGS_MASK;
1565  	}
1566  
1567  	/* Scan for ring AICs */
1568  	for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
1569  		version = readl(EIP197_HIA_AIC_R(priv) +
1570  				EIP197_HIA_AIC_R_VERSION(i));
1571  		if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
1572  			break;
1573  	}
1574  	priv->hwconfig.hwnumraic = i;
1575  	/* Low-end EIP196 may not have any ring AICs ... */
1576  	if (!priv->hwconfig.hwnumraic) {
1577  		dev_err(priv->dev, "No ring interrupt controller present!\n");
1578  		return -ENODEV;
1579  	}
1580  
1581  	/* Get supported algorithms from EIP96 transform engine */
1582  	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
1583  				    EIP197_PE_EIP96_OPTIONS(0));
1584  
1585  	/* Print single info line describing what we just detected */
1586  	dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
1587  		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
1588  		 priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
1589  		 priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
1590  		 priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
1591  		 priv->hwconfig.ppver, priv->hwconfig.pever,
1592  		 priv->hwconfig.algo_flags, priv->hwconfig.icever,
1593  		 priv->hwconfig.ocever, priv->hwconfig.psever);
1594  
1595  	safexcel_configure(priv);
1596  
1597  	if (IS_ENABLED(CONFIG_PCI) && priv->data->version == EIP197_DEVBRD) {
1598  		/*
1599  		 * Request MSI vectors for global + 1 per ring -
1600  		 * or just 1 for older dev images
1601  		 */
1602  		struct pci_dev *pci_pdev = pdev;
1603  
1604  		ret = pci_alloc_irq_vectors(pci_pdev,
1605  					    priv->config.rings + 1,
1606  					    priv->config.rings + 1,
1607  					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
1608  		if (ret < 0) {
1609  			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
1610  			return ret;
1611  		}
1612  	}
1613  
1614  	/* Register the ring IRQ handlers and configure the rings */
1615  	priv->ring = devm_kcalloc(dev, priv->config.rings,
1616  				  sizeof(*priv->ring),
1617  				  GFP_KERNEL);
1618  	if (!priv->ring)
1619  		return -ENOMEM;
1620  
1621  	for (i = 0; i < priv->config.rings; i++) {
1622  		char wq_name[9] = {0};
1623  		int irq;
1624  		struct safexcel_ring_irq_data *ring_irq;
1625  
1626  		ret = safexcel_init_ring_descriptors(priv,
1627  						     &priv->ring[i].cdr,
1628  						     &priv->ring[i].rdr);
1629  		if (ret) {
1630  			dev_err(dev, "Failed to initialize rings\n");
1631  			goto err_cleanup_rings;
1632  		}
1633  
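      		/* Allocate the per-ring array tracking requests posted to the result descriptor ring */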
1634  		priv->ring[i].rdr_req = devm_kcalloc(dev,
1635  			EIP197_DEFAULT_RING_SIZE,
1636  			sizeof(*priv->ring[i].rdr_req),
1637  			GFP_KERNEL);
1638  		if (!priv->ring[i].rdr_req) {
1639  			ret = -ENOMEM;
1640  			goto err_cleanup_rings;
1641  		}
1642  
1643  		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
1644  		if (!ring_irq) {
1645  			ret = -ENOMEM;
1646  			goto err_cleanup_rings;
1647  		}
1648  
1649  		ring_irq->priv = priv;
1650  		ring_irq->ring = i;
1651  
1652  		irq = safexcel_request_ring_irq(pdev,
1653  						EIP197_IRQ_NUMBER(i, is_pci_dev),
1654  						is_pci_dev,
1655  						i,
1656  						safexcel_irq_ring,
1657  						safexcel_irq_ring_thread,
1658  						ring_irq);
1659  		if (irq < 0) {
1660  			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
1661  			ret = irq;
1662  			goto err_cleanup_rings;
1663  		}
1664  
1665  		priv->ring[i].irq = irq;
1666  		priv->ring[i].work_data.priv = priv;
1667  		priv->ring[i].work_data.ring = i;
1668  		INIT_WORK(&priv->ring[i].work_data.work,
1669  			  safexcel_dequeue_work);
1670  
1671  		snprintf(wq_name, 9, "wq_ring%d", i);
1672  		priv->ring[i].workqueue =
1673  			create_singlethread_workqueue(wq_name);
1674  		if (!priv->ring[i].workqueue) {
1675  			ret = -ENOMEM;
1676  			goto err_cleanup_rings;
1677  		}
1678  
1679  		priv->ring[i].requests = 0;
1680  		priv->ring[i].busy = false;
1681  
1682  		crypto_init_queue(&priv->ring[i].queue,
1683  				  EIP197_DEFAULT_RING_SIZE);
1684  
1685  		spin_lock_init(&priv->ring[i].lock);
1686  		spin_lock_init(&priv->ring[i].queue_lock);
1687  	}
1688  
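      	/* Reset the counter used to pick a ring for new requests */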
1689  	atomic_set(&priv->ring_used, 0);
1690  
1691  	ret = safexcel_hw_init(priv);
1692  	if (ret) {
1693  		dev_err(dev, "HW init failed (%d)\n", ret);
1694  		goto err_cleanup_rings;
1695  	}
1696  
1697  	ret = safexcel_register_algorithms(priv);
1698  	if (ret) {
1699  		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
1700  		goto err_cleanup_rings;
1701  	}
1702  
1703  	return 0;
1704  
1705  err_cleanup_rings:
1706  	for (i = 0; i < priv->config.rings; i++) {
1707  		if (priv->ring[i].irq)
1708  			irq_set_affinity_hint(priv->ring[i].irq, NULL);
1709  		if (priv->ring[i].workqueue)
1710  			destroy_workqueue(priv->ring[i].workqueue);
1711  	}
1712  
1713  	return ret;
1714  }
1715  
1716  static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
1717  {
1718  	int i;
1719  
1720  	for (i = 0; i < priv->config.rings; i++) {
1721  		/* clear any pending interrupt */
1722  		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
1723  		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
1724  
1725  		/* Reset the CDR base address */
1726  		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1727  		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1728  
1729  		/* Reset the RDR base address */
1730  		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
1731  		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
1732  	}
1733  }
1734  
1735  /* for Device Tree platform driver */
1736  
1737  static int safexcel_probe(struct platform_device *pdev)
1738  {
1739  	struct device *dev = &pdev->dev;
1740  	struct safexcel_crypto_priv *priv;
1741  	int ret;
1742  
1743  	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1744  	if (!priv)
1745  		return -ENOMEM;
1746  
1747  	priv->dev = dev;
1748  	priv->data = (struct safexcel_priv_data *)of_device_get_match_data(dev);
1749  
1750  	platform_set_drvdata(pdev, priv);
1751  
1752  	priv->base = devm_platform_ioremap_resource(pdev, 0);
1753  	if (IS_ERR(priv->base)) {
1754  		dev_err(dev, "failed to get resource\n");
1755  		return PTR_ERR(priv->base);
1756  	}
1757  
1758  	priv->clk = devm_clk_get(&pdev->dev, NULL);
1759  	ret = PTR_ERR_OR_ZERO(priv->clk);
1760  	/* The clock isn't mandatory */
1761  	if (ret != -ENOENT) {
1762  		if (ret)
1763  			return ret;
1764  
1765  		ret = clk_prepare_enable(priv->clk);
1766  		if (ret) {
1767  			dev_err(dev, "unable to enable clk (%d)\n", ret);
1768  			return ret;
1769  		}
1770  	}
1771  
1772  	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
1773  	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
1774  	/* The clock isn't mandatory */
1775  	if (ret != -ENOENT) {
1776  		if (ret)
1777  			goto err_core_clk;
1778  
1779  		ret = clk_prepare_enable(priv->reg_clk);
1780  		if (ret) {
1781  			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
1782  			goto err_core_clk;
1783  		}
1784  	}
1785  
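      	/* Allow 64-bit DMA addressing for descriptor and data buffers */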
1786  	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
1787  	if (ret)
1788  		goto err_reg_clk;
1789  
1790  	/* Generic EIP97/EIP197 device probing */
1791  	ret = safexcel_probe_generic(pdev, priv, 0);
1792  	if (ret)
1793  		goto err_reg_clk;
1794  
1795  	return 0;
1796  
1797  err_reg_clk:
1798  	clk_disable_unprepare(priv->reg_clk);
1799  err_core_clk:
1800  	clk_disable_unprepare(priv->clk);
1801  	return ret;
1802  }
1803  
1804  static int safexcel_remove(struct platform_device *pdev)
1805  {
1806  	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
1807  	int i;
1808  
1809  	safexcel_unregister_algorithms(priv);
1810  	safexcel_hw_reset_rings(priv);
1811  
1812  	clk_disable_unprepare(priv->reg_clk);
1813  	clk_disable_unprepare(priv->clk);
1814  
1815  	for (i = 0; i < priv->config.rings; i++) {
1816  		irq_set_affinity_hint(priv->ring[i].irq, NULL);
1817  		destroy_workqueue(priv->ring[i].workqueue);
1818  	}
1819  
1820  	return 0;
1821  }
1822  
1823  static const struct safexcel_priv_data eip97ies_mrvl_data = {
1824  	.version = EIP97IES_MRVL,
1825  };
1826  
1827  static const struct safexcel_priv_data eip197b_mrvl_data = {
1828  	.version = EIP197B_MRVL,
1829  };
1830  
1831  static const struct safexcel_priv_data eip197d_mrvl_data = {
1832  	.version = EIP197D_MRVL,
1833  };
1834  
1835  static const struct safexcel_priv_data eip197_devbrd_data = {
1836  	.version = EIP197_DEVBRD,
1837  };
1838  
1839  static const struct safexcel_priv_data eip197c_mxl_data = {
1840  	.version = EIP197C_MXL,
1841  	.fw_little_endian = true,
1842  };
1843  
1844  static const struct of_device_id safexcel_of_match_table[] = {
1845  	{
1846  		.compatible = "inside-secure,safexcel-eip97ies",
1847  		.data = &eip97ies_mrvl_data,
1848  	},
1849  	{
1850  		.compatible = "inside-secure,safexcel-eip197b",
1851  		.data = &eip197b_mrvl_data,
1852  	},
1853  	{
1854  		.compatible = "inside-secure,safexcel-eip197d",
1855  		.data = &eip197d_mrvl_data,
1856  	},
1857  	{
1858  		.compatible = "inside-secure,safexcel-eip197c-mxl",
1859  		.data = &eip197c_mxl_data,
1860  	},
1861  	/* For backward compatibility and intended for generic use */
1862  	{
1863  		.compatible = "inside-secure,safexcel-eip97",
1864  		.data = &eip97ies_mrvl_data,
1865  	},
1866  	{
1867  		.compatible = "inside-secure,safexcel-eip197",
1868  		.data = &eip197b_mrvl_data,
1869  	},
1870  	{},
1871  };
1872  
1873  MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
1874  
1875  static struct platform_driver  crypto_safexcel = {
1876  	.probe		= safexcel_probe,
1877  	.remove		= safexcel_remove,
1878  	.driver		= {
1879  		.name	= "crypto-safexcel",
1880  		.of_match_table = safexcel_of_match_table,
1881  	},
1882  };
1883  
1884  /* PCIE devices - i.e. Inside Secure development boards */
1885  
1886  static int safexcel_pci_probe(struct pci_dev *pdev,
1887  			       const struct pci_device_id *ent)
1888  {
1889  	struct device *dev = &pdev->dev;
1890  	struct safexcel_crypto_priv *priv;
1891  	void __iomem *pciebase;
1892  	int rc;
1893  	u32 val;
1894  
1895  	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
1896  		ent->vendor, ent->device, ent->subvendor,
1897  		ent->subdevice, ent->driver_data);
1898  
1899  	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1900  	if (!priv)
1901  		return -ENOMEM;
1902  
1903  	priv->dev = dev;
1904  	priv->data = (struct safexcel_priv_data *)ent->driver_data;
1905  
1906  	pci_set_drvdata(pdev, priv);
1907  
1908  	/* enable the device */
1909  	rc = pcim_enable_device(pdev);
1910  	if (rc) {
1911  		dev_err(dev, "Failed to enable PCI device\n");
1912  		return rc;
1913  	}
1914  
1915  	/* take ownership of PCI BAR0 */
1916  	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
1917  	if (rc) {
1918  		dev_err(dev, "Failed to map IO region for BAR0\n");
1919  		return rc;
1920  	}
1921  	priv->base = pcim_iomap_table(pdev)[0];
1922  
1923  	if (priv->data->version == EIP197_DEVBRD) {
1924  		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
1925  
1926  		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
1927  		if (rc) {
1928  			dev_err(dev, "Failed to map IO region for BAR4\n");
1929  			return rc;
1930  		}
1931  
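      		/* Look for the Xilinx PCIe IRQ block so per-ring MSI vectors can be enabled */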
1932  		pciebase = pcim_iomap_table(pdev)[2];
1933  		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
1934  		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
1935  			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
1936  				(val & 0xff));
1937  
1938  			/* Setup MSI identity map mapping */
1939  			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
1940  			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
1941  			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
1942  			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
1943  			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
1944  			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
1945  			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
1946  			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);
1947  
1948  			/* Enable all device interrupts */
1949  			writel(GENMASK(31, 0),
1950  			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
1951  		} else {
1952  			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
1953  				val);
1954  			return -ENODEV;
1955  		}
1956  
1957  		/* HW reset FPGA dev board */
1958  		/* assert reset */
1959  		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
1960  		wmb(); /* maintain strict ordering for accesses here */
1961  		/* deassert reset */
1962  		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
1963  		wmb(); /* maintain strict ordering for accesses here */
1964  	}
1965  
1966  	/* enable bus mastering */
1967  	pci_set_master(pdev);
1968  
1969  	/* Generic EIP97/EIP197 device probing */
1970  	rc = safexcel_probe_generic(pdev, priv, 1);
1971  	return rc;
1972  }
1973  
1974  static void safexcel_pci_remove(struct pci_dev *pdev)
1975  {
1976  	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
1977  	int i;
1978  
1979  	safexcel_unregister_algorithms(priv);
1980  
1981  	for (i = 0; i < priv->config.rings; i++)
1982  		destroy_workqueue(priv->ring[i].workqueue);
1983  
1984  	safexcel_hw_reset_rings(priv);
1985  }
1986  
1987  static const struct pci_device_id safexcel_pci_ids[] = {
1988  	{
1989  		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
1990  			       0x16ae, 0xc522),
1991  		.driver_data = (kernel_ulong_t)&eip197_devbrd_data,
1992  	},
1993  	{},
1994  };
1995  
1996  MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
1997  
1998  static struct pci_driver safexcel_pci_driver = {
1999  	.name          = "crypto-safexcel",
2000  	.id_table      = safexcel_pci_ids,
2001  	.probe         = safexcel_pci_probe,
2002  	.remove        = safexcel_pci_remove,
2003  };
2004  
2005  static int __init safexcel_init(void)
2006  {
2007  	int ret;
2008  
2009  	/* Register PCI driver */
2010  	ret = pci_register_driver(&safexcel_pci_driver);
2011  
2012  	/* Register platform driver */
2013  	if (IS_ENABLED(CONFIG_OF) && !ret) {
2014  		ret = platform_driver_register(&crypto_safexcel);
2015  		if (ret)
2016  			pci_unregister_driver(&safexcel_pci_driver);
2017  	}
2018  
2019  	return ret;
2020  }
2021  
2022  static void __exit safexcel_exit(void)
2023  {
2024  	/* Unregister platform driver */
2025  	if (IS_ENABLED(CONFIG_OF))
2026  		platform_driver_unregister(&crypto_safexcel);
2027  
2028  	/* Unregister PCI driver if successfully registered before */
2029  	pci_unregister_driver(&safexcel_pci_driver);
2030  }
2031  
2032  module_init(safexcel_init);
2033  module_exit(safexcel_exit);
2034  
2035  MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
2036  MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
2037  MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
2038  MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
2039  MODULE_LICENSE("GPL v2");
2040  MODULE_IMPORT_NS(CRYPTO_INTERNAL);
2041  
2042  MODULE_FIRMWARE("ifpp.bin");
2043  MODULE_FIRMWARE("ipue.bin");
2044  MODULE_FIRMWARE("inside-secure/eip197b/ifpp.bin");
2045  MODULE_FIRMWARE("inside-secure/eip197b/ipue.bin");
2046  MODULE_FIRMWARE("inside-secure/eip197d/ifpp.bin");
2047  MODULE_FIRMWARE("inside-secure/eip197d/ipue.bin");
2048  MODULE_FIRMWARE("inside-secure/eip197_minifw/ifpp.bin");
2049  MODULE_FIRMWARE("inside-secure/eip197_minifw/ipue.bin");
2050