1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2020 - 2021 Intel Corporation */
3 #include <linux/iopoll.h>
4 #include <adf_accel_devices.h>
5 #include <adf_cfg.h>
6 #include <adf_cfg_services.h>
7 #include <adf_clock.h>
8 #include <adf_common_drv.h>
9 #include <adf_gen4_dc.h>
10 #include <adf_gen4_hw_data.h>
11 #include <adf_gen4_pfvf.h>
12 #include <adf_gen4_pm.h>
13 #include <adf_gen4_timer.h>
14 #include "adf_4xxx_hw_data.h"
15 #include "icp_qat_hw.h"
16 
/*
 * Accel engine (AE) group masks. Groups 0 and 1 each cover four service
 * engines; group 2 is the single engine that runs the admin firmware
 * (see the adf_fw_*_config tables below).
 */
#define ADF_AE_GROUP_0		GENMASK(3, 0)
#define ADF_AE_GROUP_1		GENMASK(7, 4)
#define ADF_AE_GROUP_2		BIT(8)
20 
/* Kinds of firmware objects that can be loaded onto the accel engines */
enum adf_fw_objs {
	ADF_FW_SYM_OBJ,		/* symmetric crypto */
	ADF_FW_ASYM_OBJ,	/* asymmetric crypto */
	ADF_FW_DC_OBJ,		/* data compression */
	ADF_FW_ADMIN_OBJ,	/* admin/management */
};
27 
/* Firmware object names for 4xxx devices, indexed by enum adf_fw_objs */
static const char * const adf_4xxx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] =  ADF_4XXX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] =  ADF_4XXX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] =  ADF_4XXX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_4XXX_ADMIN_OBJ,
};

/* Firmware object names for 402xx devices, indexed by enum adf_fw_objs */
static const char * const adf_402xx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] =  ADF_402XX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] =  ADF_402XX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] =  ADF_402XX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
};
41 
/* Pairing of an AE group with the firmware object loaded onto it */
struct adf_fw_config {
	u32 ae_mask;		/* which accel engines run this object */
	enum adf_fw_objs obj;	/* which firmware object to load on them */
};
46 
/*
 * Per-service firmware layouts: each table lists, per entry, the firmware
 * object loaded on one AE group. The admin object always runs on group 2
 * (the dedicated admin engine).
 */
static const struct adf_fw_config adf_fw_cy_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_dcc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

/*
 * uof_get_num_objs() reports ARRAY_SIZE(adf_fw_cy_config) for every
 * service configuration, so all tables must have the same length.
 */
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
95 
/*
 * Worker thread to service arbiter mappings, one 32-bit word per accel
 * engine. The last entry (the admin engine) is always 0 — presumably it
 * takes no arbiter work; confirm against the arbiter init code.
 */
static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
	0x5555555, 0x5555555, 0x5555555, 0x5555555,
	0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
	0x0
};

/* Mapping used when only the compression (dc) service is enabled */
static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
	0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
	0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
	0x0
};

/* Mapping used for the dc chaining (dcc) service */
static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
	0x0
};
114 
/*
 * Device class shared by all 4xxx devices; "instances" counts probed
 * devices and is updated in adf_init_hw_data_4xxx()/adf_clean_hw_data_4xxx().
 */
static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
	.instances = 0,
};
120 
get_accel_mask(struct adf_hw_device_data * self)121 static u32 get_accel_mask(struct adf_hw_device_data *self)
122 {
123 	return ADF_4XXX_ACCELERATORS_MASK;
124 }
125 
get_ae_mask(struct adf_hw_device_data * self)126 static u32 get_ae_mask(struct adf_hw_device_data *self)
127 {
128 	u32 me_disable = self->fuses;
129 
130 	return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
131 }
132 
get_num_accels(struct adf_hw_device_data * self)133 static u32 get_num_accels(struct adf_hw_device_data *self)
134 {
135 	return ADF_4XXX_MAX_ACCELERATORS;
136 }
137 
get_num_aes(struct adf_hw_device_data * self)138 static u32 get_num_aes(struct adf_hw_device_data *self)
139 {
140 	if (!self || !self->ae_mask)
141 		return 0;
142 
143 	return hweight32(self->ae_mask);
144 }
145 
get_misc_bar_id(struct adf_hw_device_data * self)146 static u32 get_misc_bar_id(struct adf_hw_device_data *self)
147 {
148 	return ADF_4XXX_PMISC_BAR;
149 }
150 
get_etr_bar_id(struct adf_hw_device_data * self)151 static u32 get_etr_bar_id(struct adf_hw_device_data *self)
152 {
153 	return ADF_4XXX_ETR_BAR;
154 }
155 
get_sram_bar_id(struct adf_hw_device_data * self)156 static u32 get_sram_bar_id(struct adf_hw_device_data *self)
157 {
158 	return ADF_4XXX_SRAM_BAR;
159 }
160 
161 /*
162  * The vector routing table is used to select the MSI-X entry to use for each
163  * interrupt source.
164  * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
165  * The final entry corresponds to VF2PF or error interrupts.
166  * This vector table could be used to configure one MSI-X entry to be shared
167  * between multiple interrupt sources.
168  *
169  * The default routing is set to have a one to one correspondence between the
170  * interrupt source and the MSI-X entry used.
171  */
set_msix_default_rttable(struct adf_accel_dev * accel_dev)172 static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
173 {
174 	void __iomem *csr;
175 	int i;
176 
177 	csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
178 	for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
179 		ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
180 }
181 
/*
 * Build the accelerator capability mask for this device: start from the
 * full sym/asym/dc capability sets, strip whatever the fuses disable,
 * then return the subset relevant to the configured service.
 */
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 capabilities_sym, capabilities_asym, capabilities_dc;
	u32 capabilities_dcc;
	u32 fusectl1;

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);

	/* Full symmetric-crypto capability set before fuse stripping */
	capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
			  ICP_ACCEL_CAPABILITIES_SHA3 |
			  ICP_ACCEL_CAPABILITIES_SHA3_EXT |
			  ICP_ACCEL_CAPABILITIES_HKDF |
			  ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
			  ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
			  ICP_ACCEL_CAPABILITIES_SM3 |
			  ICP_ACCEL_CAPABILITIES_SM4 |
			  ICP_ACCEL_CAPABILITIES_AES_V2;

	/* A set bit in fusectl1 means the feature is OFF in this SKU */
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}

	/* Full asymmetric-crypto capability set before fuse stripping */
	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_SM2 |
			  ICP_ACCEL_CAPABILITIES_ECEDMONT;

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}

	/* Full compression capability set before fuse stripping */
	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}

	/* Return only the capabilities the enabled service can use */
	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_CY:
	case SVC_CY2:
		return capabilities_sym | capabilities_asym;
	case SVC_DC:
		return capabilities_dc;
	case SVC_DCC:
		/*
		 * Sym capabilities are available for chaining operations,
		 * but sym crypto instances cannot be supported
		 */
		capabilities_dcc = capabilities_dc | capabilities_sym;
		capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		return capabilities_dcc;
	case SVC_SYM:
		return capabilities_sym;
	case SVC_ASYM:
		return capabilities_asym;
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		return capabilities_asym | capabilities_dc;
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		return capabilities_sym | capabilities_dc;
	default:
		return 0;
	}
}
281 
get_sku(struct adf_hw_device_data * self)282 static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
283 {
284 	return DEV_SKU_1;
285 }
286 
adf_get_arbiter_mapping(struct adf_accel_dev * accel_dev)287 static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
288 {
289 	switch (adf_get_service_enabled(accel_dev)) {
290 	case SVC_DC:
291 		return thrd_to_arb_map_dc;
292 	case SVC_DCC:
293 		return thrd_to_arb_map_dcc;
294 	default:
295 		return default_thrd_to_arb_map;
296 	}
297 }
298 
get_arb_info(struct arb_info * arb_info)299 static void get_arb_info(struct arb_info *arb_info)
300 {
301 	arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
302 	arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
303 	arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
304 }
305 
get_admin_info(struct admin_info * admin_csrs_info)306 static void get_admin_info(struct admin_info *admin_csrs_info)
307 {
308 	admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
309 	admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
310 	admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
311 }
312 
get_heartbeat_clock(struct adf_hw_device_data * self)313 static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
314 {
315 	/*
316 	 * 4XXX uses KPT counter for HB
317 	 */
318 	return ADF_4XXX_KPT_COUNTER_FREQ;
319 }
320 
adf_enable_error_correction(struct adf_accel_dev * accel_dev)321 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
322 {
323 	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
324 	void __iomem *csr = misc_bar->virt_addr;
325 
326 	/* Enable all in errsou3 except VFLR notification on host */
327 	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
328 }
329 
adf_enable_ints(struct adf_accel_dev * accel_dev)330 static void adf_enable_ints(struct adf_accel_dev *accel_dev)
331 {
332 	void __iomem *addr;
333 
334 	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
335 
336 	/* Enable bundle interrupts */
337 	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
338 	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
339 
340 	/* Enable misc interrupts */
341 	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
342 }
343 
/*
 * Power up the device: mask the PM interrupt while doing so, assert
 * DRV_ACTIVE, then poll the PM status register until the device reports
 * the init state (or the poll times out).
 *
 * Return: 0 on success, negative errno from read_poll_timeout() on timeout.
 */
static int adf_init_device(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
	csr |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_GEN4_PM_INIT_STATE,
				ADF_GEN4_PM_POLL_DELAY_US,
				ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
				ADF_GEN4_PM_STATUS);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

	return ret;
}
372 
uof_get_num_objs(void)373 static u32 uof_get_num_objs(void)
374 {
375 	return ARRAY_SIZE(adf_fw_cy_config);
376 }
377 
get_fw_config(struct adf_accel_dev * accel_dev)378 static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
379 {
380 	switch (adf_get_service_enabled(accel_dev)) {
381 	case SVC_CY:
382 	case SVC_CY2:
383 		return adf_fw_cy_config;
384 	case SVC_DC:
385 		return adf_fw_dc_config;
386 	case SVC_DCC:
387 		return adf_fw_dcc_config;
388 	case SVC_SYM:
389 		return adf_fw_sym_config;
390 	case SVC_ASYM:
391 		return adf_fw_asym_config;
392 	case SVC_ASYM_DC:
393 	case SVC_DC_ASYM:
394 		return adf_fw_asym_dc_config;
395 	case SVC_SYM_DC:
396 	case SVC_DC_SYM:
397 		return adf_fw_sym_dc_config;
398 	default:
399 		return NULL;
400 	}
401 }
402 
/*
 * Ring-pair groups: ring pairs 0 and 2 belong to group 0, ring pairs
 * 1 and 3 to group 1 (see get_ring_to_svc_map()).
 */
enum adf_rp_groups {
	RP_GROUP_0 = 0,
	RP_GROUP_1,
	RP_GROUP_COUNT
};
408 
get_ring_to_svc_map(struct adf_accel_dev * accel_dev)409 static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
410 {
411 	enum adf_cfg_service_type rps[RP_GROUP_COUNT];
412 	const struct adf_fw_config *fw_config;
413 	u16 ring_to_svc_map;
414 	int i, j;
415 
416 	fw_config = get_fw_config(accel_dev);
417 	if (!fw_config)
418 		return 0;
419 
420 	/* If dcc, all rings handle compression requests */
421 	if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
422 		for (i = 0; i < RP_GROUP_COUNT; i++)
423 			rps[i] = COMP;
424 		goto set_mask;
425 	}
426 
427 	for (i = 0; i < RP_GROUP_COUNT; i++) {
428 		switch (fw_config[i].ae_mask) {
429 		case ADF_AE_GROUP_0:
430 			j = RP_GROUP_0;
431 			break;
432 		case ADF_AE_GROUP_1:
433 			j = RP_GROUP_1;
434 			break;
435 		default:
436 			return 0;
437 		}
438 
439 		switch (fw_config[i].obj) {
440 		case ADF_FW_SYM_OBJ:
441 			rps[j] = SYM;
442 			break;
443 		case ADF_FW_ASYM_OBJ:
444 			rps[j] = ASYM;
445 			break;
446 		case ADF_FW_DC_OBJ:
447 			rps[j] = COMP;
448 			break;
449 		default:
450 			rps[j] = 0;
451 			break;
452 		}
453 	}
454 
455 set_mask:
456 	ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
457 			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
458 			  rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
459 			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
460 
461 	return ring_to_svc_map;
462 }
463 
/*
 * Look up the firmware object name for object slot @obj_num in the
 * given name table, or NULL if no layout is active or the object index
 * falls outside the table.
 */
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
				const char * const fw_objs[], int num_objs)
{
	const struct adf_fw_config *fw_config = get_fw_config(accel_dev);
	int idx = fw_config ? fw_config[obj_num].obj : -EINVAL;

	return (idx >= 0 && idx < num_objs) ? fw_objs[idx] : NULL;
}
481 
/* Firmware object name lookup for 4xxx devices. */
static const char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	return uof_get_name(accel_dev, obj_num, adf_4xxx_fw_objs,
			    ARRAY_SIZE(adf_4xxx_fw_objs));
}
488 
/* Firmware object name lookup for 402xx devices. */
static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs,
			    ARRAY_SIZE(adf_402xx_fw_objs));
}
495 
/*
 * Return the AE mask for firmware object slot @obj_num in the active
 * layout, or 0 if no layout is active.
 */
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config = get_fw_config(accel_dev);

	return fw_config ? fw_config[obj_num].ae_mask : 0;
}
506 
/*
 * Populate @hw_data with the 4xxx family device description and
 * operations, selecting 402xx or 4xxx firmware names based on @dev_id.
 * Also claims an instance slot on the shared device class; release it
 * with adf_clean_hw_data_4xxx().
 */
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
	/* Device class, topology and ring layout */
	hw_data->dev_class = &adf_4xxx_class;
	hw_data->instance_id = adf_4xxx_class.instances++;
	hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
	hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
	hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
	/* IRQ and error handling */
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	/* Device description accessors */
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_arb_info = get_arb_info;
	hw_data->get_admin_info = get_admin_info;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_sku = get_sku;
	/* Admin interface and arbiter */
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->init_device = adf_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
	/* Firmware names differ between the 402xx and other 4xxx SKUs */
	switch (dev_id) {
	case ADF_402XX_PCI_DEVICE_ID:
		hw_data->fw_name = ADF_402XX_FW;
		hw_data->fw_mmp_name = ADF_402XX_MMP;
		hw_data->uof_get_name = uof_get_name_402xx;
		break;

	default:
		hw_data->fw_name = ADF_4XXX_FW;
		hw_data->fw_mmp_name = ADF_4XXX_MMP;
		hw_data->uof_get_name = uof_get_name_4xxx;
	}
	hw_data->uof_get_num_objs = uof_get_num_objs;
	hw_data->uof_get_ae_mask = uof_get_ae_mask;
	hw_data->set_msix_rttable = set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
	/* Power management and timers */
	hw_data->enable_pm = adf_gen4_enable_pm;
	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
	hw_data->dev_config = adf_gen4_dev_config;
	hw_data->start_timer = adf_gen4_timer_start;
	hw_data->stop_timer = adf_gen4_timer_stop;
	hw_data->get_hb_clock = get_heartbeat_clock;
	hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;

	/* Generation-common CSR, PF/VF and compression op tables */
	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	adf_gen4_init_dc_ops(&hw_data->dc_ops);
}
575 
adf_clean_hw_data_4xxx(struct adf_hw_device_data * hw_data)576 void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
577 {
578 	hw_data->dev_class->instances--;
579 }
580