1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2020 - 2021 Intel Corporation */
3 #include <linux/iopoll.h>
4 #include <adf_accel_devices.h>
5 #include <adf_cfg.h>
6 #include <adf_common_drv.h>
7 #include <adf_gen4_dc.h>
8 #include <adf_gen4_hw_data.h>
9 #include <adf_gen4_pfvf.h>
10 #include <adf_gen4_pm.h>
11 #include "adf_4xxx_hw_data.h"
12 #include "icp_qat_hw.h"
13 
/*
 * Maps a set of accel engines (ae_mask) to the firmware object that must be
 * loaded on them.
 */
struct adf_fw_config {
	u32 ae_mask;
	char *obj_name;
};
18 
/* 4xxx crypto service: AEs 4-7 run symmetric, AEs 0-3 asymmetric, AE 8 admin */
static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
	{0xF0, ADF_4XXX_SYM_OBJ},
	{0xF, ADF_4XXX_ASYM_OBJ},
	{0x100, ADF_4XXX_ADMIN_OBJ},
};
24 
/* 4xxx compression service: AEs 0-7 all run the DC object, AE 8 admin */
static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
	{0xF0, ADF_4XXX_DC_OBJ},
	{0xF, ADF_4XXX_DC_OBJ},
	{0x100, ADF_4XXX_ADMIN_OBJ},
};
30 
/* Same AE layout as adf_4xxx_fw_cy_config, using the 402xx firmware images */
static struct adf_fw_config adf_402xx_fw_cy_config[] = {
	{0xF0, ADF_402XX_SYM_OBJ},
	{0xF, ADF_402XX_ASYM_OBJ},
	{0x100, ADF_402XX_ADMIN_OBJ},
};
36 
/* Same AE layout as adf_4xxx_fw_dc_config, using the 402xx firmware images */
static struct adf_fw_config adf_402xx_fw_dc_config[] = {
	{0xF0, ADF_402XX_DC_OBJ},
	{0xF, ADF_402XX_DC_OBJ},
	{0x100, ADF_402XX_ADMIN_OBJ},
};
42 
/*
 * Worker thread to service arbiter mappings, one entry per accel engine.
 * The last engine (admin, per the fw_config tables above) gets no
 * arbitrated work.
 */
static const u32 thrd_to_arb_map_cy[ADF_4XXX_MAX_ACCELENGINES] = {
	0x5555555, 0x5555555, 0x5555555, 0x5555555,
	0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
	0x0
};
49 
/* Arbiter mapping for the compression service; admin engine unmapped */
static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
	0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
	0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
	0x0
};
55 
/*
 * Device class shared by all 4xxx-family devices. The instance count is
 * bumped in adf_init_hw_data_4xxx() and dropped in adf_clean_hw_data_4xxx().
 */
static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
	.instances = 0,
};
61 
/* Services selectable through the ServicesEnabled configuration entry */
enum dev_services {
	SVC_CY = 0,	/* crypto (symmetric + asymmetric) */
	SVC_DC,		/* compression */
};
66 
/* Configuration string for each service, indexed by enum dev_services */
static const char *const dev_cfg_services[] = {
	[SVC_CY] = ADF_CFG_CY,
	[SVC_DC] = ADF_CFG_DC,
};
71 
72 static int get_service_enabled(struct adf_accel_dev *accel_dev)
73 {
74 	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
75 	int ret;
76 
77 	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
78 				      ADF_SERVICES_ENABLED, services);
79 	if (ret) {
80 		dev_err(&GET_DEV(accel_dev),
81 			ADF_SERVICES_ENABLED " param not found\n");
82 		return ret;
83 	}
84 
85 	ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
86 			   services);
87 	if (ret < 0)
88 		dev_err(&GET_DEV(accel_dev),
89 			"Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
90 			services);
91 
92 	return ret;
93 }
94 
/* Bitmask of available accelerators; fixed for this device family */
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ACCELERATORS_MASK;
}
99 
100 static u32 get_ae_mask(struct adf_hw_device_data *self)
101 {
102 	u32 me_disable = self->fuses;
103 
104 	return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
105 }
106 
/* Number of accelerators; fixed for this device family */
static u32 get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELERATORS;
}
111 
112 static u32 get_num_aes(struct adf_hw_device_data *self)
113 {
114 	if (!self || !self->ae_mask)
115 		return 0;
116 
117 	return hweight32(self->ae_mask);
118 }
119 
/* PCI BAR index holding the PMISC (miscellaneous control) CSRs */
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_PMISC_BAR;
}
124 
/* PCI BAR index holding the ETR (ring) CSRs */
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ETR_BAR;
}
129 
/* PCI BAR index holding the device SRAM */
static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_SRAM_BAR;
}
134 
135 /*
136  * The vector routing table is used to select the MSI-X entry to use for each
137  * interrupt source.
138  * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
139  * The final entry corresponds to VF2PF or error interrupts.
140  * This vector table could be used to configure one MSI-X entry to be shared
141  * between multiple interrupt sources.
142  *
143  * The default routing is set to have a one to one correspondence between the
144  * interrupt source and the MSI-X entry used.
145  */
/* Program the one-to-one default routing described in the comment above */
static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
	/*
	 * "<=" is intentional: entry ADF_4XXX_ETR_MAX_BANKS is the extra
	 * slot for VF2PF/error interrupts.
	 */
	for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
}
155 
156 static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
157 {
158 	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
159 	u32 capabilities_cy, capabilities_dc;
160 	u32 fusectl1;
161 
162 	/* Read accelerator capabilities mask */
163 	pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
164 
165 	capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
166 			  ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
167 			  ICP_ACCEL_CAPABILITIES_CIPHER |
168 			  ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
169 			  ICP_ACCEL_CAPABILITIES_SHA3 |
170 			  ICP_ACCEL_CAPABILITIES_SHA3_EXT |
171 			  ICP_ACCEL_CAPABILITIES_HKDF |
172 			  ICP_ACCEL_CAPABILITIES_ECEDMONT |
173 			  ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
174 			  ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
175 			  ICP_ACCEL_CAPABILITIES_AES_V2;
176 
177 	/* A set bit in fusectl1 means the feature is OFF in this SKU */
178 	if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
179 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
180 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
181 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
182 	}
183 	if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
184 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
185 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
186 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
187 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
188 	}
189 	if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
190 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
191 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
192 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
193 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
194 	}
195 	if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
196 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
197 		capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
198 	}
199 
200 	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
201 			  ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
202 			  ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
203 			  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
204 
205 	if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
206 		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
207 		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
208 		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
209 		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
210 	}
211 
212 	switch (get_service_enabled(accel_dev)) {
213 	case SVC_CY:
214 		return capabilities_cy;
215 	case SVC_DC:
216 		return capabilities_dc;
217 	}
218 
219 	return 0;
220 }
221 
/* SKU reported for all 4xxx devices */
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}
226 
227 static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
228 {
229 	switch (get_service_enabled(accel_dev)) {
230 	case SVC_CY:
231 		return thrd_to_arb_map_cy;
232 	case SVC_DC:
233 		return thrd_to_arb_map_dc;
234 	}
235 
236 	return NULL;
237 }
238 
/* Fill in the arbiter CSR layout: config, base offset, worker-to-service map */
static void get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
	arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
}
245 
/* Fill in the CSR offsets used for admin messaging with the firmware */
static void get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
}
252 
253 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
254 {
255 	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
256 	void __iomem *csr = misc_bar->virt_addr;
257 
258 	/* Enable all in errsou3 except VFLR notification on host */
259 	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
260 }
261 
/* Unmask (write 0 to) the bundle and misc interrupt mask CSRs */
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}
275 
/*
 * adf_init_device() - power up the device
 * @accel_dev: accelerator device to initialize
 *
 * Masks the power-management interrupt source, asserts DRV_ACTIVE, and
 * polls the PM status register until the device reports it is powered up.
 *
 * Return: 0 on success, a negative error code on poll timeout.
 */
static int adf_init_device(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
	csr |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_GEN4_PM_INIT_STATE,
				ADF_GEN4_PM_POLL_DELAY_US,
				ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
				ADF_GEN4_PM_STATUS);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

	return ret;
}
304 
/*
 * Number of firmware objects to load. The CY and DC tables must stay the
 * same length since callers index either one with the same obj_num; the
 * BUILD_BUG_ON_MSG enforces this at compile time for the 4xxx tables
 * (the 402xx tables mirror the same three-entry layout).
 */
static u32 uof_get_num_objs(void)
{
	BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
			 ARRAY_SIZE(adf_4xxx_fw_dc_config),
			 "Size mismatch between adf_4xxx_fw_*_config arrays");

	return ARRAY_SIZE(adf_4xxx_fw_cy_config);
}
313 
314 static char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
315 {
316 	switch (get_service_enabled(accel_dev)) {
317 	case SVC_CY:
318 		return adf_4xxx_fw_cy_config[obj_num].obj_name;
319 	case SVC_DC:
320 		return adf_4xxx_fw_dc_config[obj_num].obj_name;
321 	}
322 
323 	return NULL;
324 }
325 
326 static char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
327 {
328 	switch (get_service_enabled(accel_dev)) {
329 	case SVC_CY:
330 		return adf_402xx_fw_cy_config[obj_num].obj_name;
331 	case SVC_DC:
332 		return adf_402xx_fw_dc_config[obj_num].obj_name;
333 	}
334 
335 	return NULL;
336 }
337 
338 static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
339 {
340 	switch (get_service_enabled(accel_dev)) {
341 	case SVC_CY:
342 		return adf_4xxx_fw_cy_config[obj_num].ae_mask;
343 	case SVC_DC:
344 		return adf_4xxx_fw_dc_config[obj_num].ae_mask;
345 	}
346 
347 	return 0;
348 }
349 
/*
 * adf_init_hw_data_4xxx() - populate the hw_data table for a 4xxx device
 * @hw_data: structure to initialize
 * @dev_id: PCI device ID, used to pick the firmware set (402xx vs. 4xxx)
 *
 * Wires the device constants, per-device callbacks and GEN4-common ops
 * into @hw_data. Pair with adf_clean_hw_data_4xxx() on teardown, which
 * drops the class instance count taken here.
 */
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
	/* Device-family constants */
	hw_data->dev_class = &adf_4xxx_class;
	hw_data->instance_id = adf_4xxx_class.instances++;
	hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
	hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
	hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
	/* Callbacks defined in this file or in common driver code */
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_arb_info = get_arb_info;
	hw_data->get_admin_info = get_admin_info;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_sku = get_sku;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->init_device = adf_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
	/* 402xx parts load their own firmware images; all other IDs use 4xxx */
	switch (dev_id) {
	case ADF_402XX_PCI_DEVICE_ID:
		hw_data->fw_name = ADF_402XX_FW;
		hw_data->fw_mmp_name = ADF_402XX_MMP;
		hw_data->uof_get_name = uof_get_name_402xx;
		break;

	default:
		hw_data->fw_name = ADF_4XXX_FW;
		hw_data->fw_mmp_name = ADF_4XXX_MMP;
		hw_data->uof_get_name = uof_get_name_4xxx;
	}
	hw_data->uof_get_num_objs = uof_get_num_objs;
	hw_data->uof_get_ae_mask = uof_get_ae_mask;
	hw_data->set_msix_rttable = set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
	hw_data->enable_pm = adf_gen4_enable_pm;
	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
	hw_data->dev_config = adf_gen4_dev_config;

	/* GEN4-common CSR, PF/VF and compression op tables */
	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	adf_gen4_init_dc_ops(&hw_data->dc_ops);
}
413 
/* Release the class instance taken in adf_init_hw_data_4xxx() */
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}
418