1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  * Copyright (c) 2019-2020 Marvell International Ltd.
5  */
6 
7 #include <linux/stddef.h>
8 #include <linux/pci.h>
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/delay.h>
12 #include <asm/byteorder.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/string.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/workqueue.h>
18 #include <linux/ethtool.h>
19 #include <linux/etherdevice.h>
20 #include <linux/vmalloc.h>
21 #include <linux/crash_dump.h>
22 #include <linux/crc32.h>
23 #include <linux/qed/qed_if.h>
24 #include <linux/qed/qed_ll2_if.h>
25 #include <net/devlink.h>
26 #include <linux/aer.h>
27 #include <linux/phylink.h>
28 
29 #include "qed.h"
30 #include "qed_sriov.h"
31 #include "qed_sp.h"
32 #include "qed_dev_api.h"
33 #include "qed_ll2.h"
34 #include "qed_fcoe.h"
35 #include "qed_iscsi.h"
36 
37 #include "qed_mcp.h"
38 #include "qed_reg_addr.h"
39 #include "qed_hw.h"
40 #include "qed_selftest.h"
41 #include "qed_debug.h"
42 #include "qed_devlink.h"
43 
44 #define QED_ROCE_QPS			(8192)
45 #define QED_ROCE_DPIS			(8)
46 #define QED_RDMA_SRQS                   QED_ROCE_QPS
47 #define QED_NVM_CFG_GET_FLAGS		0xA
48 #define QED_NVM_CFG_GET_PF_FLAGS	0x1A
49 #define QED_NVM_CFG_MAX_ATTRS		50
50 
51 static char version[] =
52 	"QLogic FastLinQ 4xxxx Core Module qed\n";
53 
54 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
55 MODULE_LICENSE("GPL");
56 
57 #define FW_FILE_VERSION				\
58 	__stringify(FW_MAJOR_VERSION) "."	\
59 	__stringify(FW_MINOR_VERSION) "."	\
60 	__stringify(FW_REVISION_VERSION) "."	\
61 	__stringify(FW_ENGINEERING_VERSION)
62 
63 #define QED_FW_FILE_NAME	\
64 	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
65 
66 MODULE_FIRMWARE(QED_FW_FILE_NAME);
67 
68 /* MFW speed capabilities maps */
69 
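/* Each map entry translates a single MFW speed capability value into the
 * set of ethtool link modes it implies. @cap_arr/@arr_size describe the
 * source bit list and are consumed (and cleared) at module init when the
 * @caps bitmap is populated.
 */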
70 struct qed_mfw_speed_map {
71 	u32		mfw_val;
72 	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
73 
74 	const u32	*cap_arr;
75 	u32		arr_size;
76 };
77 
78 #define QED_MFW_SPEED_MAP(type, arr)		\
79 {						\
80 	.mfw_val	= (type),		\
81 	.cap_arr	= (arr),		\
82 	.arr_size	= ARRAY_SIZE(arr),	\
83 }
84 
85 static const u32 qed_mfw_ext_1g[] __initconst = {
86 	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
87 	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
88 	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
89 };
90 
91 static const u32 qed_mfw_ext_10g[] __initconst = {
92 	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
93 	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
94 	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
95 	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
96 	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
97 	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
98 	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
99 	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
100 };
101 
102 static const u32 qed_mfw_ext_20g[] __initconst = {
103 	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
104 };
105 
106 static const u32 qed_mfw_ext_25g[] __initconst = {
107 	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
108 	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
109 	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
110 };
111 
112 static const u32 qed_mfw_ext_40g[] __initconst = {
113 	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
114 	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
115 	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
116 	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
117 };
118 
119 static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
120 	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
121 	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
122 	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
123 	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
124 	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
125 };
126 
127 static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
128 	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
129 	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
130 	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
131 };
132 
133 static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
134 	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
135 	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
136 	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
137 	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
138 	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
139 };
140 
141 static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
142 	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
143 	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
144 	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
145 	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
146 };
147 
148 static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
149 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
150 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
151 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
152 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
153 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
154 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
155 			  qed_mfw_ext_50g_base_r),
156 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
157 			  qed_mfw_ext_50g_base_r2),
158 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
159 			  qed_mfw_ext_100g_base_r2),
160 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
161 			  qed_mfw_ext_100g_base_r4),
162 };
163 
164 static const u32 qed_mfw_legacy_1g[] __initconst = {
165 	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
166 	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
167 	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
168 };
169 
170 static const u32 qed_mfw_legacy_10g[] __initconst = {
171 	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
172 	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
173 	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
174 	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
175 	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
176 	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
177 	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
178 	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
179 };
180 
181 static const u32 qed_mfw_legacy_20g[] __initconst = {
182 	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
183 };
184 
185 static const u32 qed_mfw_legacy_25g[] __initconst = {
186 	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
187 	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
188 	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
189 };
190 
191 static const u32 qed_mfw_legacy_40g[] __initconst = {
192 	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
193 	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
194 	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
195 	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
196 };
197 
198 static const u32 qed_mfw_legacy_50g[] __initconst = {
199 	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
200 	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
201 	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
202 };
203 
204 static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
205 	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
206 	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
207 	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
208 	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
209 };
210 
211 static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
212 	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
213 			  qed_mfw_legacy_1g),
214 	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
215 			  qed_mfw_legacy_10g),
216 	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
217 			  qed_mfw_legacy_20g),
218 	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
219 			  qed_mfw_legacy_25g),
220 	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
221 			  qed_mfw_legacy_40g),
222 	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
223 			  qed_mfw_legacy_50g),
224 	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
225 			  qed_mfw_legacy_bb_100g),
226 };
227 
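/* The __initconst capability arrays above are only needed while the module
 * loads: qed_mfw_speed_maps_init() folds them into the linkmode bitmaps and
 * drops the array pointers, after which the maps become read-only
 * (__ro_after_init).
 */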
228 static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
229 {
230 	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
231 
232 	map->cap_arr = NULL;
233 	map->arr_size = 0;
234 }
235 
236 static void __init qed_mfw_speed_maps_init(void)
237 {
238 	u32 i;
239 
240 	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
241 		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);
242 
243 	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
244 		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
245 }
246 
247 static int __init qed_init(void)
248 {
249 	pr_info("%s", version);
250 
251 	qed_mfw_speed_maps_init();
252 
253 	return 0;
254 }
255 module_init(qed_init);
256 
257 static void __exit qed_exit(void)
258 {
259 	/* To prevent marking this module as "permanent" */
260 }
261 module_exit(qed_exit);
262 
/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
266 static int qed_set_coherency_mask(struct qed_dev *cdev)
267 {
268 	struct device *dev = &cdev->pdev->dev;
269 
270 	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
271 		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
272 			DP_NOTICE(cdev,
273 				  "Can't request 64-bit consistent allocations\n");
274 			return -EIO;
275 		}
276 	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
277 		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
278 		return -EIO;
279 	}
280 
281 	return 0;
282 }
283 
284 static void qed_free_pci(struct qed_dev *cdev)
285 {
286 	struct pci_dev *pdev = cdev->pdev;
287 
288 	pci_disable_pcie_error_reporting(pdev);
289 
290 	if (cdev->doorbells && cdev->db_size)
291 		iounmap(cdev->doorbells);
292 	if (cdev->regview)
293 		iounmap(cdev->regview);
294 	if (atomic_read(&pdev->enable_cnt) == 1)
295 		pci_release_regions(pdev);
296 
297 	pci_disable_device(pdev);
298 }
299 
300 #define PCI_REVISION_ID_ERROR_VAL	0xff
301 
/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
305 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
306 {
307 	u8 rev_id;
308 	int rc;
309 
310 	cdev->pdev = pdev;
311 
312 	rc = pci_enable_device(pdev);
313 	if (rc) {
314 		DP_NOTICE(cdev, "Cannot enable PCI device\n");
315 		goto err0;
316 	}
317 
318 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
319 		DP_NOTICE(cdev, "No memory region found in bar #0\n");
320 		rc = -EIO;
321 		goto err1;
322 	}
323 
324 	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
325 		DP_NOTICE(cdev, "No memory region found in bar #2\n");
326 		rc = -EIO;
327 		goto err1;
328 	}
329 
330 	if (atomic_read(&pdev->enable_cnt) == 1) {
331 		rc = pci_request_regions(pdev, "qed");
332 		if (rc) {
333 			DP_NOTICE(cdev,
334 				  "Failed to request PCI memory resources\n");
335 			goto err1;
336 		}
337 		pci_set_master(pdev);
338 		pci_save_state(pdev);
339 	}
340 
341 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
342 	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
343 		DP_NOTICE(cdev,
344 			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
345 			  rev_id);
346 		rc = -ENODEV;
347 		goto err2;
348 	}
349 	if (!pci_is_pcie(pdev)) {
350 		DP_NOTICE(cdev, "The bus is not PCI Express\n");
351 		rc = -EIO;
352 		goto err2;
353 	}
354 
355 	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
356 	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
357 		DP_NOTICE(cdev, "Cannot find power management capability\n");
358 
359 	rc = qed_set_coherency_mask(cdev);
360 	if (rc)
361 		goto err2;
362 
363 	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
364 	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
365 	cdev->pci_params.irq = pdev->irq;
366 
367 	cdev->regview = pci_ioremap_bar(pdev, 0);
368 	if (!cdev->regview) {
369 		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
370 		rc = -ENOMEM;
371 		goto err2;
372 	}
373 
374 	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
375 	cdev->db_size = pci_resource_len(cdev->pdev, 2);
376 	if (!cdev->db_size) {
377 		if (IS_PF(cdev)) {
378 			DP_NOTICE(cdev, "No Doorbell bar available\n");
379 			return -EINVAL;
380 		} else {
381 			return 0;
382 		}
383 	}
384 
385 	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
386 
387 	if (!cdev->doorbells) {
388 		DP_NOTICE(cdev, "Cannot map doorbell space\n");
389 		return -ENOMEM;
390 	}
391 
392 	/* AER (Advanced Error reporting) configuration */
393 	rc = pci_enable_pcie_error_reporting(pdev);
394 	if (rc)
395 		DP_VERBOSE(cdev, NETIF_MSG_DRV,
396 			   "Failed to configure PCIe AER [%d]\n", rc);
397 
398 	return 0;
399 
400 err2:
401 	pci_release_regions(pdev);
402 err1:
403 	pci_disable_device(pdev);
404 err0:
405 	return rc;
406 }
407 
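/* Fill the capability/configuration summary that qed exposes to its upper
 * protocol drivers: tunneling offloads, firmware/MFW/MBI versions, PCI BAR
 * layout and RDMA/WoL/aRFS support.
 */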
408 int qed_fill_dev_info(struct qed_dev *cdev,
409 		      struct qed_dev_info *dev_info)
410 {
411 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
412 	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
413 	struct qed_tunnel_info *tun = &cdev->tunnel;
414 	struct qed_ptt  *ptt;
415 
416 	memset(dev_info, 0, sizeof(struct qed_dev_info));
417 
418 	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
419 	    tun->vxlan.b_mode_enabled)
420 		dev_info->vxlan_enable = true;
421 
422 	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
423 	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
424 	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
425 		dev_info->gre_enable = true;
426 
427 	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
428 	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
429 	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
430 		dev_info->geneve_enable = true;
431 
432 	dev_info->num_hwfns = cdev->num_hwfns;
433 	dev_info->pci_mem_start = cdev->pci_params.mem_start;
434 	dev_info->pci_mem_end = cdev->pci_params.mem_end;
435 	dev_info->pci_irq = cdev->pci_params.irq;
436 	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
437 	dev_info->dev_type = cdev->type;
438 	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
439 
440 	if (IS_PF(cdev)) {
441 		dev_info->fw_major = FW_MAJOR_VERSION;
442 		dev_info->fw_minor = FW_MINOR_VERSION;
443 		dev_info->fw_rev = FW_REVISION_VERSION;
444 		dev_info->fw_eng = FW_ENGINEERING_VERSION;
445 		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
446 						       &cdev->mf_bits);
447 		if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
448 			dev_info->b_arfs_capable = true;
449 		dev_info->tx_switching = true;
450 
451 		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
452 			dev_info->wol_support = true;
453 
454 		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
455 
456 		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
457 	} else {
458 		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
459 				      &dev_info->fw_minor, &dev_info->fw_rev,
460 				      &dev_info->fw_eng);
461 	}
462 
463 	if (IS_PF(cdev)) {
464 		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
465 		if (ptt) {
466 			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
467 					    &dev_info->mfw_rev, NULL);
468 
469 			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
470 					    &dev_info->mbi_version);
471 
472 			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
473 					       &dev_info->flash_size);
474 
475 			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
476 		}
477 	} else {
478 		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
479 				    &dev_info->mfw_rev, NULL);
480 	}
481 
482 	dev_info->mtu = hw_info->mtu;
483 	cdev->common_dev_info = *dev_info;
484 
485 	return 0;
486 }
487 
488 static void qed_free_cdev(struct qed_dev *cdev)
489 {
	kfree(cdev);
491 }
492 
493 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
494 {
495 	struct qed_dev *cdev;
496 
497 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
498 	if (!cdev)
499 		return cdev;
500 
501 	qed_init_struct(cdev);
502 
503 	return cdev;
504 }
505 
506 /* Sets the requested power state */
507 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
508 {
509 	if (!cdev)
510 		return -ENODEV;
511 
512 	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
513 	return 0;
514 }
515 
516 /* probing */
517 static struct qed_dev *qed_probe(struct pci_dev *pdev,
518 				 struct qed_probe_params *params)
519 {
520 	struct qed_dev *cdev;
521 	int rc;
522 
523 	cdev = qed_alloc_cdev(pdev);
524 	if (!cdev)
525 		goto err0;
526 
527 	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
528 	cdev->protocol = params->protocol;
529 
530 	if (params->is_vf)
531 		cdev->b_is_vf = true;
532 
533 	qed_init_dp(cdev, params->dp_module, params->dp_level);
534 
535 	cdev->recov_in_prog = params->recov_in_prog;
536 
537 	rc = qed_init_pci(cdev, pdev);
538 	if (rc) {
539 		DP_ERR(cdev, "init pci failed\n");
540 		goto err1;
541 	}
542 	DP_INFO(cdev, "PCI init completed successfully\n");
543 
544 	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
545 	if (rc) {
546 		DP_ERR(cdev, "hw prepare failed\n");
547 		goto err2;
548 	}
549 
550 	DP_INFO(cdev, "qed_probe completed successfully\n");
551 
552 	return cdev;
553 
554 err2:
555 	qed_free_pci(cdev);
556 err1:
557 	qed_free_cdev(cdev);
558 err0:
559 	return NULL;
560 }
561 
562 static void qed_remove(struct qed_dev *cdev)
563 {
564 	if (!cdev)
565 		return;
566 
567 	qed_hw_remove(cdev);
568 
569 	qed_free_pci(cdev);
570 
571 	qed_set_power_state(cdev, PCI_D3hot);
572 
573 	qed_free_cdev(cdev);
574 }
575 
576 static void qed_disable_msix(struct qed_dev *cdev)
577 {
578 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
579 		pci_disable_msix(cdev->pdev);
580 		kfree(cdev->int_params.msix_table);
581 	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
582 		pci_disable_msi(cdev->pdev);
583 	}
584 
585 	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
586 }
587 
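/* Try to enable the full requested MSI-X vector count; if the system grants
 * fewer, retry with the largest multiple of the hwfn count that fits. PFs
 * may run with a reduced vector count, VFs must get exactly what they asked
 * for.
 */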
588 static int qed_enable_msix(struct qed_dev *cdev,
589 			   struct qed_int_params *int_params)
590 {
591 	int i, rc, cnt;
592 
593 	cnt = int_params->in.num_vectors;
594 
595 	for (i = 0; i < cnt; i++)
596 		int_params->msix_table[i].entry = i;
597 
598 	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
599 				   int_params->in.min_msix_cnt, cnt);
600 	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
601 	    (rc % cdev->num_hwfns)) {
602 		pci_disable_msix(cdev->pdev);
603 
604 		/* If fastpath is initialized, we need at least one interrupt
605 		 * per hwfn [and the slow path interrupts]. New requested number
606 		 * should be a multiple of the number of hwfns.
607 		 */
608 		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
609 		DP_NOTICE(cdev,
610 			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
611 			  cnt, int_params->in.num_vectors);
612 		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
613 					   cnt);
614 		if (!rc)
615 			rc = cnt;
616 	}
617 
	/* For VFs, we should return with an error in case we didn't get the
	 * exact number of MSI-X vectors we requested.
	 * Not doing that will lead to a crash when starting queues for
	 * this VF.
	 */
623 	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
624 		/* MSI-x configuration was achieved */
625 		int_params->out.int_mode = QED_INT_MODE_MSIX;
626 		int_params->out.num_vectors = rc;
627 		rc = 0;
628 	} else {
629 		DP_NOTICE(cdev,
630 			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
631 			  cnt, rc);
632 	}
633 
634 	return rc;
635 }
636 
/* This function sets the int mode and the number of enabled MSI-X vectors */
638 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
639 {
640 	struct qed_int_params *int_params = &cdev->int_params;
641 	struct msix_entry *tbl;
642 	int rc = 0, cnt;
643 
644 	switch (int_params->in.int_mode) {
645 	case QED_INT_MODE_MSIX:
646 		/* Allocate MSIX table */
647 		cnt = int_params->in.num_vectors;
648 		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
649 		if (!int_params->msix_table) {
650 			rc = -ENOMEM;
651 			goto out;
652 		}
653 
654 		/* Enable MSIX */
655 		rc = qed_enable_msix(cdev, int_params);
656 		if (!rc)
657 			goto out;
658 
659 		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
660 		kfree(int_params->msix_table);
661 		if (force_mode)
662 			goto out;
663 		fallthrough;
664 
665 	case QED_INT_MODE_MSI:
666 		if (cdev->num_hwfns == 1) {
667 			rc = pci_enable_msi(cdev->pdev);
668 			if (!rc) {
669 				int_params->out.int_mode = QED_INT_MODE_MSI;
670 				goto out;
671 			}
672 
673 			DP_NOTICE(cdev, "Failed to enable MSI\n");
674 			if (force_mode)
675 				goto out;
676 		}
677 		fallthrough;
678 
	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
683 	default:
684 		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
685 			  int_params->in.int_mode);
686 		rc = -EINVAL;
687 	}
688 
689 out:
690 	if (!rc)
691 		DP_INFO(cdev, "Using %s interrupts\n",
692 			int_params->out.int_mode == QED_INT_MODE_INTA ?
693 			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
694 			"MSI" : "MSIX");
695 	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
696 
697 	return rc;
698 }
699 
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
702 {
703 	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
704 	int relative_idx = index / cdev->num_hwfns;
705 
706 	hwfn->simd_proto_handler[relative_idx].func = handler;
707 	hwfn->simd_proto_handler[relative_idx].token = token;
708 }
709 
710 static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
711 {
712 	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
713 	int relative_idx = index / cdev->num_hwfns;
714 
715 	memset(&hwfn->simd_proto_handler[relative_idx], 0,
716 	       sizeof(struct qed_simd_fp_handler));
717 }
718 
719 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
720 {
721 	tasklet_schedule((struct tasklet_struct *)tasklet);
722 	return IRQ_HANDLED;
723 }
724 
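/* Single-IRQ (INTA/MSI) handler: bit 0 of each hwfn's IGU status is the
 * slowpath interrupt, the remaining bits map to the per-protocol fastpath
 * handlers registered via qed_simd_handler_config().
 */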
725 static irqreturn_t qed_single_int(int irq, void *dev_instance)
726 {
727 	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
728 	struct qed_hwfn *hwfn;
729 	irqreturn_t rc = IRQ_NONE;
730 	u64 status;
731 	int i, j;
732 
733 	for (i = 0; i < cdev->num_hwfns; i++) {
734 		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
735 
736 		if (!status)
737 			continue;
738 
739 		hwfn = &cdev->hwfns[i];
740 
741 		/* Slowpath interrupt */
742 		if (unlikely(status & 0x1)) {
743 			tasklet_schedule(&hwfn->sp_dpc);
744 			status &= ~0x1;
745 			rc = IRQ_HANDLED;
746 		}
747 
748 		/* Fastpath interrupts */
749 		for (j = 0; j < 64; j++) {
750 			if ((0x2ULL << j) & status) {
751 				struct qed_simd_fp_handler *p_handler =
752 					&hwfn->simd_proto_handler[j];
753 
754 				if (p_handler->func)
755 					p_handler->func(p_handler->token);
756 				else
757 					DP_NOTICE(hwfn,
758 						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
759 						  j, status);
760 
761 				status &= ~(0x2ULL << j);
762 				rc = IRQ_HANDLED;
763 			}
764 		}
765 
766 		if (unlikely(status))
767 			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
768 				   "got an unknown interrupt status 0x%llx\n",
769 				   status);
770 	}
771 
772 	return rc;
773 }
774 
775 int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
776 {
777 	struct qed_dev *cdev = hwfn->cdev;
778 	u32 int_mode;
779 	int rc = 0;
780 	u8 id;
781 
782 	int_mode = cdev->int_params.out.int_mode;
783 	if (int_mode == QED_INT_MODE_MSIX) {
784 		id = hwfn->my_id;
785 		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
786 			 id, cdev->pdev->bus->number,
787 			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
788 		rc = request_irq(cdev->int_params.msix_table[id].vector,
789 				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
790 	} else {
791 		unsigned long flags = 0;
792 
793 		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
794 			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
795 			 PCI_FUNC(cdev->pdev->devfn));
796 
797 		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
798 			flags |= IRQF_SHARED;
799 
800 		rc = request_irq(cdev->pdev->irq, qed_single_int,
801 				 flags, cdev->name, cdev);
802 	}
803 
804 	if (rc)
805 		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
806 	else
807 		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
808 			   "Requested slowpath %s\n",
809 			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
810 
811 	return rc;
812 }
813 
814 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
815 {
816 	/* Calling the disable function will make sure that any
817 	 * currently-running function is completed. The following call to the
818 	 * enable function makes this sequence a flush-like operation.
819 	 */
820 	if (p_hwfn->b_sp_dpc_enabled) {
821 		tasklet_disable(&p_hwfn->sp_dpc);
822 		tasklet_enable(&p_hwfn->sp_dpc);
823 	}
824 }
825 
826 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
827 {
828 	struct qed_dev *cdev = p_hwfn->cdev;
829 	u8 id = p_hwfn->my_id;
830 	u32 int_mode;
831 
832 	int_mode = cdev->int_params.out.int_mode;
833 	if (int_mode == QED_INT_MODE_MSIX)
834 		synchronize_irq(cdev->int_params.msix_table[id].vector);
835 	else
836 		synchronize_irq(cdev->pdev->irq);
837 
838 	qed_slowpath_tasklet_flush(p_hwfn);
839 }
840 
841 static void qed_slowpath_irq_free(struct qed_dev *cdev)
842 {
843 	int i;
844 
845 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
846 		for_each_hwfn(cdev, i) {
847 			if (!cdev->hwfns[i].b_int_requested)
848 				break;
849 			synchronize_irq(cdev->int_params.msix_table[i].vector);
850 			free_irq(cdev->int_params.msix_table[i].vector,
851 				 &cdev->hwfns[i].sp_dpc);
852 		}
853 	} else {
854 		if (QED_LEADING_HWFN(cdev)->b_int_requested)
855 			free_irq(cdev->pdev->irq, cdev);
856 	}
857 	qed_int_disable_post_isr_release(cdev);
858 }
859 
860 static int qed_nic_stop(struct qed_dev *cdev)
861 {
862 	int i, rc;
863 
864 	rc = qed_hw_stop(cdev);
865 
866 	for (i = 0; i < cdev->num_hwfns; i++) {
867 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
868 
869 		if (p_hwfn->b_sp_dpc_enabled) {
870 			tasklet_disable(&p_hwfn->sp_dpc);
871 			p_hwfn->b_sp_dpc_enabled = false;
872 			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
873 				   "Disabled sp tasklet [hwfn %d] at %p\n",
874 				   i, &p_hwfn->sp_dpc);
875 		}
876 	}
877 
878 	qed_dbg_pf_exit(cdev);
879 
880 	return rc;
881 }
882 
883 static int qed_nic_setup(struct qed_dev *cdev)
884 {
885 	int rc, i;
886 
887 	/* Determine if interface is going to require LL2 */
888 	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
889 		for (i = 0; i < cdev->num_hwfns; i++) {
890 			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
891 
892 			p_hwfn->using_ll2 = true;
893 		}
894 	}
895 
896 	rc = qed_resc_alloc(cdev);
897 	if (rc)
898 		return rc;
899 
900 	DP_INFO(cdev, "Allocated qed resources\n");
901 
902 	qed_resc_setup(cdev);
903 
904 	return rc;
905 }
906 
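/* Clamp the protocol driver's fastpath vector request to what the chosen
 * interrupt mode can actually provide, and mark whether any fastpath
 * vectors are now in use.
 */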
907 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
908 {
909 	int limit = 0;
910 
911 	/* Mark the fastpath as free/used */
912 	cdev->int_params.fp_initialized = cnt ? true : false;
913 
914 	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
915 		limit = cdev->num_hwfns * 63;
916 	else if (cdev->int_params.fp_msix_cnt)
917 		limit = cdev->int_params.fp_msix_cnt;
918 
919 	if (!limit)
920 		return -ENOMEM;
921 
922 	return min_t(int, cnt, limit);
923 }
924 
925 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
926 {
927 	memset(info, 0, sizeof(struct qed_int_info));
928 
929 	if (!cdev->int_params.fp_initialized) {
930 		DP_INFO(cdev,
931 			"Protocol driver requested interrupt information, but its support is not yet configured\n");
932 		return -EINVAL;
933 	}
934 
935 	/* Need to expose only MSI-X information; Single IRQ is handled solely
936 	 * by qed.
937 	 */
938 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
939 		int msix_base = cdev->int_params.fp_msix_base;
940 
941 		info->msix_cnt = cdev->int_params.fp_msix_cnt;
942 		info->msix = &cdev->int_params.msix_table[msix_base];
943 	}
944 
945 	return 0;
946 }
947 
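/* Decide on the PF interrupt scheme: one slowpath vector per hwfn plus one
 * vector per status block, then split the remaining fastpath vectors
 * between L2 queues and RDMA when applicable.
 */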
948 static int qed_slowpath_setup_int(struct qed_dev *cdev,
949 				  enum qed_int_mode int_mode)
950 {
951 	struct qed_sb_cnt_info sb_cnt_info;
952 	int num_l2_queues = 0;
953 	int rc;
954 	int i;
955 
956 	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
957 		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
958 		return -EINVAL;
959 	}
960 
961 	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
962 	cdev->int_params.in.int_mode = int_mode;
963 	for_each_hwfn(cdev, i) {
964 		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
965 		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
966 		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
967 		cdev->int_params.in.num_vectors++; /* slowpath */
968 	}
969 
970 	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
971 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
972 
973 	if (is_kdump_kernel()) {
974 		DP_INFO(cdev,
975 			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
976 			cdev->int_params.in.min_msix_cnt);
977 		cdev->int_params.in.num_vectors =
978 			cdev->int_params.in.min_msix_cnt;
979 	}
980 
981 	rc = qed_set_int_mode(cdev, false);
982 	if (rc)  {
983 		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
984 		return rc;
985 	}
986 
987 	cdev->int_params.fp_msix_base = cdev->num_hwfns;
988 	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
989 				       cdev->num_hwfns;
990 
991 	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
992 	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
993 		return 0;
994 
995 	for_each_hwfn(cdev, i)
996 		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
997 
998 	DP_VERBOSE(cdev, QED_MSG_RDMA,
999 		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
1000 		   cdev->int_params.fp_msix_cnt, num_l2_queues);
1001 
1002 	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
1003 		cdev->int_params.rdma_msix_cnt =
1004 			(cdev->int_params.fp_msix_cnt - num_l2_queues)
1005 			/ cdev->num_hwfns;
1006 		cdev->int_params.rdma_msix_base =
1007 			cdev->int_params.fp_msix_base + num_l2_queues;
1008 		cdev->int_params.fp_msix_cnt = num_l2_queues;
1009 	} else {
1010 		cdev->int_params.rdma_msix_cnt = 0;
1011 	}
1012 
1013 	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
1014 		   cdev->int_params.rdma_msix_cnt,
1015 		   cdev->int_params.rdma_msix_base);
1016 
1017 	return 0;
1018 }
1019 
1020 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
1021 {
1022 	int rc;
1023 
1024 	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
1025 	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
1026 
1027 	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
1028 			    &cdev->int_params.in.num_vectors);
1029 	if (cdev->num_hwfns > 1) {
1030 		u8 vectors = 0;
1031 
1032 		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
1033 		cdev->int_params.in.num_vectors += vectors;
1034 	}
1035 
1036 	/* We want a minimum of one fastpath vector per vf hwfn */
1037 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
1038 
1039 	rc = qed_set_int_mode(cdev, true);
1040 	if (rc)
1041 		return rc;
1042 
1043 	cdev->int_params.fp_msix_base = 0;
1044 	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
1045 
1046 	return 0;
1047 }
1048 
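/* Inflate the zipped firmware section in @input_buf into @unzip_buf.
 * Returns the number of dwords written, or 0 on failure.
 */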
1049 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
1050 		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
1051 {
1052 	int rc;
1053 
1054 	p_hwfn->stream->next_in = input_buf;
1055 	p_hwfn->stream->avail_in = input_len;
1056 	p_hwfn->stream->next_out = unzip_buf;
1057 	p_hwfn->stream->avail_out = max_size;
1058 
1059 	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
1060 
1061 	if (rc != Z_OK) {
1062 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
1063 			   rc);
1064 		return 0;
1065 	}
1066 
1067 	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
1068 	zlib_inflateEnd(p_hwfn->stream);
1069 
1070 	if (rc != Z_OK && rc != Z_STREAM_END) {
1071 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
1072 			   p_hwfn->stream->msg, rc);
1073 		return 0;
1074 	}
1075 
1076 	return p_hwfn->stream->total_out / 4;
1077 }
1078 
1079 static int qed_alloc_stream_mem(struct qed_dev *cdev)
1080 {
1081 	int i;
1082 	void *workspace;
1083 
1084 	for_each_hwfn(cdev, i) {
1085 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1086 
1087 		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
1088 		if (!p_hwfn->stream)
1089 			return -ENOMEM;
1090 
1091 		workspace = vzalloc(zlib_inflate_workspacesize());
1092 		if (!workspace)
1093 			return -ENOMEM;
1094 		p_hwfn->stream->workspace = workspace;
1095 	}
1096 
1097 	return 0;
1098 }
1099 
1100 static void qed_free_stream_mem(struct qed_dev *cdev)
1101 {
1102 	int i;
1103 
1104 	for_each_hwfn(cdev, i) {
1105 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1106 
1107 		if (!p_hwfn->stream)
1108 			return;
1109 
1110 		vfree(p_hwfn->stream->workspace);
1111 		kfree(p_hwfn->stream);
1112 	}
1113 }
1114 
1115 static void qed_update_pf_params(struct qed_dev *cdev,
1116 				 struct qed_pf_params *params)
1117 {
1118 	int i;
1119 
1120 	if (IS_ENABLED(CONFIG_QED_RDMA)) {
1121 		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
1122 		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
1123 		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* Divide the MRs by 3 to avoid MF ILT overflow */
1125 		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
1126 	}
1127 
1128 	if (cdev->num_hwfns > 1 || IS_VF(cdev))
1129 		params->eth_pf_params.num_arfs_filters = 0;
1130 
1131 	/* In case we might support RDMA, don't allow qede to be greedy
1132 	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
1133 	 * per hwfn.
1134 	 */
1135 	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
1136 		u16 *num_cons;
1137 
1138 		num_cons = &params->eth_pf_params.num_cons;
1139 		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
1140 	}
1141 
1142 	for (i = 0; i < cdev->num_hwfns; i++) {
1143 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1144 
1145 		p_hwfn->pf_params = *params;
1146 	}
1147 }
1148 
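/* Periodic doorbell recovery: once started, the slowpath task re-arms
 * itself up to QED_PERIODIC_DB_REC_COUNT times per trigger, running every
 * QED_PERIODIC_DB_REC_INTERVAL_MS milliseconds.
 */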
1149 #define QED_PERIODIC_DB_REC_COUNT		10
1150 #define QED_PERIODIC_DB_REC_INTERVAL_MS		100
1151 #define QED_PERIODIC_DB_REC_INTERVAL \
1152 	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
1153 
1154 static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
1155 				     enum qed_slowpath_wq_flag wq_flag,
1156 				     unsigned long delay)
1157 {
1158 	if (!hwfn->slowpath_wq_active)
1159 		return -EINVAL;
1160 
1161 	/* Memory barrier for setting atomic bit */
1162 	smp_mb__before_atomic();
1163 	set_bit(wq_flag, &hwfn->slowpath_task_flags);
1164 	smp_mb__after_atomic();
1165 	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
1166 
1167 	return 0;
1168 }
1169 
1170 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
1171 {
1172 	/* Reset periodic Doorbell Recovery counter */
1173 	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;
1174 
1175 	/* Don't schedule periodic Doorbell Recovery if already scheduled */
1176 	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
1177 		     &p_hwfn->slowpath_task_flags))
1178 		return;
1179 
1180 	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
1181 				  QED_PERIODIC_DB_REC_INTERVAL);
1182 }
1183 
1184 static void qed_slowpath_wq_stop(struct qed_dev *cdev)
1185 {
1186 	int i;
1187 
1188 	if (IS_VF(cdev))
1189 		return;
1190 
1191 	for_each_hwfn(cdev, i) {
1192 		if (!cdev->hwfns[i].slowpath_wq)
1193 			continue;
1194 
1195 		/* Stop queuing new delayed works */
1196 		cdev->hwfns[i].slowpath_wq_active = false;
1197 
1198 		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
1199 		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
1200 	}
1201 }
1202 
1203 static void qed_slowpath_task(struct work_struct *work)
1204 {
1205 	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
1206 					     slowpath_task.work);
1207 	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
1208 
1209 	if (!ptt) {
1210 		if (hwfn->slowpath_wq_active)
1211 			queue_delayed_work(hwfn->slowpath_wq,
1212 					   &hwfn->slowpath_task, 0);
1213 
1214 		return;
1215 	}
1216 
1217 	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
1218 			       &hwfn->slowpath_task_flags))
1219 		qed_mfw_process_tlv_req(hwfn, ptt);
1220 
1221 	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
1222 			       &hwfn->slowpath_task_flags)) {
1223 		/* skip qed_db_rec_handler during recovery/unload */
1224 		if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
1225 			goto out;
1226 
1227 		qed_db_rec_handler(hwfn, ptt);
1228 		if (hwfn->periodic_db_rec_count--)
1229 			qed_slowpath_delayed_work(hwfn,
1230 						  QED_SLOWPATH_PERIODIC_DB_REC,
1231 						  QED_PERIODIC_DB_REC_INTERVAL);
1232 	}
1233 
1234 out:
1235 	qed_ptt_release(hwfn, ptt);
1236 }
1237 
1238 static int qed_slowpath_wq_start(struct qed_dev *cdev)
1239 {
1240 	struct qed_hwfn *hwfn;
1241 	char name[NAME_SIZE];
1242 	int i;
1243 
1244 	if (IS_VF(cdev))
1245 		return 0;
1246 
1247 	for_each_hwfn(cdev, i) {
1248 		hwfn = &cdev->hwfns[i];
1249 
1250 		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
1251 			 cdev->pdev->bus->number,
1252 			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
1253 
1254 		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
1255 		if (!hwfn->slowpath_wq) {
1256 			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
1257 			return -ENOMEM;
1258 		}
1259 
1260 		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
1261 		hwfn->slowpath_wq_active = true;
1262 	}
1263 
1264 	return 0;
1265 }
1266 
1267 static int qed_slowpath_start(struct qed_dev *cdev,
1268 			      struct qed_slowpath_params *params)
1269 {
1270 	struct qed_drv_load_params drv_load_params;
1271 	struct qed_hw_init_params hw_init_params;
1272 	struct qed_mcp_drv_version drv_version;
1273 	struct qed_tunnel_info tunn_info;
1274 	const u8 *data = NULL;
1275 	struct qed_hwfn *hwfn;
1276 	struct qed_ptt *p_ptt;
1277 	int rc = -EINVAL;
1278 
1279 	if (qed_iov_wq_start(cdev))
1280 		goto err;
1281 
1282 	if (qed_slowpath_wq_start(cdev))
1283 		goto err;
1284 
1285 	if (IS_PF(cdev)) {
1286 		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
1287 				      &cdev->pdev->dev);
1288 		if (rc) {
1289 			DP_NOTICE(cdev,
1290 				  "Failed to find fw file - /lib/firmware/%s\n",
1291 				  QED_FW_FILE_NAME);
1292 			goto err;
1293 		}
1294 
1295 		if (cdev->num_hwfns == 1) {
1296 			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1297 			if (p_ptt) {
1298 				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
1299 			} else {
1300 				DP_NOTICE(cdev,
1301 					  "Failed to acquire PTT for aRFS\n");
1302 				goto err;
1303 			}
1304 		}
1305 	}
1306 
1307 	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
1308 	rc = qed_nic_setup(cdev);
1309 	if (rc)
1310 		goto err;
1311 
1312 	if (IS_PF(cdev))
1313 		rc = qed_slowpath_setup_int(cdev, params->int_mode);
1314 	else
1315 		rc = qed_slowpath_vf_setup_int(cdev);
1316 	if (rc)
1317 		goto err1;
1318 
1319 	if (IS_PF(cdev)) {
1320 		/* Allocate stream for unzipping */
1321 		rc = qed_alloc_stream_mem(cdev);
1322 		if (rc)
1323 			goto err2;
1324 
1325 		/* First Dword used to differentiate between various sources */
1326 		data = cdev->firmware->data + sizeof(u32);
1327 
1328 		qed_dbg_pf_init(cdev);
1329 	}
1330 
1331 	/* Start the slowpath */
1332 	memset(&hw_init_params, 0, sizeof(hw_init_params));
1333 	memset(&tunn_info, 0, sizeof(tunn_info));
1334 	tunn_info.vxlan.b_mode_enabled = true;
1335 	tunn_info.l2_gre.b_mode_enabled = true;
1336 	tunn_info.ip_gre.b_mode_enabled = true;
1337 	tunn_info.l2_geneve.b_mode_enabled = true;
1338 	tunn_info.ip_geneve.b_mode_enabled = true;
1339 	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1340 	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1341 	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1342 	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1343 	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1344 	hw_init_params.p_tunn = &tunn_info;
1345 	hw_init_params.b_hw_start = true;
1346 	hw_init_params.int_mode = cdev->int_params.out.int_mode;
1347 	hw_init_params.allow_npar_tx_switch = true;
1348 	hw_init_params.bin_fw_data = data;
1349 
1350 	memset(&drv_load_params, 0, sizeof(drv_load_params));
1351 	drv_load_params.is_crash_kernel = is_kdump_kernel();
1352 	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
1353 	drv_load_params.avoid_eng_reset = false;
1354 	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
1355 	hw_init_params.p_drv_load_params = &drv_load_params;
1356 
1357 	rc = qed_hw_init(cdev, &hw_init_params);
1358 	if (rc)
1359 		goto err2;
1360 
1361 	DP_INFO(cdev,
1362 		"HW initialization and function start completed successfully\n");
1363 
1364 	if (IS_PF(cdev)) {
1365 		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
1366 					   BIT(QED_MODE_L2GENEVE_TUNN) |
1367 					   BIT(QED_MODE_IPGENEVE_TUNN) |
1368 					   BIT(QED_MODE_L2GRE_TUNN) |
1369 					   BIT(QED_MODE_IPGRE_TUNN));
1370 	}
1371 
1372 	/* Allocate LL2 interface if needed */
1373 	if (QED_LEADING_HWFN(cdev)->using_ll2) {
1374 		rc = qed_ll2_alloc_if(cdev);
1375 		if (rc)
1376 			goto err3;
1377 	}
1378 	if (IS_PF(cdev)) {
1379 		hwfn = QED_LEADING_HWFN(cdev);
1380 		drv_version.version = (params->drv_major << 24) |
1381 				      (params->drv_minor << 16) |
1382 				      (params->drv_rev << 8) |
1383 				      (params->drv_eng);
1384 		strlcpy(drv_version.name, params->name,
1385 			MCP_DRV_VER_STR_SIZE - 4);
1386 		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
1387 					      &drv_version);
1388 		if (rc) {
1389 			DP_NOTICE(cdev, "Failed sending drv version command\n");
1390 			goto err4;
1391 		}
1392 	}
1393 
1394 	qed_reset_vport_stats(cdev);
1395 
1396 	return 0;
1397 
1398 err4:
1399 	qed_ll2_dealloc_if(cdev);
1400 err3:
1401 	qed_hw_stop(cdev);
1402 err2:
1403 	qed_hw_timers_stop_all(cdev);
1404 	if (IS_PF(cdev))
1405 		qed_slowpath_irq_free(cdev);
1406 	qed_free_stream_mem(cdev);
1407 	qed_disable_msix(cdev);
1408 err1:
1409 	qed_resc_free(cdev);
1410 err:
1411 	if (IS_PF(cdev))
1412 		release_firmware(cdev->firmware);
1413 
1414 	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
1415 	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
1416 		qed_ptt_release(QED_LEADING_HWFN(cdev),
1417 				QED_LEADING_HWFN(cdev)->p_arfs_ptt);
1418 
1419 	qed_iov_wq_stop(cdev, false);
1420 
1421 	qed_slowpath_wq_stop(cdev);
1422 
1423 	return rc;
1424 }
1425 
1426 static int qed_slowpath_stop(struct qed_dev *cdev)
1427 {
1428 	if (!cdev)
1429 		return -ENODEV;
1430 
1431 	qed_slowpath_wq_stop(cdev);
1432 
1433 	qed_ll2_dealloc_if(cdev);
1434 
1435 	if (IS_PF(cdev)) {
1436 		if (cdev->num_hwfns == 1)
1437 			qed_ptt_release(QED_LEADING_HWFN(cdev),
1438 					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
1439 		qed_free_stream_mem(cdev);
1440 		if (IS_QED_ETH_IF(cdev))
1441 			qed_sriov_disable(cdev, true);
1442 	}
1443 
1444 	qed_nic_stop(cdev);
1445 
1446 	if (IS_PF(cdev))
1447 		qed_slowpath_irq_free(cdev);
1448 
1449 	qed_disable_msix(cdev);
1450 
1451 	qed_resc_free(cdev);
1452 
1453 	qed_iov_wq_stop(cdev, true);
1454 
1455 	if (IS_PF(cdev))
1456 		release_firmware(cdev->firmware);
1457 
1458 	return 0;
1459 }
1460 
1461 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
1462 {
1463 	int i;
1464 
1465 	memcpy(cdev->name, name, NAME_SIZE);
1466 	for_each_hwfn(cdev, i)
1467 		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
1468 }
1469 
1470 static u32 qed_sb_init(struct qed_dev *cdev,
1471 		       struct qed_sb_info *sb_info,
1472 		       void *sb_virt_addr,
1473 		       dma_addr_t sb_phy_addr, u16 sb_id,
1474 		       enum qed_sb_type type)
1475 {
1476 	struct qed_hwfn *p_hwfn;
1477 	struct qed_ptt *p_ptt;
1478 	u16 rel_sb_id;
1479 	u32 rc;
1480 
1481 	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1482 	if (type == QED_SB_TYPE_L2_QUEUE) {
1483 		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1484 		rel_sb_id = sb_id / cdev->num_hwfns;
1485 	} else {
1486 		p_hwfn = QED_AFFIN_HWFN(cdev);
1487 		rel_sb_id = sb_id;
1488 	}
1489 
1490 	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1491 		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1492 		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1493 
1494 	if (IS_PF(p_hwfn->cdev)) {
1495 		p_ptt = qed_ptt_acquire(p_hwfn);
1496 		if (!p_ptt)
1497 			return -EBUSY;
1498 
1499 		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
1500 				     sb_phy_addr, rel_sb_id);
1501 		qed_ptt_release(p_hwfn, p_ptt);
1502 	} else {
1503 		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
1504 				     sb_phy_addr, rel_sb_id);
1505 	}
1506 
1507 	return rc;
1508 }
1509 
1510 static u32 qed_sb_release(struct qed_dev *cdev,
1511 			  struct qed_sb_info *sb_info,
1512 			  u16 sb_id,
1513 			  enum qed_sb_type type)
1514 {
1515 	struct qed_hwfn *p_hwfn;
1516 	u16 rel_sb_id;
1517 	u32 rc;
1518 
1519 	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1520 	if (type == QED_SB_TYPE_L2_QUEUE) {
1521 		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1522 		rel_sb_id = sb_id / cdev->num_hwfns;
1523 	} else {
1524 		p_hwfn = QED_AFFIN_HWFN(cdev);
1525 		rel_sb_id = sb_id;
1526 	}
1527 
	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1531 
1532 	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1533 
1534 	return rc;
1535 }
1536 
1537 static bool qed_can_link_change(struct qed_dev *cdev)
1538 {
1539 	return true;
1540 }
1541 
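/* Translate the generic link parameters into the MFW extended-speed
 * representation (autoneg, advertised/forced speeds and forced FEC modes),
 * used when the MFW reports extended speed support.
 */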
1542 static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
1543 				     const struct qed_link_params *params)
1544 {
1545 	struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
1546 	const struct qed_mfw_speed_map *map;
1547 	u32 i;
1548 
1549 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1550 		ext_speed->autoneg = !!params->autoneg;
1551 
1552 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1553 		ext_speed->advertised_speeds = 0;
1554 
1555 		for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
1556 			map = qed_mfw_ext_maps + i;
1557 
1558 			if (linkmode_intersects(params->adv_speeds, map->caps))
1559 				ext_speed->advertised_speeds |= map->mfw_val;
1560 		}
1561 	}
1562 
1563 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
1564 		switch (params->forced_speed) {
1565 		case SPEED_1000:
1566 			ext_speed->forced_speed = QED_EXT_SPEED_1G;
1567 			break;
1568 		case SPEED_10000:
1569 			ext_speed->forced_speed = QED_EXT_SPEED_10G;
1570 			break;
1571 		case SPEED_20000:
1572 			ext_speed->forced_speed = QED_EXT_SPEED_20G;
1573 			break;
1574 		case SPEED_25000:
1575 			ext_speed->forced_speed = QED_EXT_SPEED_25G;
1576 			break;
1577 		case SPEED_40000:
1578 			ext_speed->forced_speed = QED_EXT_SPEED_40G;
1579 			break;
1580 		case SPEED_50000:
1581 			ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
1582 						  QED_EXT_SPEED_50G_R2;
1583 			break;
1584 		case SPEED_100000:
1585 			ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
1586 						  QED_EXT_SPEED_100G_R4 |
1587 						  QED_EXT_SPEED_100G_P4;
1588 			break;
1589 		default:
1590 			break;
1591 		}
1592 	}
1593 
1594 	if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
1595 		return;
1596 
1597 	switch (params->forced_speed) {
1598 	case SPEED_25000:
1599 		switch (params->fec) {
1600 		case FEC_FORCE_MODE_NONE:
1601 			link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
1602 			break;
1603 		case FEC_FORCE_MODE_FIRECODE:
1604 			link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
1605 			break;
1606 		case FEC_FORCE_MODE_RS:
1607 			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
1608 			break;
1609 		case FEC_FORCE_MODE_AUTO:
1610 			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
1611 						    ETH_EXT_FEC_25G_BASE_R |
1612 						    ETH_EXT_FEC_25G_NONE;
1613 			break;
1614 		default:
1615 			break;
1616 		}
1617 
1618 		break;
1619 	case SPEED_40000:
1620 		switch (params->fec) {
1621 		case FEC_FORCE_MODE_NONE:
1622 			link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
1623 			break;
1624 		case FEC_FORCE_MODE_FIRECODE:
1625 			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
1626 			break;
1627 		case FEC_FORCE_MODE_AUTO:
1628 			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
1629 						    ETH_EXT_FEC_40G_NONE;
1630 			break;
1631 		default:
1632 			break;
1633 		}
1634 
1635 		break;
1636 	case SPEED_50000:
1637 		switch (params->fec) {
1638 		case FEC_FORCE_MODE_NONE:
1639 			link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
1640 			break;
1641 		case FEC_FORCE_MODE_FIRECODE:
1642 			link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
1643 			break;
1644 		case FEC_FORCE_MODE_RS:
1645 			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
1646 			break;
1647 		case FEC_FORCE_MODE_AUTO:
1648 			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
1649 						    ETH_EXT_FEC_50G_BASE_R |
1650 						    ETH_EXT_FEC_50G_NONE;
1651 			break;
1652 		default:
1653 			break;
1654 		}
1655 
1656 		break;
1657 	case SPEED_100000:
1658 		switch (params->fec) {
1659 		case FEC_FORCE_MODE_NONE:
1660 			link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
1661 			break;
1662 		case FEC_FORCE_MODE_FIRECODE:
1663 			link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
1664 			break;
1665 		case FEC_FORCE_MODE_RS:
1666 			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
1667 			break;
1668 		case FEC_FORCE_MODE_AUTO:
1669 			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
1670 						    ETH_EXT_FEC_100G_BASE_R |
1671 						    ETH_EXT_FEC_100G_NONE;
1672 			break;
1673 		default:
1674 			break;
1675 		}
1676 
1677 		break;
1678 	default:
1679 		break;
1680 	}
1681 }
1682 
1683 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1684 {
1685 	struct qed_mcp_link_params *link_params;
1686 	struct qed_mcp_link_speed_params *speed;
1687 	const struct qed_mfw_speed_map *map;
1688 	struct qed_hwfn *hwfn;
1689 	struct qed_ptt *ptt;
1690 	int rc;
1691 	u32 i;
1692 
1693 	if (!cdev)
1694 		return -ENODEV;
1695 
1696 	/* The link should be set only once per PF */
1697 	hwfn = &cdev->hwfns[0];
1698 
	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possibly later] would be generated when changing properties.
	 */
1703 	if (IS_VF(cdev)) {
1704 		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1705 		return 0;
1706 	}
1707 
1708 	ptt = qed_ptt_acquire(hwfn);
1709 	if (!ptt)
1710 		return -EBUSY;
1711 
	link_params = qed_mcp_get_link_params(hwfn);
	if (!link_params) {
		qed_ptt_release(hwfn, ptt);
		return -ENODATA;
	}
1715 
1716 	speed = &link_params->speed;
1717 
1718 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1719 		speed->autoneg = !!params->autoneg;
1720 
1721 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1722 		speed->advertised_speeds = 0;
1723 
1724 		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
1725 			map = qed_mfw_legacy_maps + i;
1726 
1727 			if (linkmode_intersects(params->adv_speeds, map->caps))
1728 				speed->advertised_speeds |= map->mfw_val;
1729 		}
1730 	}
1731 
1732 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1733 		speed->forced_speed = params->forced_speed;
1734 
1735 	if (qed_mcp_is_ext_speed_supported(hwfn))
1736 		qed_set_ext_speed_params(link_params, params);
1737 
1738 	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1739 		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1740 			link_params->pause.autoneg = true;
1741 		else
1742 			link_params->pause.autoneg = false;
1743 		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1744 			link_params->pause.forced_rx = true;
1745 		else
1746 			link_params->pause.forced_rx = false;
1747 		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1748 			link_params->pause.forced_tx = true;
1749 		else
1750 			link_params->pause.forced_tx = false;
1751 	}
1752 
1753 	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1754 		switch (params->loopback_mode) {
1755 		case QED_LINK_LOOPBACK_INT_PHY:
1756 			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1757 			break;
1758 		case QED_LINK_LOOPBACK_EXT_PHY:
1759 			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1760 			break;
1761 		case QED_LINK_LOOPBACK_EXT:
1762 			link_params->loopback_mode = ETH_LOOPBACK_EXT;
1763 			break;
1764 		case QED_LINK_LOOPBACK_MAC:
1765 			link_params->loopback_mode = ETH_LOOPBACK_MAC;
1766 			break;
1767 		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
1768 			link_params->loopback_mode =
1769 				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
1770 			break;
1771 		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
1772 			link_params->loopback_mode =
1773 				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
1774 			break;
1775 		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
1776 			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
1777 			break;
1778 		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
1779 			link_params->loopback_mode =
1780 				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
1781 			break;
1782 		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
1783 			link_params->loopback_mode =
1784 				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
1785 			break;
1786 		default:
1787 			link_params->loopback_mode = ETH_LOOPBACK_NONE;
1788 			break;
1789 		}
1790 	}
1791 
1792 	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1793 		memcpy(&link_params->eee, &params->eee,
1794 		       sizeof(link_params->eee));
1795 
1796 	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
1797 		link_params->fec = params->fec;
1798 
1799 	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1800 
1801 	qed_ptt_release(hwfn, ptt);
1802 
1803 	return rc;
1804 }
1805 
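/* Map the MFW media type to the closest ethtool PORT_* constant. */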
1806 static int qed_get_port_type(u32 media_type)
1807 {
1808 	int port_type;
1809 
1810 	switch (media_type) {
1811 	case MEDIA_SFPP_10G_FIBER:
1812 	case MEDIA_SFP_1G_FIBER:
1813 	case MEDIA_XFP_FIBER:
1814 	case MEDIA_MODULE_FIBER:
1815 		port_type = PORT_FIBRE;
1816 		break;
1817 	case MEDIA_DA_TWINAX:
1818 		port_type = PORT_DA;
1819 		break;
1820 	case MEDIA_BASE_T:
1821 		port_type = PORT_TP;
1822 		break;
1823 	case MEDIA_KR:
1824 	case MEDIA_NOT_PRESENT:
1825 		port_type = PORT_NONE;
1826 		break;
1827 	case MEDIA_UNSPECIFIED:
1828 	default:
1829 		port_type = PORT_OTHER;
1830 		break;
1831 	}
1832 	return port_type;
1833 }
1834 
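/* Copy the current link parameters, state and capabilities into the
 * caller's buffers; PFs read the MCP-maintained structures, VFs use their
 * locally cached copies.
 */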
1835 static int qed_get_link_data(struct qed_hwfn *hwfn,
1836 			     struct qed_mcp_link_params *params,
1837 			     struct qed_mcp_link_state *link,
1838 			     struct qed_mcp_link_capabilities *link_caps)
1839 {
1840 	void *p;
1841 
1842 	if (!IS_PF(hwfn->cdev)) {
1843 		qed_vf_get_link_params(hwfn, params);
1844 		qed_vf_get_link_state(hwfn, link);
1845 		qed_vf_get_link_caps(hwfn, link_caps);
1846 
1847 		return 0;
1848 	}
1849 
1850 	p = qed_mcp_get_link_params(hwfn);
1851 	if (!p)
1852 		return -ENXIO;
1853 	memcpy(params, p, sizeof(*params));
1854 
1855 	p = qed_mcp_get_link_state(hwfn);
1856 	if (!p)
1857 		return -ENXIO;
1858 	memcpy(link, p, sizeof(*link));
1859 
1860 	p = qed_mcp_get_link_capabilities(hwfn);
1861 	if (!p)
1862 		return -ENXIO;
1863 	memcpy(link_caps, p, sizeof(*link_caps));
1864 
1865 	return 0;
1866 }
1867 
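/* Build the ethtool link-mode mask for the port from the MFW-reported
 * speed capabilities, refined by media type, transceiver type and board
 * configuration.
 */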
1868 static void qed_fill_link_capability(struct qed_hwfn *hwfn,
1869 				     struct qed_ptt *ptt, u32 capability,
1870 				     unsigned long *if_caps)
1871 {
1872 	u32 media_type, tcvr_state, tcvr_type;
1873 	u32 speed_mask, board_cfg;
1874 
1875 	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
1876 		media_type = MEDIA_UNSPECIFIED;
1877 
1878 	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
1879 		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
1880 
1881 	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
1882 		speed_mask = 0xFFFFFFFF;
1883 
1884 	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1885 		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
1886 
1887 	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
1888 		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1889 		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
1890 
1891 	switch (media_type) {
1892 	case MEDIA_DA_TWINAX:
1893 		phylink_set(if_caps, FIBRE);
1894 
1895 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1896 			phylink_set(if_caps, 20000baseKR2_Full);
1897 
		/* For DAC media, multiple speed capabilities are supported */
1899 		capability |= speed_mask;
1900 
1901 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1902 			phylink_set(if_caps, 1000baseKX_Full);
1903 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1904 			phylink_set(if_caps, 10000baseCR_Full);
1905 
1906 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1907 			switch (tcvr_type) {
1908 			case ETH_TRANSCEIVER_TYPE_40G_CR4:
1909 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
1910 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1911 				phylink_set(if_caps, 40000baseCR4_Full);
1912 				break;
1913 			default:
1914 				break;
1915 			}
1916 
1917 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1918 			phylink_set(if_caps, 25000baseCR_Full);
1919 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1920 			phylink_set(if_caps, 50000baseCR2_Full);
1921 
1922 		if (capability &
1923 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1924 			switch (tcvr_type) {
1925 			case ETH_TRANSCEIVER_TYPE_100G_CR4:
1926 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1927 				phylink_set(if_caps, 100000baseCR4_Full);
1928 				break;
1929 			default:
1930 				break;
1931 			}
1932 
1933 		break;
1934 	case MEDIA_BASE_T:
1935 		phylink_set(if_caps, TP);
1936 
1937 		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
1938 			if (capability &
1939 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1940 				phylink_set(if_caps, 1000baseT_Full);
1941 			if (capability &
1942 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1943 				phylink_set(if_caps, 10000baseT_Full);
1944 		}
1945 
1946 		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
1947 			phylink_set(if_caps, FIBRE);
1948 
1949 			switch (tcvr_type) {
1950 			case ETH_TRANSCEIVER_TYPE_1000BASET:
1951 				phylink_set(if_caps, 1000baseT_Full);
1952 				break;
1953 			case ETH_TRANSCEIVER_TYPE_10G_BASET:
1954 				phylink_set(if_caps, 10000baseT_Full);
1955 				break;
1956 			default:
1957 				break;
1958 			}
1959 		}
1960 
1961 		break;
1962 	case MEDIA_SFP_1G_FIBER:
1963 	case MEDIA_SFPP_10G_FIBER:
1964 	case MEDIA_XFP_FIBER:
1965 	case MEDIA_MODULE_FIBER:
1966 		phylink_set(if_caps, FIBRE);
1967 		capability |= speed_mask;
1968 
1969 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1970 			switch (tcvr_type) {
1971 			case ETH_TRANSCEIVER_TYPE_1G_LX:
1972 			case ETH_TRANSCEIVER_TYPE_1G_SX:
1973 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1974 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1975 				phylink_set(if_caps, 1000baseKX_Full);
1976 				break;
1977 			default:
1978 				break;
1979 			}
1980 
1981 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1982 			switch (tcvr_type) {
1983 			case ETH_TRANSCEIVER_TYPE_10G_SR:
1984 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
1985 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
1986 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1987 				phylink_set(if_caps, 10000baseSR_Full);
1988 				break;
1989 			case ETH_TRANSCEIVER_TYPE_10G_LR:
1990 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
1991 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
1992 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1993 				phylink_set(if_caps, 10000baseLR_Full);
1994 				break;
1995 			case ETH_TRANSCEIVER_TYPE_10G_LRM:
1996 				phylink_set(if_caps, 10000baseLRM_Full);
1997 				break;
1998 			case ETH_TRANSCEIVER_TYPE_10G_ER:
1999 				phylink_set(if_caps, 10000baseR_FEC);
2000 				break;
2001 			default:
2002 				break;
2003 			}
2004 
2005 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
2006 			phylink_set(if_caps, 20000baseKR2_Full);
2007 
2008 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
2009 			switch (tcvr_type) {
2010 			case ETH_TRANSCEIVER_TYPE_25G_SR:
2011 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
2012 				phylink_set(if_caps, 25000baseSR_Full);
2013 				break;
2014 			default:
2015 				break;
2016 			}
2017 
2018 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
2019 			switch (tcvr_type) {
2020 			case ETH_TRANSCEIVER_TYPE_40G_LR4:
2021 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2022 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2023 				phylink_set(if_caps, 40000baseLR4_Full);
2024 				break;
2025 			case ETH_TRANSCEIVER_TYPE_40G_SR4:
2026 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2027 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2028 				phylink_set(if_caps, 40000baseSR4_Full);
2029 				break;
2030 			default:
2031 				break;
2032 			}
2033 
2034 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2035 			phylink_set(if_caps, 50000baseKR2_Full);
2036 
2037 		if (capability &
2038 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2039 			switch (tcvr_type) {
2040 			case ETH_TRANSCEIVER_TYPE_100G_SR4:
2041 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2042 				phylink_set(if_caps, 100000baseSR4_Full);
2043 				break;
2044 			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2045 				phylink_set(if_caps, 100000baseLR4_ER4_Full);
2046 				break;
2047 			default:
2048 				break;
2049 			}
2050 
2051 		break;
2052 	case MEDIA_KR:
2053 		phylink_set(if_caps, Backplane);
2054 
2055 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
2056 			phylink_set(if_caps, 20000baseKR2_Full);
2057 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
2058 			phylink_set(if_caps, 1000baseKX_Full);
2059 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
2060 			phylink_set(if_caps, 10000baseKR_Full);
2061 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
2062 			phylink_set(if_caps, 25000baseKR_Full);
2063 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
2064 			phylink_set(if_caps, 40000baseKR4_Full);
2065 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2066 			phylink_set(if_caps, 50000baseKR2_Full);
2067 		if (capability &
2068 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2069 			phylink_set(if_caps, 100000baseKR4_Full);
2070 
2071 		break;
2072 	case MEDIA_UNSPECIFIED:
2073 	case MEDIA_NOT_PRESENT:
2074 	default:
2075 		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
2076 			   "Unknown media and transceiver type;\n");
2077 		break;
2078 	}
2079 }
2080 
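/* Convert link-partner advertised speed flags into an NVM-style speed
 * capability mask that qed_fill_link_capability() can consume.
 */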
2081 static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
2082 {
2083 	*speed_mask = 0;
2084 
2085 	if (caps &
2086 	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
2087 		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2088 	if (caps & QED_LINK_PARTNER_SPEED_10G)
2089 		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2090 	if (caps & QED_LINK_PARTNER_SPEED_20G)
2091 		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
2092 	if (caps & QED_LINK_PARTNER_SPEED_25G)
2093 		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2094 	if (caps & QED_LINK_PARTNER_SPEED_40G)
2095 		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2096 	if (caps & QED_LINK_PARTNER_SPEED_50G)
2097 		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
2098 	if (caps & QED_LINK_PARTNER_SPEED_100G)
2099 		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
2100 }
2101 
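/* Fill the qed_link_output structure reported to the protocol driver:
 * supported/advertised/partner link modes, pause and FEC configuration,
 * speed, port type and EEE settings.
 */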
2102 static void qed_fill_link(struct qed_hwfn *hwfn,
2103 			  struct qed_ptt *ptt,
2104 			  struct qed_link_output *if_link)
2105 {
2106 	struct qed_mcp_link_capabilities link_caps;
2107 	struct qed_mcp_link_params params;
2108 	struct qed_mcp_link_state link;
2109 	u32 media_type, speed_mask;
2110 
2111 	memset(if_link, 0, sizeof(*if_link));
2112 
2113 	/* Prepare source inputs */
2114 	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
2115 		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
2116 		return;
2117 	}
2118 
2119 	/* Set the link parameters to pass to protocol driver */
2120 	if (link.link_up)
2121 		if_link->link_up = true;
2122 
2123 	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
2124 		if (link_caps.default_ext_autoneg)
2125 			phylink_set(if_link->supported_caps, Autoneg);
2126 
2127 		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2128 
2129 		if (params.ext_speed.autoneg)
2130 			phylink_set(if_link->advertised_caps, Autoneg);
2131 		else
2132 			phylink_clear(if_link->advertised_caps, Autoneg);
2133 
2134 		qed_fill_link_capability(hwfn, ptt,
2135 					 params.ext_speed.advertised_speeds,
2136 					 if_link->advertised_caps);
2137 	} else {
2138 		if (link_caps.default_speed_autoneg)
2139 			phylink_set(if_link->supported_caps, Autoneg);
2140 
2141 		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2142 
2143 		if (params.speed.autoneg)
2144 			phylink_set(if_link->advertised_caps, Autoneg);
2145 		else
2146 			phylink_clear(if_link->advertised_caps, Autoneg);
2147 	}
2148 
2149 	if (params.pause.autoneg ||
2150 	    (params.pause.forced_rx && params.pause.forced_tx))
2151 		phylink_set(if_link->supported_caps, Asym_Pause);
2152 	if (params.pause.autoneg || params.pause.forced_rx ||
2153 	    params.pause.forced_tx)
2154 		phylink_set(if_link->supported_caps, Pause);
2155 
2156 	if_link->sup_fec = link_caps.fec_default;
2157 	if_link->active_fec = params.fec;
2158 
2159 	/* Fill link advertised capability */
2160 	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
2161 				 if_link->advertised_caps);
2162 
2163 	/* Fill link supported capability */
2164 	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
2165 				 if_link->supported_caps);
2166 
2167 	/* Fill partner advertised capability */
2168 	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
2169 	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);
2170 
2171 	if (link.link_up)
2172 		if_link->speed = link.speed;
2173 
2174 	/* TODO - fill duplex properly */
2175 	if_link->duplex = DUPLEX_FULL;
2176 	qed_mcp_get_media_type(hwfn, ptt, &media_type);
2177 	if_link->port = qed_get_port_type(media_type);
2178 
2179 	if_link->autoneg = params.speed.autoneg;
2180 
2181 	if (params.pause.autoneg)
2182 		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
2183 	if (params.pause.forced_rx)
2184 		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
2185 	if (params.pause.forced_tx)
2186 		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
2187 
2188 	if (link.an_complete)
2189 		phylink_set(if_link->lp_caps, Autoneg);
2190 	if (link.partner_adv_pause)
2191 		phylink_set(if_link->lp_caps, Pause);
2192 	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
2193 	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
2194 		phylink_set(if_link->lp_caps, Asym_Pause);
2195 
2196 	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
2197 		if_link->eee_supported = false;
2198 	} else {
2199 		if_link->eee_supported = true;
2200 		if_link->eee_active = link.eee_active;
2201 		if_link->sup_caps = link_caps.eee_speed_caps;
2202 		/* MFW clears adv_caps on eee disable; use configured value */
2203 		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
2204 					params.eee.adv_caps;
2205 		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
2206 		if_link->eee.enable = params.eee.enable;
2207 		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
2208 		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
2209 	}
2210 }
2211 
2212 static void qed_get_current_link(struct qed_dev *cdev,
2213 				 struct qed_link_output *if_link)
2214 {
2215 	struct qed_hwfn *hwfn;
2216 	struct qed_ptt *ptt;
2217 	int i;
2218 
2219 	hwfn = &cdev->hwfns[0];
2220 	if (IS_PF(cdev)) {
2221 		ptt = qed_ptt_acquire(hwfn);
2222 		if (ptt) {
2223 			qed_fill_link(hwfn, ptt, if_link);
2224 			qed_ptt_release(hwfn, ptt);
2225 		} else {
2226 			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
2227 		}
2228 	} else {
2229 		qed_fill_link(hwfn, NULL, if_link);
2230 	}
2231 
2232 	for_each_hwfn(cdev, i)
2233 		qed_inform_vf_link_state(&cdev->hwfns[i]);
2234 }
2235 
2236 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2237 {
2238 	void *cookie = hwfn->cdev->ops_cookie;
2239 	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2240 	struct qed_link_output if_link;
2241 
2242 	qed_fill_link(hwfn, ptt, &if_link);
2243 	qed_inform_vf_link_state(hwfn);
2244 
2245 	if (IS_LEAD_HWFN(hwfn) && cookie)
2246 		op->link_update(cookie, &if_link);
2247 }
2248 
2249 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2250 {
2251 	void *cookie = hwfn->cdev->ops_cookie;
2252 	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2253 
2254 	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
2255 		op->bw_update(cookie);
2256 }
2257 
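/* Ask the MFW to drain the NIG on every hwfn; a no-op for VFs. */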
2258 static int qed_drain(struct qed_dev *cdev)
2259 {
2260 	struct qed_hwfn *hwfn;
2261 	struct qed_ptt *ptt;
2262 	int i, rc;
2263 
2264 	if (IS_VF(cdev))
2265 		return 0;
2266 
2267 	for_each_hwfn(cdev, i) {
2268 		hwfn = &cdev->hwfns[i];
2269 		ptt = qed_ptt_acquire(hwfn);
2270 		if (!ptt) {
2271 			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
2272 			return -EBUSY;
2273 		}
2274 		rc = qed_mcp_drain(hwfn, ptt);
2275 		qed_ptt_release(hwfn, ptt);
2276 		if (rc)
2277 			return rc;
2278 	}
2279 
2280 	return 0;
2281 }
2282 
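/* Read an NVM image into a temporary buffer, compute the CRC32 of all but
 * the trailing 4 CRC bytes (after byte-swapping to big-endian) and return
 * the result in big-endian order.
 */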
2283 static int qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
2284 					  struct qed_nvm_image_att *nvm_image,
2285 					  u32 *crc)
2286 {
2287 	u8 *buf = NULL;
2288 	int rc;
2289 
2290 	/* Allocate a buffer for holding the nvram image */
2291 	buf = kzalloc(nvm_image->length, GFP_KERNEL);
2292 	if (!buf)
2293 		return -ENOMEM;
2294 
2295 	/* Read image into buffer */
2296 	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
2297 			      buf, nvm_image->length);
2298 	if (rc) {
2299 		DP_ERR(cdev, "Failed reading image from nvm\n");
2300 		goto out;
2301 	}
2302 
2303 	/* Convert the buffer into big-endian format (excluding the
2304 	 * closing 4 bytes of CRC).
2305 	 */
2306 	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
2307 			  DIV_ROUND_UP(nvm_image->length - 4, 4));
2308 
2309 	/* Calc CRC for the "actual" image buffer, i.e. not including
2310 	 * the last 4 CRC bytes.
2311 	 */
2312 	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
2313 	*crc = (__force u32)cpu_to_be32p(crc);
2314 
2315 out:
2316 	kfree(buf);
2317 
2318 	return rc;
2319 }
2320 
2321 /* Binary file format -
2322  *     /----------------------------------------------------------------------\
2323  * 0B  |                       0x4 [command index]                            |
2324  * 4B  | image_type     | Options        |  Number of register settings       |
2325  * 8B  |                       Value                                          |
2326  * 12B |                       Mask                                           |
2327  * 16B |                       Offset                                         |
2328  *     \----------------------------------------------------------------------/
2329  * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
2330  * Options - b'0 - Calculate & Update CRC for image
2331  */
2332 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
2333 				      bool *check_resp)
2334 {
2335 	struct qed_nvm_image_att nvm_image;
2336 	struct qed_hwfn *p_hwfn;
2337 	bool is_crc = false;
2338 	u32 image_type;
2339 	int rc = 0, i;
2340 	u16 len;
2341 
2342 	*data += 4;
2343 	image_type = **data;
2344 	p_hwfn = QED_LEADING_HWFN(cdev);
2345 	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2346 		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
2347 			break;
2348 	if (i == p_hwfn->nvm_info.num_images) {
2349 		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
2350 		       image_type);
2351 		return -ENOENT;
2352 	}
2353 
2354 	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
2355 	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
2356 
2357 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2358 		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
2359 		   **data, image_type, nvm_image.start_addr,
2360 		   nvm_image.start_addr + nvm_image.length - 1);
2361 	(*data)++;
2362 	is_crc = !!(**data & BIT(0));
2363 	(*data)++;
2364 	len = *((u16 *)*data);
2365 	*data += 2;
2366 	if (is_crc) {
2367 		u32 crc = 0;
2368 
2369 		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
2370 		if (rc) {
2371 			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
2372 			goto exit;
2373 		}
2374 
2375 		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2376 				       (nvm_image.start_addr +
2377 					nvm_image.length - 4), (u8 *)&crc, 4);
2378 		if (rc)
2379 			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
2380 			       nvm_image.start_addr + nvm_image.length - 4, rc);
2381 		goto exit;
2382 	}
2383 
2384 	/* Iterate over the values for setting */
2385 	while (len) {
2386 		u32 offset, mask, value, cur_value;
2387 		u8 buf[4];
2388 
2389 		value = *((u32 *)*data);
2390 		*data += 4;
2391 		mask = *((u32 *)*data);
2392 		*data += 4;
2393 		offset = *((u32 *)*data);
2394 		*data += 4;
2395 
2396 		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
2397 				      4);
2398 		if (rc) {
2399 			DP_ERR(cdev, "Failed reading from %08x\n",
2400 			       nvm_image.start_addr + offset);
2401 			goto exit;
2402 		}
2403 
2404 		cur_value = le32_to_cpu(*((__le32 *)buf));
2405 		DP_VERBOSE(cdev, NETIF_MSG_DRV,
2406 			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
2407 			   nvm_image.start_addr + offset, cur_value,
2408 			   (cur_value & ~mask) | (value & mask), value, mask);
2409 		value = (value & mask) | (cur_value & ~mask);
2410 		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2411 				       nvm_image.start_addr + offset,
2412 				       (u8 *)&value, 4);
2413 		if (rc) {
2414 			DP_ERR(cdev, "Failed writing to %08x\n",
2415 			       nvm_image.start_addr + offset);
2416 			goto exit;
2417 		}
2418 
2419 		len--;
2420 	}
2421 exit:
2422 	return rc;
2423 }
2424 
2425 /* Binary file format -
2426  *     /----------------------------------------------------------------------\
2427  * 0B  |                       0x3 [command index]                            |
2428  * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2429  * 8B  | File-type |                   reserved                               |
2430  * 12B |                    Image length in bytes                             |
2431  *     \----------------------------------------------------------------------/
2432  *     Start a new file of the provided type
2433  */
2434 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2435 					  const u8 **data, bool *check_resp)
2436 {
2437 	u32 file_type, file_size = 0;
2438 	int rc;
2439 
2440 	*data += 4;
2441 	*check_resp = !!(**data & BIT(0));
2442 	*data += 4;
2443 	file_type = **data;
2444 
2445 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2446 		   "About to start a new file of type %02x\n", file_type);
2447 	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2448 		*data += 4;
2449 		file_size = *((u32 *)(*data));
2450 	}
2451 
2452 	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2453 			       (u8 *)(&file_size), 4);
2454 	*data += 4;
2455 
2456 	return rc;
2457 }
2458 
2459 /* Binary file format -
2460  *     /----------------------------------------------------------------------\
2461  * 0B  |                       0x2 [command index]                            |
2462  * 4B  |                       Length in bytes                                |
2463  * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2464  * 12B |                       Offset in bytes                                |
2465  * 16B |                       Data ...                                       |
2466  *     \----------------------------------------------------------------------/
2467  *     Write data as part of a file that was previously started. Data should be
2468  *     of length equal to that provided in the message
2469  */
2470 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2471 					 const u8 **data, bool *check_resp)
2472 {
2473 	u32 offset, len;
2474 	int rc;
2475 
2476 	*data += 4;
2477 	len = *((u32 *)(*data));
2478 	*data += 4;
2479 	*check_resp = !!(**data & BIT(0));
2480 	*data += 4;
2481 	offset = *((u32 *)(*data));
2482 	*data += 4;
2483 
2484 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2485 		   "About to write File-data: %08x bytes to offset %08x\n",
2486 		   len, offset);
2487 
2488 	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2489 			       (char *)(*data), len);
2490 	*data += len;
2491 
2492 	return rc;
2493 }
2494 
2495 /* Binary file format [General header] -
2496  *     /----------------------------------------------------------------------\
2497  * 0B  |                       QED_NVM_SIGNATURE                              |
2498  * 4B  |                       Length in bytes                                |
2499  * 8B  | Highest command in this batchfile |          Reserved                |
2500  *     \----------------------------------------------------------------------/
2501  */
2502 static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2503 					const struct firmware *image,
2504 					const u8 **data)
2505 {
2506 	u32 signature, len;
2507 
2508 	/* Check minimum size */
2509 	if (image->size < 12) {
2510 		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2511 		return -EINVAL;
2512 	}
2513 
2514 	/* Check signature */
2515 	signature = *((u32 *)(*data));
2516 	if (signature != QED_NVM_SIGNATURE) {
2517 		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2518 		return -EINVAL;
2519 	}
2520 
2521 	*data += 4;
2522 	/* Validate internal size equals the image-size */
2523 	len = *((u32 *)(*data));
2524 	if (len != image->size) {
2525 		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2526 		       len, (u32)image->size);
2527 		return -EINVAL;
2528 	}
2529 
2530 	*data += 4;
2531 	/* Make sure the driver is familiar with all commands needed for this file */
2532 	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2533 		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2534 		       *((u16 *)(*data)));
2535 		return -EINVAL;
2536 	}
2537 
2538 	*data += 4;
2539 
2540 	return 0;
2541 }
2542 
2543 /* Binary file format -
2544  *     /----------------------------------------------------------------------\
2545  * 0B  |                       0x5 [command index]                            |
2546  * 4B  | Number of config attributes     |          Reserved                  |
2547  * 4B  | Config ID                       | Entity ID      | Length            |
2548  * 4B  | Value                                                                |
2549  *     |                                                                      |
2550  *     \----------------------------------------------------------------------/
2551  * There can be several cfg_id-entity_id-Length-Value sets as specified by
2552  * 'Number of config attributes'.
2553  *
2554  * The API parses config attributes from the user-provided buffer and flashes
2555  * them to the respective NVM path using the Management FW interface.
2556  */
2557 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2558 {
2559 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2560 	u8 entity_id, len, buf[32];
2561 	bool need_nvm_init = true;
2562 	struct qed_ptt *ptt;
2563 	u16 cfg_id, count;
2564 	int rc = 0, i;
2565 	u32 flags;
2566 
2567 	ptt = qed_ptt_acquire(hwfn);
2568 	if (!ptt)
2569 		return -EAGAIN;
2570 
2571 	/* NVM CFG ID attribute header */
2572 	*data += 4;
2573 	count = *((u16 *)*data);
2574 	*data += 4;
2575 
2576 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2577 		   "Read config ids: num_attrs = %0d\n", count);
2578 	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2579 	 * arithmetic operations in the implementation.
2580 	 */
2581 	for (i = 1; i <= count; i++) {
2582 		cfg_id = *((u16 *)*data);
2583 		*data += 2;
2584 		entity_id = **data;
2585 		(*data)++;
2586 		len = **data;
2587 		(*data)++;
2588 		memcpy(buf, *data, len);
2589 		*data += len;
2590 
2591 		flags = 0;
2592 		if (need_nvm_init) {
2593 			flags |= QED_NVM_CFG_OPTION_INIT;
2594 			need_nvm_init = false;
2595 		}
2596 
2597 		/* Commit to flash and free the resources */
2598 		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2599 			flags |= QED_NVM_CFG_OPTION_COMMIT |
2600 				 QED_NVM_CFG_OPTION_FREE;
2601 			need_nvm_init = true;
2602 		}
2603 
2604 		if (entity_id)
2605 			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2606 
2607 		DP_VERBOSE(cdev, NETIF_MSG_DRV,
2608 			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
2609 			   entity_id, len);
2610 		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2611 					 buf, len);
2612 		if (rc) {
2613 			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2614 			break;
2615 		}
2616 	}
2617 
2618 	qed_ptt_release(hwfn, ptt);
2619 
2620 	return rc;
2621 }
2622 
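/* Query the length of an NVM config attribute; fall back to the maximum
 * buffer size if the attribute cannot be read.
 */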
2623 #define QED_MAX_NVM_BUF_LEN	32
2624 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
2625 {
2626 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2627 	u8 buf[QED_MAX_NVM_BUF_LEN];
2628 	struct qed_ptt *ptt;
2629 	u32 len;
2630 	int rc;
2631 
2632 	ptt = qed_ptt_acquire(hwfn);
2633 	if (!ptt)
2634 		return QED_MAX_NVM_BUF_LEN;
2635 
2636 	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
2637 				 &len);
2638 	if (rc || !len) {
2639 		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2640 		len = QED_MAX_NVM_BUF_LEN;
2641 	}
2642 
2643 	qed_ptt_release(hwfn, ptt);
2644 
2645 	return len;
2646 }
2647 
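/* Read an NVM config attribute into *data, per-entity when entity_id is set. */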
2648 static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
2649 				  u32 cmd, u32 entity_id)
2650 {
2651 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2652 	struct qed_ptt *ptt;
2653 	u32 flags, len;
2654 	int rc = 0;
2655 
2656 	ptt = qed_ptt_acquire(hwfn);
2657 	if (!ptt)
2658 		return -EAGAIN;
2659 
2660 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2661 		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
2662 	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
2663 	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
2664 	if (rc)
2665 		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2666 
2667 	qed_ptt_release(hwfn, ptt);
2668 
2669 	return rc;
2670 }
2671 
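/* Parse and execute an NVM batch file: validate the header, then handle each
 * command (file start, file data, NVM change, config-id write) in sequence,
 * checking the MFW response where the command requests it.
 */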
2672 static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2673 {
2674 	const struct firmware *image;
2675 	const u8 *data, *data_end;
2676 	u32 cmd_type;
2677 	int rc;
2678 
2679 	rc = request_firmware(&image, name, &cdev->pdev->dev);
2680 	if (rc) {
2681 		DP_ERR(cdev, "Failed to find '%s'\n", name);
2682 		return rc;
2683 	}
2684 
2685 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2686 		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
2687 		   name, image->data, (u32)image->size);
2688 	data = image->data;
2689 	data_end = data + image->size;
2690 
2691 	rc = qed_nvm_flash_image_validate(cdev, image, &data);
2692 	if (rc)
2693 		goto exit;
2694 
2695 	while (data < data_end) {
2696 		bool check_resp = false;
2697 
2698 		/* Parse the actual command */
2699 		cmd_type = *((u32 *)data);
2700 		switch (cmd_type) {
2701 		case QED_NVM_FLASH_CMD_FILE_DATA:
2702 			rc = qed_nvm_flash_image_file_data(cdev, &data,
2703 							   &check_resp);
2704 			break;
2705 		case QED_NVM_FLASH_CMD_FILE_START:
2706 			rc = qed_nvm_flash_image_file_start(cdev, &data,
2707 							    &check_resp);
2708 			break;
2709 		case QED_NVM_FLASH_CMD_NVM_CHANGE:
2710 			rc = qed_nvm_flash_image_access(cdev, &data,
2711 							&check_resp);
2712 			break;
2713 		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
2714 			rc = qed_nvm_flash_cfg_write(cdev, &data);
2715 			break;
2716 		default:
2717 			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2718 			rc = -EINVAL;
2719 			goto exit;
2720 		}
2721 
2722 		if (rc) {
2723 			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2724 			goto exit;
2725 		}
2726 
2727 		/* Check response if needed */
2728 		if (check_resp) {
2729 			u32 mcp_response = 0;
2730 
2731 			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2732 				DP_ERR(cdev, "Failed getting MCP response\n");
2733 				rc = -EINVAL;
2734 				goto exit;
2735 			}
2736 
2737 			switch (mcp_response & FW_MSG_CODE_MASK) {
2738 			case FW_MSG_CODE_OK:
2739 			case FW_MSG_CODE_NVM_OK:
2740 			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2741 			case FW_MSG_CODE_PHY_OK:
2742 				break;
2743 			default:
2744 				DP_ERR(cdev, "MFW returns error: %08x\n",
2745 				       mcp_response);
2746 				rc = -EINVAL;
2747 				goto exit;
2748 			}
2749 		}
2750 	}
2751 
2752 exit:
2753 	release_firmware(image);
2754 
2755 	return rc;
2756 }
2757 
2758 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2759 			     u8 *buf, u16 len)
2760 {
2761 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2762 
2763 	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2764 }
2765 
2766 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2767 {
2768 	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2769 	void *cookie = p_hwfn->cdev->ops_cookie;
2770 
2771 	if (ops && ops->schedule_recovery_handler)
2772 		ops->schedule_recovery_handler(cookie);
2773 }
2774 
2775 static const char * const qed_hw_err_type_descr[] = {
2776 	[QED_HW_ERR_FAN_FAIL]		= "Fan Failure",
2777 	[QED_HW_ERR_MFW_RESP_FAIL]	= "MFW Response Failure",
2778 	[QED_HW_ERR_HW_ATTN]		= "HW Attention",
2779 	[QED_HW_ERR_DMAE_FAIL]		= "DMAE Failure",
2780 	[QED_HW_ERR_RAMROD_FAIL]	= "Ramrod Failure",
2781 	[QED_HW_ERR_FW_ASSERT]		= "FW Assertion",
2782 	[QED_HW_ERR_LAST]		= "Unknown",
2783 };
2784 
2785 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
2786 			   enum qed_hw_err_type err_type)
2787 {
2788 	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2789 	void *cookie = p_hwfn->cdev->ops_cookie;
2790 	const char *err_str;
2791 
2792 	if (err_type > QED_HW_ERR_LAST)
2793 		err_type = QED_HW_ERR_LAST;
2794 	err_str = qed_hw_err_type_descr[err_type];
2795 
2796 	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
2797 
2798 	/* Call the HW error handler of the protocol driver.
2799 	 * If it is not available, fall back to minimal handling: prevent
2800 	 * HW attentions from being reasserted.
2801 	 */
2802 	if (ops && ops->schedule_hw_err_handler)
2803 		ops->schedule_hw_err_handler(cookie, err_type);
2804 	else
2805 		qed_int_attn_clr_enable(p_hwfn->cdev, true);
2806 }
2807 
2808 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2809 			    void *handle)
2810 {
2811 	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2812 }
2813 
2814 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2815 {
2816 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2817 	struct qed_ptt *ptt;
2818 	int status = 0;
2819 
2820 	ptt = qed_ptt_acquire(hwfn);
2821 	if (!ptt)
2822 		return -EAGAIN;
2823 
2824 	status = qed_mcp_set_led(hwfn, ptt, mode);
2825 
2826 	qed_ptt_release(hwfn, ptt);
2827 
2828 	return status;
2829 }
2830 
2831 int qed_recovery_process(struct qed_dev *cdev)
2832 {
2833 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2834 	struct qed_ptt *p_ptt;
2835 	int rc = 0;
2836 
2837 	p_ptt = qed_ptt_acquire(p_hwfn);
2838 	if (!p_ptt)
2839 		return -EAGAIN;
2840 
2841 	rc = qed_start_recovery_process(p_hwfn, p_ptt);
2842 
2843 	qed_ptt_release(p_hwfn, p_ptt);
2844 
2845 	return rc;
2846 }
2847 
2848 static int qed_update_wol(struct qed_dev *cdev, bool enabled)
2849 {
2850 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2851 	struct qed_ptt *ptt;
2852 	int rc = 0;
2853 
2854 	if (IS_VF(cdev))
2855 		return 0;
2856 
2857 	ptt = qed_ptt_acquire(hwfn);
2858 	if (!ptt)
2859 		return -EAGAIN;
2860 
2861 	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
2862 				   : QED_OV_WOL_DISABLED);
2863 	if (rc)
2864 		goto out;
2865 	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2866 
2867 out:
2868 	qed_ptt_release(hwfn, ptt);
2869 	return rc;
2870 }
2871 
2872 static int qed_update_drv_state(struct qed_dev *cdev, bool active)
2873 {
2874 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2875 	struct qed_ptt *ptt;
2876 	int status = 0;
2877 
2878 	if (IS_VF(cdev))
2879 		return 0;
2880 
2881 	ptt = qed_ptt_acquire(hwfn);
2882 	if (!ptt)
2883 		return -EAGAIN;
2884 
2885 	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
2886 						QED_OV_DRIVER_STATE_ACTIVE :
2887 						QED_OV_DRIVER_STATE_DISABLED);
2888 
2889 	qed_ptt_release(hwfn, ptt);
2890 
2891 	return status;
2892 }
2893 
2894 static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
2895 {
2896 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2897 	struct qed_ptt *ptt;
2898 	int status = 0;
2899 
2900 	if (IS_VF(cdev))
2901 		return 0;
2902 
2903 	ptt = qed_ptt_acquire(hwfn);
2904 	if (!ptt)
2905 		return -EAGAIN;
2906 
2907 	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
2908 	if (status)
2909 		goto out;
2910 
2911 	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2912 
2913 out:
2914 	qed_ptt_release(hwfn, ptt);
2915 	return status;
2916 }
2917 
2918 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
2919 {
2920 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2921 	struct qed_ptt *ptt;
2922 	int status = 0;
2923 
2924 	if (IS_VF(cdev))
2925 		return 0;
2926 
2927 	ptt = qed_ptt_acquire(hwfn);
2928 	if (!ptt)
2929 		return -EAGAIN;
2930 
2931 	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
2932 	if (status)
2933 		goto out;
2934 
2935 	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2936 
2937 out:
2938 	qed_ptt_release(hwfn, ptt);
2939 	return status;
2940 }
2941 
2942 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
2943 				  u8 dev_addr, u32 offset, u32 len)
2944 {
2945 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2946 	struct qed_ptt *ptt;
2947 	int rc = 0;
2948 
2949 	if (IS_VF(cdev))
2950 		return 0;
2951 
2952 	ptt = qed_ptt_acquire(hwfn);
2953 	if (!ptt)
2954 		return -EAGAIN;
2955 
2956 	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
2957 				  offset, len, buf);
2958 
2959 	qed_ptt_release(hwfn, ptt);
2960 
2961 	return rc;
2962 }
2963 
2964 static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
2965 {
2966 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2967 	struct qed_ptt *ptt;
2968 	int rc = 0;
2969 
2970 	if (IS_VF(cdev))
2971 		return 0;
2972 
2973 	ptt = qed_ptt_acquire(hwfn);
2974 	if (!ptt)
2975 		return -EAGAIN;
2976 
2977 	rc = qed_dbg_grc_config(hwfn, cfg_id, val);
2978 
2979 	qed_ptt_release(hwfn, ptt);
2980 
2981 	return rc;
2982 }
2983 
2984 static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
2985 {
2986 	return QED_AFFIN_HWFN_IDX(cdev);
2987 }
2988 
2989 static struct qed_selftest_ops qed_selftest_ops_pass = {
2990 	.selftest_memory = &qed_selftest_memory,
2991 	.selftest_interrupt = &qed_selftest_interrupt,
2992 	.selftest_register = &qed_selftest_register,
2993 	.selftest_clock = &qed_selftest_clock,
2994 	.selftest_nvram = &qed_selftest_nvram,
2995 };
2996 
2997 const struct qed_common_ops qed_common_ops_pass = {
2998 	.selftest = &qed_selftest_ops_pass,
2999 	.probe = &qed_probe,
3000 	.remove = &qed_remove,
3001 	.set_power_state = &qed_set_power_state,
3002 	.set_name = &qed_set_name,
3003 	.update_pf_params = &qed_update_pf_params,
3004 	.slowpath_start = &qed_slowpath_start,
3005 	.slowpath_stop = &qed_slowpath_stop,
3006 	.set_fp_int = &qed_set_int_fp,
3007 	.get_fp_int = &qed_get_int_fp,
3008 	.sb_init = &qed_sb_init,
3009 	.sb_release = &qed_sb_release,
3010 	.simd_handler_config = &qed_simd_handler_config,
3011 	.simd_handler_clean = &qed_simd_handler_clean,
3012 	.dbg_grc = &qed_dbg_grc,
3013 	.dbg_grc_size = &qed_dbg_grc_size,
3014 	.can_link_change = &qed_can_link_change,
3015 	.set_link = &qed_set_link,
3016 	.get_link = &qed_get_current_link,
3017 	.drain = &qed_drain,
3018 	.update_msglvl = &qed_init_dp,
3019 	.devlink_register = qed_devlink_register,
3020 	.devlink_unregister = qed_devlink_unregister,
3021 	.report_fatal_error = qed_report_fatal_error,
3022 	.dbg_all_data = &qed_dbg_all_data,
3023 	.dbg_all_data_size = &qed_dbg_all_data_size,
3024 	.chain_alloc = &qed_chain_alloc,
3025 	.chain_free = &qed_chain_free,
3026 	.nvm_flash = &qed_nvm_flash,
3027 	.nvm_get_image = &qed_nvm_get_image,
3028 	.set_coalesce = &qed_set_coalesce,
3029 	.set_led = &qed_set_led,
3030 	.recovery_process = &qed_recovery_process,
3031 	.recovery_prolog = &qed_recovery_prolog,
3032 	.attn_clr_enable = &qed_int_attn_clr_enable,
3033 	.update_drv_state = &qed_update_drv_state,
3034 	.update_mac = &qed_update_mac,
3035 	.update_mtu = &qed_update_mtu,
3036 	.update_wol = &qed_update_wol,
3037 	.db_recovery_add = &qed_db_recovery_add,
3038 	.db_recovery_del = &qed_db_recovery_del,
3039 	.read_module_eeprom = &qed_read_module_eeprom,
3040 	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
3041 	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
3042 	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
3043 	.set_grc_config = &qed_set_grc_config,
3044 };
3045 
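/* Fill the MCP protocol statistics union for the requested protocol type. */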
3046 void qed_get_protocol_stats(struct qed_dev *cdev,
3047 			    enum qed_mcp_protocol_type type,
3048 			    union qed_mcp_protocol_stats *stats)
3049 {
3050 	struct qed_eth_stats eth_stats;
3051 
3052 	memset(stats, 0, sizeof(*stats));
3053 
3054 	switch (type) {
3055 	case QED_MCP_LAN_STATS:
3056 		qed_get_vport_stats(cdev, &eth_stats);
3057 		stats->lan_stats.ucast_rx_pkts =
3058 					eth_stats.common.rx_ucast_pkts;
3059 		stats->lan_stats.ucast_tx_pkts =
3060 					eth_stats.common.tx_ucast_pkts;
3061 		stats->lan_stats.fcs_err = -1;
3062 		break;
3063 	case QED_MCP_FCOE_STATS:
3064 		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
3065 		break;
3066 	case QED_MCP_ISCSI_STATS:
3067 		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
3068 		break;
3069 	default:
3070 		DP_VERBOSE(cdev, QED_MSG_SP,
3071 			   "Invalid protocol type = %d\n", type);
3072 		return;
3073 	}
3074 }
3075 
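/* Queue the slowpath task to service an MFW TLV request asynchronously. */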
3076 int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
3077 {
3078 	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
3079 		   "Scheduling slowpath task [Flag: %d]\n",
3080 		   QED_SLOWPATH_MFW_TLV_REQ);
3081 	smp_mb__before_atomic();
3082 	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
3083 	smp_mb__after_atomic();
3084 	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
3085 
3086 	return 0;
3087 }
3088 
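/* Build the generic MFW TLV block from protocol-driver data (offload flags,
 * MAC addresses) and the aggregated vport statistics.
 */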
3089 static void
3090 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
3091 {
3092 	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
3093 	struct qed_eth_stats_common *p_common;
3094 	struct qed_generic_tlvs gen_tlvs;
3095 	struct qed_eth_stats stats;
3096 	int i;
3097 
3098 	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
3099 	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
3100 
3101 	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
3102 		tlv->flags.ipv4_csum_offload = true;
3103 	if (gen_tlvs.feat_flags & QED_TLV_LSO)
3104 		tlv->flags.lso_supported = true;
3105 	tlv->flags.b_set = true;
3106 
3107 	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
3108 		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
3109 			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
3110 			tlv->mac_set[i] = true;
3111 		}
3112 	}
3113 
3114 	qed_get_vport_stats(cdev, &stats);
3115 	p_common = &stats.common;
3116 	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
3117 			 p_common->rx_bcast_pkts;
3118 	tlv->rx_frames_set = true;
3119 	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
3120 			p_common->rx_bcast_bytes;
3121 	tlv->rx_bytes_set = true;
3122 	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
3123 			 p_common->tx_bcast_pkts;
3124 	tlv->tx_frames_set = true;
3125 	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
3126 			p_common->tx_bcast_bytes;
3127 	tlv->tx_bytes_set = true;
3128 }
3129 
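/* Fill the requested MFW TLV block, delegating protocol-specific data to the
 * callbacks registered in qed_common_cb_ops.
 */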
3130 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
3131 			  union qed_mfw_tlv_data *tlv_buf)
3132 {
3133 	struct qed_dev *cdev = hwfn->cdev;
3134 	struct qed_common_cb_ops *ops;
3135 
3136 	ops = cdev->protocol_ops.common;
3137 	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
3138 		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
3139 		return -EINVAL;
3140 	}
3141 
3142 	switch (type) {
3143 	case QED_MFW_TLV_GENERIC:
3144 		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
3145 		break;
3146 	case QED_MFW_TLV_ETH:
3147 		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
3148 		break;
3149 	case QED_MFW_TLV_FCOE:
3150 		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
3151 		break;
3152 	case QED_MFW_TLV_ISCSI:
3153 		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
3154 		break;
3155 	default:
3156 		break;
3157 	}
3158 
3159 	return 0;
3160 }
3161