1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/stddef.h>
34 #include <linux/pci.h>
35 #include <linux/kernel.h>
36 #include <linux/slab.h>
37 #include <linux/delay.h>
38 #include <asm/byteorder.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/string.h>
41 #include <linux/module.h>
42 #include <linux/interrupt.h>
43 #include <linux/workqueue.h>
44 #include <linux/ethtool.h>
45 #include <linux/etherdevice.h>
46 #include <linux/vmalloc.h>
47 #include <linux/crash_dump.h>
48 #include <linux/crc32.h>
49 #include <linux/qed/qed_if.h>
50 #include <linux/qed/qed_ll2_if.h>
51 #include <net/devlink.h>
52 #include <linux/aer.h>
53 
54 #include "qed.h"
55 #include "qed_sriov.h"
56 #include "qed_sp.h"
57 #include "qed_dev_api.h"
58 #include "qed_ll2.h"
59 #include "qed_fcoe.h"
60 #include "qed_iscsi.h"
61 
62 #include "qed_mcp.h"
63 #include "qed_reg_addr.h"
64 #include "qed_hw.h"
65 #include "qed_selftest.h"
66 #include "qed_debug.h"
67 
68 #define QED_ROCE_QPS			(8192)
69 #define QED_ROCE_DPIS			(8)
70 #define QED_RDMA_SRQS                   QED_ROCE_QPS
71 #define QED_NVM_CFG_GET_FLAGS		0xA
72 #define QED_NVM_CFG_GET_PF_FLAGS	0x1A
73 #define QED_NVM_CFG_MAX_ATTRS		50
74 
75 static char version[] =
76 	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
77 
78 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 
82 #define FW_FILE_VERSION				\
83 	__stringify(FW_MAJOR_VERSION) "."	\
84 	__stringify(FW_MINOR_VERSION) "."	\
85 	__stringify(FW_REVISION_VERSION) "."	\
86 	__stringify(FW_ENGINEERING_VERSION)
87 
88 #define QED_FW_FILE_NAME	\
89 	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
90 
91 MODULE_FIRMWARE(QED_FW_FILE_NAME);
92 
93 static int __init qed_init(void)
94 {
95 	pr_info("%s", version);
96 
97 	return 0;
98 }
99 
100 static void __exit qed_cleanup(void)
101 {
102 	pr_notice("qed_cleanup called\n");
103 }
104 
105 module_init(qed_init);
106 module_exit(qed_cleanup);
107 
108 /* Check if the DMA controller on the machine can properly handle the DMA
109  * addressing required by the device.
110 */
111 static int qed_set_coherency_mask(struct qed_dev *cdev)
112 {
113 	struct device *dev = &cdev->pdev->dev;
114 
115 	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
116 		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
117 			DP_NOTICE(cdev,
118 				  "Can't request 64-bit consistent allocations\n");
119 			return -EIO;
120 		}
121 	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
122 		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
123 		return -EIO;
124 	}
125 
126 	return 0;
127 }
128 
129 static void qed_free_pci(struct qed_dev *cdev)
130 {
131 	struct pci_dev *pdev = cdev->pdev;
132 
133 	pci_disable_pcie_error_reporting(pdev);
134 
135 	if (cdev->doorbells && cdev->db_size)
136 		iounmap(cdev->doorbells);
137 	if (cdev->regview)
138 		iounmap(cdev->regview);
139 	if (atomic_read(&pdev->enable_cnt) == 1)
140 		pci_release_regions(pdev);
141 
142 	pci_disable_device(pdev);
143 }
144 
145 #define PCI_REVISION_ID_ERROR_VAL	0xff
146 
147 /* Performs PCI initializations as well as initializing PCI-related parameters
148  * in the device structrue. Returns 0 in case of success.
149  */
150 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
151 {
152 	u8 rev_id;
153 	int rc;
154 
155 	cdev->pdev = pdev;
156 
157 	rc = pci_enable_device(pdev);
158 	if (rc) {
159 		DP_NOTICE(cdev, "Cannot enable PCI device\n");
160 		goto err0;
161 	}
162 
163 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
164 		DP_NOTICE(cdev, "No memory region found in bar #0\n");
165 		rc = -EIO;
166 		goto err1;
167 	}
168 
169 	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
170 		DP_NOTICE(cdev, "No memory region found in bar #2\n");
171 		rc = -EIO;
172 		goto err1;
173 	}
174 
175 	if (atomic_read(&pdev->enable_cnt) == 1) {
176 		rc = pci_request_regions(pdev, "qed");
177 		if (rc) {
178 			DP_NOTICE(cdev,
179 				  "Failed to request PCI memory resources\n");
180 			goto err1;
181 		}
182 		pci_set_master(pdev);
183 		pci_save_state(pdev);
184 	}
185 
186 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
187 	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
188 		DP_NOTICE(cdev,
189 			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
190 			  rev_id);
191 		rc = -ENODEV;
192 		goto err2;
193 	}
194 	if (!pci_is_pcie(pdev)) {
195 		DP_NOTICE(cdev, "The bus is not PCI Express\n");
196 		rc = -EIO;
197 		goto err2;
198 	}
199 
200 	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
201 	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
202 		DP_NOTICE(cdev, "Cannot find power management capability\n");
203 
204 	rc = qed_set_coherency_mask(cdev);
205 	if (rc)
206 		goto err2;
207 
208 	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
209 	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
210 	cdev->pci_params.irq = pdev->irq;
211 
212 	cdev->regview = pci_ioremap_bar(pdev, 0);
213 	if (!cdev->regview) {
214 		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
215 		rc = -ENOMEM;
216 		goto err2;
217 	}
218 
219 	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
220 	cdev->db_size = pci_resource_len(cdev->pdev, 2);
221 	if (!cdev->db_size) {
222 		if (IS_PF(cdev)) {
223 			DP_NOTICE(cdev, "No Doorbell bar available\n");
224 			return -EINVAL;
225 		} else {
226 			return 0;
227 		}
228 	}
229 
230 	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
231 
232 	if (!cdev->doorbells) {
233 		DP_NOTICE(cdev, "Cannot map doorbell space\n");
234 		return -ENOMEM;
235 	}
236 
237 	/* AER (Advanced Error reporting) configuration */
238 	rc = pci_enable_pcie_error_reporting(pdev);
239 	if (rc)
240 		DP_VERBOSE(cdev, NETIF_MSG_DRV,
241 			   "Failed to configure PCIe AER [%d]\n", rc);
242 
243 	return 0;
244 
245 err2:
246 	pci_release_regions(pdev);
247 err1:
248 	pci_disable_device(pdev);
249 err0:
250 	return rc;
251 }
252 
253 int qed_fill_dev_info(struct qed_dev *cdev,
254 		      struct qed_dev_info *dev_info)
255 {
256 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
257 	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
258 	struct qed_tunnel_info *tun = &cdev->tunnel;
259 	struct qed_ptt  *ptt;
260 
261 	memset(dev_info, 0, sizeof(struct qed_dev_info));
262 
263 	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
264 	    tun->vxlan.b_mode_enabled)
265 		dev_info->vxlan_enable = true;
266 
267 	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
268 	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
269 	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
270 		dev_info->gre_enable = true;
271 
272 	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
273 	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
274 	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
275 		dev_info->geneve_enable = true;
276 
277 	dev_info->num_hwfns = cdev->num_hwfns;
278 	dev_info->pci_mem_start = cdev->pci_params.mem_start;
279 	dev_info->pci_mem_end = cdev->pci_params.mem_end;
280 	dev_info->pci_irq = cdev->pci_params.irq;
281 	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
282 	dev_info->dev_type = cdev->type;
283 	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
284 
285 	if (IS_PF(cdev)) {
286 		dev_info->fw_major = FW_MAJOR_VERSION;
287 		dev_info->fw_minor = FW_MINOR_VERSION;
288 		dev_info->fw_rev = FW_REVISION_VERSION;
289 		dev_info->fw_eng = FW_ENGINEERING_VERSION;
290 		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
291 						       &cdev->mf_bits);
292 		dev_info->tx_switching = true;
293 
294 		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
295 			dev_info->wol_support = true;
296 
297 		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
298 
299 		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
300 	} else {
301 		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
302 				      &dev_info->fw_minor, &dev_info->fw_rev,
303 				      &dev_info->fw_eng);
304 	}
305 
306 	if (IS_PF(cdev)) {
307 		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
308 		if (ptt) {
309 			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
310 					    &dev_info->mfw_rev, NULL);
311 
312 			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
313 					    &dev_info->mbi_version);
314 
315 			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
316 					       &dev_info->flash_size);
317 
318 			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
319 		}
320 	} else {
321 		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
322 				    &dev_info->mfw_rev, NULL);
323 	}
324 
325 	dev_info->mtu = hw_info->mtu;
326 
327 	return 0;
328 }
329 
330 static void qed_free_cdev(struct qed_dev *cdev)
331 {
332 	kfree((void *)cdev);
333 }
334 
335 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
336 {
337 	struct qed_dev *cdev;
338 
339 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
340 	if (!cdev)
341 		return cdev;
342 
343 	qed_init_struct(cdev);
344 
345 	return cdev;
346 }
347 
348 /* Sets the requested power state */
349 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
350 {
351 	if (!cdev)
352 		return -ENODEV;
353 
354 	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
355 	return 0;
356 }
357 
358 struct qed_devlink {
359 	struct qed_dev *cdev;
360 };
361 
362 enum qed_devlink_param_id {
363 	QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
364 	QED_DEVLINK_PARAM_ID_IWARP_CMT,
365 };
366 
367 static int qed_dl_param_get(struct devlink *dl, u32 id,
368 			    struct devlink_param_gset_ctx *ctx)
369 {
370 	struct qed_devlink *qed_dl;
371 	struct qed_dev *cdev;
372 
373 	qed_dl = devlink_priv(dl);
374 	cdev = qed_dl->cdev;
375 	ctx->val.vbool = cdev->iwarp_cmt;
376 
377 	return 0;
378 }
379 
380 static int qed_dl_param_set(struct devlink *dl, u32 id,
381 			    struct devlink_param_gset_ctx *ctx)
382 {
383 	struct qed_devlink *qed_dl;
384 	struct qed_dev *cdev;
385 
386 	qed_dl = devlink_priv(dl);
387 	cdev = qed_dl->cdev;
388 	cdev->iwarp_cmt = ctx->val.vbool;
389 
390 	return 0;
391 }
392 
393 static const struct devlink_param qed_devlink_params[] = {
394 	DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
395 			     "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
396 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
397 			     qed_dl_param_get, qed_dl_param_set, NULL),
398 };
399 
400 static const struct devlink_ops qed_dl_ops;
401 
402 static int qed_devlink_register(struct qed_dev *cdev)
403 {
404 	union devlink_param_value value;
405 	struct qed_devlink *qed_dl;
406 	struct devlink *dl;
407 	int rc;
408 
409 	dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
410 	if (!dl)
411 		return -ENOMEM;
412 
413 	qed_dl = devlink_priv(dl);
414 
415 	cdev->dl = dl;
416 	qed_dl->cdev = cdev;
417 
418 	rc = devlink_register(dl, &cdev->pdev->dev);
419 	if (rc)
420 		goto err_free;
421 
422 	rc = devlink_params_register(dl, qed_devlink_params,
423 				     ARRAY_SIZE(qed_devlink_params));
424 	if (rc)
425 		goto err_unregister;
426 
427 	value.vbool = false;
428 	devlink_param_driverinit_value_set(dl,
429 					   QED_DEVLINK_PARAM_ID_IWARP_CMT,
430 					   value);
431 
432 	devlink_params_publish(dl);
433 	cdev->iwarp_cmt = false;
434 
435 	return 0;
436 
437 err_unregister:
438 	devlink_unregister(dl);
439 
440 err_free:
441 	cdev->dl = NULL;
442 	devlink_free(dl);
443 
444 	return rc;
445 }
446 
447 static void qed_devlink_unregister(struct qed_dev *cdev)
448 {
449 	if (!cdev->dl)
450 		return;
451 
452 	devlink_params_unregister(cdev->dl, qed_devlink_params,
453 				  ARRAY_SIZE(qed_devlink_params));
454 
455 	devlink_unregister(cdev->dl);
456 	devlink_free(cdev->dl);
457 }
458 
459 /* probing */
460 static struct qed_dev *qed_probe(struct pci_dev *pdev,
461 				 struct qed_probe_params *params)
462 {
463 	struct qed_dev *cdev;
464 	int rc;
465 
466 	cdev = qed_alloc_cdev(pdev);
467 	if (!cdev)
468 		goto err0;
469 
470 	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
471 	cdev->protocol = params->protocol;
472 
473 	if (params->is_vf)
474 		cdev->b_is_vf = true;
475 
476 	qed_init_dp(cdev, params->dp_module, params->dp_level);
477 
478 	cdev->recov_in_prog = params->recov_in_prog;
479 
480 	rc = qed_init_pci(cdev, pdev);
481 	if (rc) {
482 		DP_ERR(cdev, "init pci failed\n");
483 		goto err1;
484 	}
485 	DP_INFO(cdev, "PCI init completed successfully\n");
486 
487 	rc = qed_devlink_register(cdev);
488 	if (rc) {
489 		DP_INFO(cdev, "Failed to register devlink.\n");
490 		goto err2;
491 	}
492 
493 	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
494 	if (rc) {
495 		DP_ERR(cdev, "hw prepare failed\n");
496 		goto err2;
497 	}
498 
499 	DP_INFO(cdev, "qed_probe completed successfully\n");
500 
501 	return cdev;
502 
503 err2:
504 	qed_free_pci(cdev);
505 err1:
506 	qed_free_cdev(cdev);
507 err0:
508 	return NULL;
509 }
510 
511 static void qed_remove(struct qed_dev *cdev)
512 {
513 	if (!cdev)
514 		return;
515 
516 	qed_hw_remove(cdev);
517 
518 	qed_free_pci(cdev);
519 
520 	qed_set_power_state(cdev, PCI_D3hot);
521 
522 	qed_devlink_unregister(cdev);
523 
524 	qed_free_cdev(cdev);
525 }
526 
527 static void qed_disable_msix(struct qed_dev *cdev)
528 {
529 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
530 		pci_disable_msix(cdev->pdev);
531 		kfree(cdev->int_params.msix_table);
532 	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
533 		pci_disable_msi(cdev->pdev);
534 	}
535 
536 	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
537 }
538 
539 static int qed_enable_msix(struct qed_dev *cdev,
540 			   struct qed_int_params *int_params)
541 {
542 	int i, rc, cnt;
543 
544 	cnt = int_params->in.num_vectors;
545 
546 	for (i = 0; i < cnt; i++)
547 		int_params->msix_table[i].entry = i;
548 
549 	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
550 				   int_params->in.min_msix_cnt, cnt);
551 	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
552 	    (rc % cdev->num_hwfns)) {
553 		pci_disable_msix(cdev->pdev);
554 
555 		/* If fastpath is initialized, we need at least one interrupt
556 		 * per hwfn [and the slow path interrupts]. New requested number
557 		 * should be a multiple of the number of hwfns.
558 		 */
559 		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
560 		DP_NOTICE(cdev,
561 			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
562 			  cnt, int_params->in.num_vectors);
563 		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
564 					   cnt);
565 		if (!rc)
566 			rc = cnt;
567 	}
568 
569 	if (rc > 0) {
570 		/* MSI-x configuration was achieved */
571 		int_params->out.int_mode = QED_INT_MODE_MSIX;
572 		int_params->out.num_vectors = rc;
573 		rc = 0;
574 	} else {
575 		DP_NOTICE(cdev,
576 			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
577 			  cnt, rc);
578 	}
579 
580 	return rc;
581 }
582 
583 /* This function outputs the int mode and the number of enabled msix vector */
584 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
585 {
586 	struct qed_int_params *int_params = &cdev->int_params;
587 	struct msix_entry *tbl;
588 	int rc = 0, cnt;
589 
590 	switch (int_params->in.int_mode) {
591 	case QED_INT_MODE_MSIX:
592 		/* Allocate MSIX table */
593 		cnt = int_params->in.num_vectors;
594 		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
595 		if (!int_params->msix_table) {
596 			rc = -ENOMEM;
597 			goto out;
598 		}
599 
600 		/* Enable MSIX */
601 		rc = qed_enable_msix(cdev, int_params);
602 		if (!rc)
603 			goto out;
604 
605 		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
606 		kfree(int_params->msix_table);
607 		if (force_mode)
608 			goto out;
609 		/* Fallthrough */
610 
611 	case QED_INT_MODE_MSI:
612 		if (cdev->num_hwfns == 1) {
613 			rc = pci_enable_msi(cdev->pdev);
614 			if (!rc) {
615 				int_params->out.int_mode = QED_INT_MODE_MSI;
616 				goto out;
617 			}
618 
619 			DP_NOTICE(cdev, "Failed to enable MSI\n");
620 			if (force_mode)
621 				goto out;
622 		}
623 		/* Fallthrough */
624 
625 	case QED_INT_MODE_INTA:
626 			int_params->out.int_mode = QED_INT_MODE_INTA;
627 			rc = 0;
628 			goto out;
629 	default:
630 		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
631 			  int_params->in.int_mode);
632 		rc = -EINVAL;
633 	}
634 
635 out:
636 	if (!rc)
637 		DP_INFO(cdev, "Using %s interrupts\n",
638 			int_params->out.int_mode == QED_INT_MODE_INTA ?
639 			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
640 			"MSI" : "MSIX");
641 	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
642 
643 	return rc;
644 }
645 
646 static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
647 				    int index, void(*handler)(void *))
648 {
649 	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
650 	int relative_idx = index / cdev->num_hwfns;
651 
652 	hwfn->simd_proto_handler[relative_idx].func = handler;
653 	hwfn->simd_proto_handler[relative_idx].token = token;
654 }
655 
656 static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
657 {
658 	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
659 	int relative_idx = index / cdev->num_hwfns;
660 
661 	memset(&hwfn->simd_proto_handler[relative_idx], 0,
662 	       sizeof(struct qed_simd_fp_handler));
663 }
664 
665 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
666 {
667 	tasklet_schedule((struct tasklet_struct *)tasklet);
668 	return IRQ_HANDLED;
669 }
670 
671 static irqreturn_t qed_single_int(int irq, void *dev_instance)
672 {
673 	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
674 	struct qed_hwfn *hwfn;
675 	irqreturn_t rc = IRQ_NONE;
676 	u64 status;
677 	int i, j;
678 
679 	for (i = 0; i < cdev->num_hwfns; i++) {
680 		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
681 
682 		if (!status)
683 			continue;
684 
685 		hwfn = &cdev->hwfns[i];
686 
687 		/* Slowpath interrupt */
688 		if (unlikely(status & 0x1)) {
689 			tasklet_schedule(hwfn->sp_dpc);
690 			status &= ~0x1;
691 			rc = IRQ_HANDLED;
692 		}
693 
694 		/* Fastpath interrupts */
695 		for (j = 0; j < 64; j++) {
696 			if ((0x2ULL << j) & status) {
697 				struct qed_simd_fp_handler *p_handler =
698 					&hwfn->simd_proto_handler[j];
699 
700 				if (p_handler->func)
701 					p_handler->func(p_handler->token);
702 				else
703 					DP_NOTICE(hwfn,
704 						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
705 						  j, status);
706 
707 				status &= ~(0x2ULL << j);
708 				rc = IRQ_HANDLED;
709 			}
710 		}
711 
712 		if (unlikely(status))
713 			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
714 				   "got an unknown interrupt status 0x%llx\n",
715 				   status);
716 	}
717 
718 	return rc;
719 }
720 
721 int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
722 {
723 	struct qed_dev *cdev = hwfn->cdev;
724 	u32 int_mode;
725 	int rc = 0;
726 	u8 id;
727 
728 	int_mode = cdev->int_params.out.int_mode;
729 	if (int_mode == QED_INT_MODE_MSIX) {
730 		id = hwfn->my_id;
731 		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
732 			 id, cdev->pdev->bus->number,
733 			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
734 		rc = request_irq(cdev->int_params.msix_table[id].vector,
735 				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
736 	} else {
737 		unsigned long flags = 0;
738 
739 		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
740 			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
741 			 PCI_FUNC(cdev->pdev->devfn));
742 
743 		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
744 			flags |= IRQF_SHARED;
745 
746 		rc = request_irq(cdev->pdev->irq, qed_single_int,
747 				 flags, cdev->name, cdev);
748 	}
749 
750 	if (rc)
751 		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
752 	else
753 		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
754 			   "Requested slowpath %s\n",
755 			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
756 
757 	return rc;
758 }
759 
760 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
761 {
762 	/* Calling the disable function will make sure that any
763 	 * currently-running function is completed. The following call to the
764 	 * enable function makes this sequence a flush-like operation.
765 	 */
766 	if (p_hwfn->b_sp_dpc_enabled) {
767 		tasklet_disable(p_hwfn->sp_dpc);
768 		tasklet_enable(p_hwfn->sp_dpc);
769 	}
770 }
771 
772 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
773 {
774 	struct qed_dev *cdev = p_hwfn->cdev;
775 	u8 id = p_hwfn->my_id;
776 	u32 int_mode;
777 
778 	int_mode = cdev->int_params.out.int_mode;
779 	if (int_mode == QED_INT_MODE_MSIX)
780 		synchronize_irq(cdev->int_params.msix_table[id].vector);
781 	else
782 		synchronize_irq(cdev->pdev->irq);
783 
784 	qed_slowpath_tasklet_flush(p_hwfn);
785 }
786 
787 static void qed_slowpath_irq_free(struct qed_dev *cdev)
788 {
789 	int i;
790 
791 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
792 		for_each_hwfn(cdev, i) {
793 			if (!cdev->hwfns[i].b_int_requested)
794 				break;
795 			synchronize_irq(cdev->int_params.msix_table[i].vector);
796 			free_irq(cdev->int_params.msix_table[i].vector,
797 				 cdev->hwfns[i].sp_dpc);
798 		}
799 	} else {
800 		if (QED_LEADING_HWFN(cdev)->b_int_requested)
801 			free_irq(cdev->pdev->irq, cdev);
802 	}
803 	qed_int_disable_post_isr_release(cdev);
804 }
805 
806 static int qed_nic_stop(struct qed_dev *cdev)
807 {
808 	int i, rc;
809 
810 	rc = qed_hw_stop(cdev);
811 
812 	for (i = 0; i < cdev->num_hwfns; i++) {
813 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
814 
815 		if (p_hwfn->b_sp_dpc_enabled) {
816 			tasklet_disable(p_hwfn->sp_dpc);
817 			p_hwfn->b_sp_dpc_enabled = false;
818 			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
819 				   "Disabled sp tasklet [hwfn %d] at %p\n",
820 				   i, p_hwfn->sp_dpc);
821 		}
822 	}
823 
824 	qed_dbg_pf_exit(cdev);
825 
826 	return rc;
827 }
828 
829 static int qed_nic_setup(struct qed_dev *cdev)
830 {
831 	int rc, i;
832 
833 	/* Determine if interface is going to require LL2 */
834 	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
835 		for (i = 0; i < cdev->num_hwfns; i++) {
836 			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
837 
838 			p_hwfn->using_ll2 = true;
839 		}
840 	}
841 
842 	rc = qed_resc_alloc(cdev);
843 	if (rc)
844 		return rc;
845 
846 	DP_INFO(cdev, "Allocated qed resources\n");
847 
848 	qed_resc_setup(cdev);
849 
850 	return rc;
851 }
852 
853 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
854 {
855 	int limit = 0;
856 
857 	/* Mark the fastpath as free/used */
858 	cdev->int_params.fp_initialized = cnt ? true : false;
859 
860 	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
861 		limit = cdev->num_hwfns * 63;
862 	else if (cdev->int_params.fp_msix_cnt)
863 		limit = cdev->int_params.fp_msix_cnt;
864 
865 	if (!limit)
866 		return -ENOMEM;
867 
868 	return min_t(int, cnt, limit);
869 }
870 
871 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
872 {
873 	memset(info, 0, sizeof(struct qed_int_info));
874 
875 	if (!cdev->int_params.fp_initialized) {
876 		DP_INFO(cdev,
877 			"Protocol driver requested interrupt information, but its support is not yet configured\n");
878 		return -EINVAL;
879 	}
880 
881 	/* Need to expose only MSI-X information; Single IRQ is handled solely
882 	 * by qed.
883 	 */
884 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
885 		int msix_base = cdev->int_params.fp_msix_base;
886 
887 		info->msix_cnt = cdev->int_params.fp_msix_cnt;
888 		info->msix = &cdev->int_params.msix_table[msix_base];
889 	}
890 
891 	return 0;
892 }
893 
894 static int qed_slowpath_setup_int(struct qed_dev *cdev,
895 				  enum qed_int_mode int_mode)
896 {
897 	struct qed_sb_cnt_info sb_cnt_info;
898 	int num_l2_queues = 0;
899 	int rc;
900 	int i;
901 
902 	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
903 		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
904 		return -EINVAL;
905 	}
906 
907 	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
908 	cdev->int_params.in.int_mode = int_mode;
909 	for_each_hwfn(cdev, i) {
910 		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
911 		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
912 		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
913 		cdev->int_params.in.num_vectors++; /* slowpath */
914 	}
915 
916 	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
917 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
918 
919 	if (is_kdump_kernel()) {
920 		DP_INFO(cdev,
921 			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
922 			cdev->int_params.in.min_msix_cnt);
923 		cdev->int_params.in.num_vectors =
924 			cdev->int_params.in.min_msix_cnt;
925 	}
926 
927 	rc = qed_set_int_mode(cdev, false);
928 	if (rc)  {
929 		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
930 		return rc;
931 	}
932 
933 	cdev->int_params.fp_msix_base = cdev->num_hwfns;
934 	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
935 				       cdev->num_hwfns;
936 
937 	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
938 	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
939 		return 0;
940 
941 	for_each_hwfn(cdev, i)
942 		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
943 
944 	DP_VERBOSE(cdev, QED_MSG_RDMA,
945 		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
946 		   cdev->int_params.fp_msix_cnt, num_l2_queues);
947 
948 	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
949 		cdev->int_params.rdma_msix_cnt =
950 			(cdev->int_params.fp_msix_cnt - num_l2_queues)
951 			/ cdev->num_hwfns;
952 		cdev->int_params.rdma_msix_base =
953 			cdev->int_params.fp_msix_base + num_l2_queues;
954 		cdev->int_params.fp_msix_cnt = num_l2_queues;
955 	} else {
956 		cdev->int_params.rdma_msix_cnt = 0;
957 	}
958 
959 	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
960 		   cdev->int_params.rdma_msix_cnt,
961 		   cdev->int_params.rdma_msix_base);
962 
963 	return 0;
964 }
965 
966 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
967 {
968 	int rc;
969 
970 	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
971 	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
972 
973 	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
974 			    &cdev->int_params.in.num_vectors);
975 	if (cdev->num_hwfns > 1) {
976 		u8 vectors = 0;
977 
978 		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
979 		cdev->int_params.in.num_vectors += vectors;
980 	}
981 
982 	/* We want a minimum of one fastpath vector per vf hwfn */
983 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
984 
985 	rc = qed_set_int_mode(cdev, true);
986 	if (rc)
987 		return rc;
988 
989 	cdev->int_params.fp_msix_base = 0;
990 	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
991 
992 	return 0;
993 }
994 
995 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
996 		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
997 {
998 	int rc;
999 
1000 	p_hwfn->stream->next_in = input_buf;
1001 	p_hwfn->stream->avail_in = input_len;
1002 	p_hwfn->stream->next_out = unzip_buf;
1003 	p_hwfn->stream->avail_out = max_size;
1004 
1005 	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
1006 
1007 	if (rc != Z_OK) {
1008 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
1009 			   rc);
1010 		return 0;
1011 	}
1012 
1013 	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
1014 	zlib_inflateEnd(p_hwfn->stream);
1015 
1016 	if (rc != Z_OK && rc != Z_STREAM_END) {
1017 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
1018 			   p_hwfn->stream->msg, rc);
1019 		return 0;
1020 	}
1021 
1022 	return p_hwfn->stream->total_out / 4;
1023 }
1024 
1025 static int qed_alloc_stream_mem(struct qed_dev *cdev)
1026 {
1027 	int i;
1028 	void *workspace;
1029 
1030 	for_each_hwfn(cdev, i) {
1031 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1032 
1033 		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
1034 		if (!p_hwfn->stream)
1035 			return -ENOMEM;
1036 
1037 		workspace = vzalloc(zlib_inflate_workspacesize());
1038 		if (!workspace)
1039 			return -ENOMEM;
1040 		p_hwfn->stream->workspace = workspace;
1041 	}
1042 
1043 	return 0;
1044 }
1045 
1046 static void qed_free_stream_mem(struct qed_dev *cdev)
1047 {
1048 	int i;
1049 
1050 	for_each_hwfn(cdev, i) {
1051 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1052 
1053 		if (!p_hwfn->stream)
1054 			return;
1055 
1056 		vfree(p_hwfn->stream->workspace);
1057 		kfree(p_hwfn->stream);
1058 	}
1059 }
1060 
1061 static void qed_update_pf_params(struct qed_dev *cdev,
1062 				 struct qed_pf_params *params)
1063 {
1064 	int i;
1065 
1066 	if (IS_ENABLED(CONFIG_QED_RDMA)) {
1067 		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
1068 		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
1069 		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
1070 		/* divide by 3 the MRs to avoid MF ILT overflow */
1071 		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
1072 	}
1073 
1074 	if (cdev->num_hwfns > 1 || IS_VF(cdev))
1075 		params->eth_pf_params.num_arfs_filters = 0;
1076 
1077 	/* In case we might support RDMA, don't allow qede to be greedy
1078 	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
1079 	 * per hwfn.
1080 	 */
1081 	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
1082 		u16 *num_cons;
1083 
1084 		num_cons = &params->eth_pf_params.num_cons;
1085 		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
1086 	}
1087 
1088 	for (i = 0; i < cdev->num_hwfns; i++) {
1089 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1090 
1091 		p_hwfn->pf_params = *params;
1092 	}
1093 }
1094 
1095 #define QED_PERIODIC_DB_REC_COUNT		10
1096 #define QED_PERIODIC_DB_REC_INTERVAL_MS		100
1097 #define QED_PERIODIC_DB_REC_INTERVAL \
1098 	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
1099 
1100 static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
1101 				     enum qed_slowpath_wq_flag wq_flag,
1102 				     unsigned long delay)
1103 {
1104 	if (!hwfn->slowpath_wq_active)
1105 		return -EINVAL;
1106 
1107 	/* Memory barrier for setting atomic bit */
1108 	smp_mb__before_atomic();
1109 	set_bit(wq_flag, &hwfn->slowpath_task_flags);
1110 	smp_mb__after_atomic();
1111 	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
1112 
1113 	return 0;
1114 }
1115 
1116 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
1117 {
1118 	/* Reset periodic Doorbell Recovery counter */
1119 	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;
1120 
1121 	/* Don't schedule periodic Doorbell Recovery if already scheduled */
1122 	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
1123 		     &p_hwfn->slowpath_task_flags))
1124 		return;
1125 
1126 	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
1127 				  QED_PERIODIC_DB_REC_INTERVAL);
1128 }
1129 
1130 static void qed_slowpath_wq_stop(struct qed_dev *cdev)
1131 {
1132 	int i;
1133 
1134 	if (IS_VF(cdev))
1135 		return;
1136 
1137 	for_each_hwfn(cdev, i) {
1138 		if (!cdev->hwfns[i].slowpath_wq)
1139 			continue;
1140 
1141 		/* Stop queuing new delayed works */
1142 		cdev->hwfns[i].slowpath_wq_active = false;
1143 
1144 		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
1145 		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
1146 	}
1147 }
1148 
1149 static void qed_slowpath_task(struct work_struct *work)
1150 {
1151 	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
1152 					     slowpath_task.work);
1153 	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
1154 
1155 	if (!ptt) {
1156 		if (hwfn->slowpath_wq_active)
1157 			queue_delayed_work(hwfn->slowpath_wq,
1158 					   &hwfn->slowpath_task, 0);
1159 
1160 		return;
1161 	}
1162 
1163 	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
1164 			       &hwfn->slowpath_task_flags))
1165 		qed_mfw_process_tlv_req(hwfn, ptt);
1166 
1167 	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
1168 			       &hwfn->slowpath_task_flags)) {
1169 		qed_db_rec_handler(hwfn, ptt);
1170 		if (hwfn->periodic_db_rec_count--)
1171 			qed_slowpath_delayed_work(hwfn,
1172 						  QED_SLOWPATH_PERIODIC_DB_REC,
1173 						  QED_PERIODIC_DB_REC_INTERVAL);
1174 	}
1175 
1176 	qed_ptt_release(hwfn, ptt);
1177 }
1178 
1179 static int qed_slowpath_wq_start(struct qed_dev *cdev)
1180 {
1181 	struct qed_hwfn *hwfn;
1182 	char name[NAME_SIZE];
1183 	int i;
1184 
1185 	if (IS_VF(cdev))
1186 		return 0;
1187 
1188 	for_each_hwfn(cdev, i) {
1189 		hwfn = &cdev->hwfns[i];
1190 
1191 		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
1192 			 cdev->pdev->bus->number,
1193 			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
1194 
1195 		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
1196 		if (!hwfn->slowpath_wq) {
1197 			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
1198 			return -ENOMEM;
1199 		}
1200 
1201 		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
1202 		hwfn->slowpath_wq_active = true;
1203 	}
1204 
1205 	return 0;
1206 }
1207 
1208 static int qed_slowpath_start(struct qed_dev *cdev,
1209 			      struct qed_slowpath_params *params)
1210 {
1211 	struct qed_drv_load_params drv_load_params;
1212 	struct qed_hw_init_params hw_init_params;
1213 	struct qed_mcp_drv_version drv_version;
1214 	struct qed_tunnel_info tunn_info;
1215 	const u8 *data = NULL;
1216 	struct qed_hwfn *hwfn;
1217 	struct qed_ptt *p_ptt;
1218 	int rc = -EINVAL;
1219 
1220 	if (qed_iov_wq_start(cdev))
1221 		goto err;
1222 
1223 	if (qed_slowpath_wq_start(cdev))
1224 		goto err;
1225 
1226 	if (IS_PF(cdev)) {
1227 		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
1228 				      &cdev->pdev->dev);
1229 		if (rc) {
1230 			DP_NOTICE(cdev,
1231 				  "Failed to find fw file - /lib/firmware/%s\n",
1232 				  QED_FW_FILE_NAME);
1233 			goto err;
1234 		}
1235 
1236 		if (cdev->num_hwfns == 1) {
1237 			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1238 			if (p_ptt) {
1239 				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
1240 			} else {
1241 				DP_NOTICE(cdev,
1242 					  "Failed to acquire PTT for aRFS\n");
1243 				goto err;
1244 			}
1245 		}
1246 	}
1247 
1248 	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
1249 	rc = qed_nic_setup(cdev);
1250 	if (rc)
1251 		goto err;
1252 
1253 	if (IS_PF(cdev))
1254 		rc = qed_slowpath_setup_int(cdev, params->int_mode);
1255 	else
1256 		rc = qed_slowpath_vf_setup_int(cdev);
1257 	if (rc)
1258 		goto err1;
1259 
1260 	if (IS_PF(cdev)) {
1261 		/* Allocate stream for unzipping */
1262 		rc = qed_alloc_stream_mem(cdev);
1263 		if (rc)
1264 			goto err2;
1265 
1266 		/* First Dword used to differentiate between various sources */
1267 		data = cdev->firmware->data + sizeof(u32);
1268 
1269 		qed_dbg_pf_init(cdev);
1270 	}
1271 
1272 	/* Start the slowpath */
1273 	memset(&hw_init_params, 0, sizeof(hw_init_params));
1274 	memset(&tunn_info, 0, sizeof(tunn_info));
1275 	tunn_info.vxlan.b_mode_enabled = true;
1276 	tunn_info.l2_gre.b_mode_enabled = true;
1277 	tunn_info.ip_gre.b_mode_enabled = true;
1278 	tunn_info.l2_geneve.b_mode_enabled = true;
1279 	tunn_info.ip_geneve.b_mode_enabled = true;
1280 	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1281 	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1282 	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1283 	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1284 	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1285 	hw_init_params.p_tunn = &tunn_info;
1286 	hw_init_params.b_hw_start = true;
1287 	hw_init_params.int_mode = cdev->int_params.out.int_mode;
1288 	hw_init_params.allow_npar_tx_switch = true;
1289 	hw_init_params.bin_fw_data = data;
1290 
1291 	memset(&drv_load_params, 0, sizeof(drv_load_params));
1292 	drv_load_params.is_crash_kernel = is_kdump_kernel();
1293 	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
1294 	drv_load_params.avoid_eng_reset = false;
1295 	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
1296 	hw_init_params.p_drv_load_params = &drv_load_params;
1297 
1298 	rc = qed_hw_init(cdev, &hw_init_params);
1299 	if (rc)
1300 		goto err2;
1301 
1302 	DP_INFO(cdev,
1303 		"HW initialization and function start completed successfully\n");
1304 
1305 	if (IS_PF(cdev)) {
1306 		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
1307 					   BIT(QED_MODE_L2GENEVE_TUNN) |
1308 					   BIT(QED_MODE_IPGENEVE_TUNN) |
1309 					   BIT(QED_MODE_L2GRE_TUNN) |
1310 					   BIT(QED_MODE_IPGRE_TUNN));
1311 	}
1312 
1313 	/* Allocate LL2 interface if needed */
1314 	if (QED_LEADING_HWFN(cdev)->using_ll2) {
1315 		rc = qed_ll2_alloc_if(cdev);
1316 		if (rc)
1317 			goto err3;
1318 	}
1319 	if (IS_PF(cdev)) {
1320 		hwfn = QED_LEADING_HWFN(cdev);
1321 		drv_version.version = (params->drv_major << 24) |
1322 				      (params->drv_minor << 16) |
1323 				      (params->drv_rev << 8) |
1324 				      (params->drv_eng);
1325 		strlcpy(drv_version.name, params->name,
1326 			MCP_DRV_VER_STR_SIZE - 4);
1327 		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
1328 					      &drv_version);
1329 		if (rc) {
1330 			DP_NOTICE(cdev, "Failed sending drv version command\n");
1331 			goto err4;
1332 		}
1333 	}
1334 
1335 	qed_reset_vport_stats(cdev);
1336 
1337 	return 0;
1338 
1339 err4:
1340 	qed_ll2_dealloc_if(cdev);
1341 err3:
1342 	qed_hw_stop(cdev);
1343 err2:
1344 	qed_hw_timers_stop_all(cdev);
1345 	if (IS_PF(cdev))
1346 		qed_slowpath_irq_free(cdev);
1347 	qed_free_stream_mem(cdev);
1348 	qed_disable_msix(cdev);
1349 err1:
1350 	qed_resc_free(cdev);
1351 err:
1352 	if (IS_PF(cdev))
1353 		release_firmware(cdev->firmware);
1354 
1355 	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
1356 	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
1357 		qed_ptt_release(QED_LEADING_HWFN(cdev),
1358 				QED_LEADING_HWFN(cdev)->p_arfs_ptt);
1359 
1360 	qed_iov_wq_stop(cdev, false);
1361 
1362 	qed_slowpath_wq_stop(cdev);
1363 
1364 	return rc;
1365 }
1366 
1367 static int qed_slowpath_stop(struct qed_dev *cdev)
1368 {
1369 	if (!cdev)
1370 		return -ENODEV;
1371 
1372 	qed_slowpath_wq_stop(cdev);
1373 
1374 	qed_ll2_dealloc_if(cdev);
1375 
1376 	if (IS_PF(cdev)) {
1377 		if (cdev->num_hwfns == 1)
1378 			qed_ptt_release(QED_LEADING_HWFN(cdev),
1379 					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
1380 		qed_free_stream_mem(cdev);
1381 		if (IS_QED_ETH_IF(cdev))
1382 			qed_sriov_disable(cdev, true);
1383 	}
1384 
1385 	qed_nic_stop(cdev);
1386 
1387 	if (IS_PF(cdev))
1388 		qed_slowpath_irq_free(cdev);
1389 
1390 	qed_disable_msix(cdev);
1391 
1392 	qed_resc_free(cdev);
1393 
1394 	qed_iov_wq_stop(cdev, true);
1395 
1396 	if (IS_PF(cdev))
1397 		release_firmware(cdev->firmware);
1398 
1399 	return 0;
1400 }
1401 
1402 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
1403 {
1404 	int i;
1405 
1406 	memcpy(cdev->name, name, NAME_SIZE);
1407 	for_each_hwfn(cdev, i)
1408 		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
1409 }
1410 
1411 static u32 qed_sb_init(struct qed_dev *cdev,
1412 		       struct qed_sb_info *sb_info,
1413 		       void *sb_virt_addr,
1414 		       dma_addr_t sb_phy_addr, u16 sb_id,
1415 		       enum qed_sb_type type)
1416 {
1417 	struct qed_hwfn *p_hwfn;
1418 	struct qed_ptt *p_ptt;
1419 	u16 rel_sb_id;
1420 	u32 rc;
1421 
1422 	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1423 	if (type == QED_SB_TYPE_L2_QUEUE) {
1424 		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1425 		rel_sb_id = sb_id / cdev->num_hwfns;
1426 	} else {
1427 		p_hwfn = QED_AFFIN_HWFN(cdev);
1428 		rel_sb_id = sb_id;
1429 	}
1430 
1431 	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1432 		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1433 		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1434 
1435 	if (IS_PF(p_hwfn->cdev)) {
1436 		p_ptt = qed_ptt_acquire(p_hwfn);
1437 		if (!p_ptt)
1438 			return -EBUSY;
1439 
1440 		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
1441 				     sb_phy_addr, rel_sb_id);
1442 		qed_ptt_release(p_hwfn, p_ptt);
1443 	} else {
1444 		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
1445 				     sb_phy_addr, rel_sb_id);
1446 	}
1447 
1448 	return rc;
1449 }
1450 
1451 static u32 qed_sb_release(struct qed_dev *cdev,
1452 			  struct qed_sb_info *sb_info,
1453 			  u16 sb_id,
1454 			  enum qed_sb_type type)
1455 {
1456 	struct qed_hwfn *p_hwfn;
1457 	u16 rel_sb_id;
1458 	u32 rc;
1459 
1460 	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1461 	if (type == QED_SB_TYPE_L2_QUEUE) {
1462 		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1463 		rel_sb_id = sb_id / cdev->num_hwfns;
1464 	} else {
1465 		p_hwfn = QED_AFFIN_HWFN(cdev);
1466 		rel_sb_id = sb_id;
1467 	}
1468 
1469 	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1470 		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1471 		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1472 
1473 	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1474 
1475 	return rc;
1476 }
1477 
1478 static bool qed_can_link_change(struct qed_dev *cdev)
1479 {
1480 	return true;
1481 }
1482 
1483 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1484 {
1485 	struct qed_hwfn *hwfn;
1486 	struct qed_mcp_link_params *link_params;
1487 	struct qed_ptt *ptt;
1488 	u32 sup_caps;
1489 	int rc;
1490 
1491 	if (!cdev)
1492 		return -ENODEV;
1493 
1494 	/* The link should be set only once per PF */
1495 	hwfn = &cdev->hwfns[0];
1496 
1497 	/* When VF wants to set link, force it to read the bulletin instead.
1498 	 * This mimics the PF behavior, where a noitification [both immediate
1499 	 * and possible later] would be generated when changing properties.
1500 	 */
1501 	if (IS_VF(cdev)) {
1502 		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1503 		return 0;
1504 	}
1505 
1506 	ptt = qed_ptt_acquire(hwfn);
1507 	if (!ptt)
1508 		return -EBUSY;
1509 
1510 	link_params = qed_mcp_get_link_params(hwfn);
1511 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1512 		link_params->speed.autoneg = params->autoneg;
1513 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1514 		link_params->speed.advertised_speeds = 0;
1515 		sup_caps = QED_LM_1000baseT_Full_BIT |
1516 			   QED_LM_1000baseKX_Full_BIT |
1517 			   QED_LM_1000baseX_Full_BIT;
1518 		if (params->adv_speeds & sup_caps)
1519 			link_params->speed.advertised_speeds |=
1520 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
1521 		sup_caps = QED_LM_10000baseT_Full_BIT |
1522 			   QED_LM_10000baseKR_Full_BIT |
1523 			   QED_LM_10000baseKX4_Full_BIT |
1524 			   QED_LM_10000baseR_FEC_BIT |
1525 			   QED_LM_10000baseCR_Full_BIT |
1526 			   QED_LM_10000baseSR_Full_BIT |
1527 			   QED_LM_10000baseLR_Full_BIT |
1528 			   QED_LM_10000baseLRM_Full_BIT;
1529 		if (params->adv_speeds & sup_caps)
1530 			link_params->speed.advertised_speeds |=
1531 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
1532 		if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
1533 			link_params->speed.advertised_speeds |=
1534 				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
1535 		sup_caps = QED_LM_25000baseKR_Full_BIT |
1536 			   QED_LM_25000baseCR_Full_BIT |
1537 			   QED_LM_25000baseSR_Full_BIT;
1538 		if (params->adv_speeds & sup_caps)
1539 			link_params->speed.advertised_speeds |=
1540 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
1541 		sup_caps = QED_LM_40000baseLR4_Full_BIT |
1542 			   QED_LM_40000baseKR4_Full_BIT |
1543 			   QED_LM_40000baseCR4_Full_BIT |
1544 			   QED_LM_40000baseSR4_Full_BIT;
1545 		if (params->adv_speeds & sup_caps)
1546 			link_params->speed.advertised_speeds |=
1547 				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
1548 		sup_caps = QED_LM_50000baseKR2_Full_BIT |
1549 			   QED_LM_50000baseCR2_Full_BIT |
1550 			   QED_LM_50000baseSR2_Full_BIT;
1551 		if (params->adv_speeds & sup_caps)
1552 			link_params->speed.advertised_speeds |=
1553 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
1554 		sup_caps = QED_LM_100000baseKR4_Full_BIT |
1555 			   QED_LM_100000baseSR4_Full_BIT |
1556 			   QED_LM_100000baseCR4_Full_BIT |
1557 			   QED_LM_100000baseLR4_ER4_Full_BIT;
1558 		if (params->adv_speeds & sup_caps)
1559 			link_params->speed.advertised_speeds |=
1560 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
1561 	}
1562 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1563 		link_params->speed.forced_speed = params->forced_speed;
1564 	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1565 		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1566 			link_params->pause.autoneg = true;
1567 		else
1568 			link_params->pause.autoneg = false;
1569 		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1570 			link_params->pause.forced_rx = true;
1571 		else
1572 			link_params->pause.forced_rx = false;
1573 		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1574 			link_params->pause.forced_tx = true;
1575 		else
1576 			link_params->pause.forced_tx = false;
1577 	}
1578 	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1579 		switch (params->loopback_mode) {
1580 		case QED_LINK_LOOPBACK_INT_PHY:
1581 			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1582 			break;
1583 		case QED_LINK_LOOPBACK_EXT_PHY:
1584 			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1585 			break;
1586 		case QED_LINK_LOOPBACK_EXT:
1587 			link_params->loopback_mode = ETH_LOOPBACK_EXT;
1588 			break;
1589 		case QED_LINK_LOOPBACK_MAC:
1590 			link_params->loopback_mode = ETH_LOOPBACK_MAC;
1591 			break;
1592 		default:
1593 			link_params->loopback_mode = ETH_LOOPBACK_NONE;
1594 			break;
1595 		}
1596 	}
1597 
1598 	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1599 		memcpy(&link_params->eee, &params->eee,
1600 		       sizeof(link_params->eee));
1601 
1602 	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1603 
1604 	qed_ptt_release(hwfn, ptt);
1605 
1606 	return rc;
1607 }
1608 
1609 static int qed_get_port_type(u32 media_type)
1610 {
1611 	int port_type;
1612 
1613 	switch (media_type) {
1614 	case MEDIA_SFPP_10G_FIBER:
1615 	case MEDIA_SFP_1G_FIBER:
1616 	case MEDIA_XFP_FIBER:
1617 	case MEDIA_MODULE_FIBER:
1618 	case MEDIA_KR:
1619 		port_type = PORT_FIBRE;
1620 		break;
1621 	case MEDIA_DA_TWINAX:
1622 		port_type = PORT_DA;
1623 		break;
1624 	case MEDIA_BASE_T:
1625 		port_type = PORT_TP;
1626 		break;
1627 	case MEDIA_NOT_PRESENT:
1628 		port_type = PORT_NONE;
1629 		break;
1630 	case MEDIA_UNSPECIFIED:
1631 	default:
1632 		port_type = PORT_OTHER;
1633 		break;
1634 	}
1635 	return port_type;
1636 }
1637 
1638 static int qed_get_link_data(struct qed_hwfn *hwfn,
1639 			     struct qed_mcp_link_params *params,
1640 			     struct qed_mcp_link_state *link,
1641 			     struct qed_mcp_link_capabilities *link_caps)
1642 {
1643 	void *p;
1644 
1645 	if (!IS_PF(hwfn->cdev)) {
1646 		qed_vf_get_link_params(hwfn, params);
1647 		qed_vf_get_link_state(hwfn, link);
1648 		qed_vf_get_link_caps(hwfn, link_caps);
1649 
1650 		return 0;
1651 	}
1652 
1653 	p = qed_mcp_get_link_params(hwfn);
1654 	if (!p)
1655 		return -ENXIO;
1656 	memcpy(params, p, sizeof(*params));
1657 
1658 	p = qed_mcp_get_link_state(hwfn);
1659 	if (!p)
1660 		return -ENXIO;
1661 	memcpy(link, p, sizeof(*link));
1662 
1663 	p = qed_mcp_get_link_capabilities(hwfn);
1664 	if (!p)
1665 		return -ENXIO;
1666 	memcpy(link_caps, p, sizeof(*link_caps));
1667 
1668 	return 0;
1669 }
1670 
1671 static void qed_fill_link_capability(struct qed_hwfn *hwfn,
1672 				     struct qed_ptt *ptt, u32 capability,
1673 				     u32 *if_capability)
1674 {
1675 	u32 media_type, tcvr_state, tcvr_type;
1676 	u32 speed_mask, board_cfg;
1677 
1678 	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
1679 		media_type = MEDIA_UNSPECIFIED;
1680 
1681 	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
1682 		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
1683 
1684 	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
1685 		speed_mask = 0xFFFFFFFF;
1686 
1687 	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1688 		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
1689 
1690 	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
1691 		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1692 		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
1693 
1694 	switch (media_type) {
1695 	case MEDIA_DA_TWINAX:
1696 		*if_capability |= QED_LM_FIBRE_BIT;
1697 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1698 			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
1699 		/* For DAC media multiple speed capabilities are supported*/
1700 		capability = capability & speed_mask;
1701 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1702 			*if_capability |= QED_LM_1000baseKX_Full_BIT;
1703 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1704 			*if_capability |= QED_LM_10000baseCR_Full_BIT;
1705 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1706 			*if_capability |= QED_LM_40000baseCR4_Full_BIT;
1707 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1708 			*if_capability |= QED_LM_25000baseCR_Full_BIT;
1709 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1710 			*if_capability |= QED_LM_50000baseCR2_Full_BIT;
1711 		if (capability &
1712 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1713 			*if_capability |= QED_LM_100000baseCR4_Full_BIT;
1714 		break;
1715 	case MEDIA_BASE_T:
1716 		*if_capability |= QED_LM_TP_BIT;
1717 		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
1718 			if (capability &
1719 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1720 				*if_capability |= QED_LM_1000baseT_Full_BIT;
1721 			}
1722 			if (capability &
1723 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1724 				*if_capability |= QED_LM_10000baseT_Full_BIT;
1725 			}
1726 		}
1727 		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
1728 			*if_capability |= QED_LM_FIBRE_BIT;
1729 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
1730 				*if_capability |= QED_LM_1000baseT_Full_BIT;
1731 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
1732 				*if_capability |= QED_LM_10000baseT_Full_BIT;
1733 		}
1734 		break;
1735 	case MEDIA_SFP_1G_FIBER:
1736 	case MEDIA_SFPP_10G_FIBER:
1737 	case MEDIA_XFP_FIBER:
1738 	case MEDIA_MODULE_FIBER:
1739 		*if_capability |= QED_LM_FIBRE_BIT;
1740 		if (capability &
1741 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1742 			if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
1743 			    (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
1744 				*if_capability |= QED_LM_1000baseKX_Full_BIT;
1745 		}
1746 		if (capability &
1747 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1748 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
1749 				*if_capability |= QED_LM_10000baseSR_Full_BIT;
1750 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
1751 				*if_capability |= QED_LM_10000baseLR_Full_BIT;
1752 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
1753 				*if_capability |= QED_LM_10000baseLRM_Full_BIT;
1754 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
1755 				*if_capability |= QED_LM_10000baseR_FEC_BIT;
1756 		}
1757 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1758 			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
1759 		if (capability &
1760 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
1761 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
1762 				*if_capability |= QED_LM_25000baseSR_Full_BIT;
1763 		}
1764 		if (capability &
1765 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
1766 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
1767 				*if_capability |= QED_LM_40000baseLR4_Full_BIT;
1768 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
1769 				*if_capability |= QED_LM_40000baseSR4_Full_BIT;
1770 		}
1771 		if (capability &
1772 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1773 			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
1774 		if (capability &
1775 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
1776 			if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
1777 				*if_capability |= QED_LM_100000baseSR4_Full_BIT;
1778 		}
1779 
1780 		break;
1781 	case MEDIA_KR:
1782 		*if_capability |= QED_LM_Backplane_BIT;
1783 		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1784 			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
1785 		if (capability &
1786 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1787 			*if_capability |= QED_LM_1000baseKX_Full_BIT;
1788 		if (capability &
1789 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1790 			*if_capability |= QED_LM_10000baseKR_Full_BIT;
1791 		if (capability &
1792 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1793 			*if_capability |= QED_LM_25000baseKR_Full_BIT;
1794 		if (capability &
1795 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1796 			*if_capability |= QED_LM_40000baseKR4_Full_BIT;
1797 		if (capability &
1798 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1799 			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
1800 		if (capability &
1801 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1802 			*if_capability |= QED_LM_100000baseKR4_Full_BIT;
1803 		break;
1804 	case MEDIA_UNSPECIFIED:
1805 	case MEDIA_NOT_PRESENT:
1806 		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
1807 			   "Unknown media and transceiver type;\n");
1808 		break;
1809 	}
1810 }
1811 
1812 static void qed_fill_link(struct qed_hwfn *hwfn,
1813 			  struct qed_ptt *ptt,
1814 			  struct qed_link_output *if_link)
1815 {
1816 	struct qed_mcp_link_capabilities link_caps;
1817 	struct qed_mcp_link_params params;
1818 	struct qed_mcp_link_state link;
1819 	u32 media_type;
1820 
1821 	memset(if_link, 0, sizeof(*if_link));
1822 
1823 	/* Prepare source inputs */
1824 	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
1825 		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
1826 		return;
1827 	}
1828 
1829 	/* Set the link parameters to pass to protocol driver */
1830 	if (link.link_up)
1831 		if_link->link_up = true;
1832 
1833 	/* TODO - at the moment assume supported and advertised speed equal */
1834 	if (link_caps.default_speed_autoneg)
1835 		if_link->supported_caps |= QED_LM_Autoneg_BIT;
1836 	if (params.pause.autoneg ||
1837 	    (params.pause.forced_rx && params.pause.forced_tx))
1838 		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
1839 	if (params.pause.autoneg || params.pause.forced_rx ||
1840 	    params.pause.forced_tx)
1841 		if_link->supported_caps |= QED_LM_Pause_BIT;
1842 
1843 	if_link->advertised_caps = if_link->supported_caps;
1844 	if (params.speed.autoneg)
1845 		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
1846 	else
1847 		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
1848 
1849 	/* Fill link advertised capability*/
1850 	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
1851 				 &if_link->advertised_caps);
1852 	/* Fill link supported capability*/
1853 	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
1854 				 &if_link->supported_caps);
1855 
1856 	if (link.link_up)
1857 		if_link->speed = link.speed;
1858 
1859 	/* TODO - fill duplex properly */
1860 	if_link->duplex = DUPLEX_FULL;
1861 	qed_mcp_get_media_type(hwfn, ptt, &media_type);
1862 	if_link->port = qed_get_port_type(media_type);
1863 
1864 	if_link->autoneg = params.speed.autoneg;
1865 
1866 	if (params.pause.autoneg)
1867 		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1868 	if (params.pause.forced_rx)
1869 		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1870 	if (params.pause.forced_tx)
1871 		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1872 
1873 	/* Link partner capabilities */
1874 	if (link.partner_adv_speed &
1875 	    QED_LINK_PARTNER_SPEED_1G_FD)
1876 		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
1877 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
1878 		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
1879 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
1880 		if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
1881 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
1882 		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
1883 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
1884 		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
1885 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
1886 		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
1887 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
1888 		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
1889 
1890 	if (link.an_complete)
1891 		if_link->lp_caps |= QED_LM_Autoneg_BIT;
1892 
1893 	if (link.partner_adv_pause)
1894 		if_link->lp_caps |= QED_LM_Pause_BIT;
1895 	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1896 	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
1897 		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
1898 
1899 	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
1900 		if_link->eee_supported = false;
1901 	} else {
1902 		if_link->eee_supported = true;
1903 		if_link->eee_active = link.eee_active;
1904 		if_link->sup_caps = link_caps.eee_speed_caps;
1905 		/* MFW clears adv_caps on eee disable; use configured value */
1906 		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
1907 					params.eee.adv_caps;
1908 		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
1909 		if_link->eee.enable = params.eee.enable;
1910 		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
1911 		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
1912 	}
1913 }
1914 
1915 static void qed_get_current_link(struct qed_dev *cdev,
1916 				 struct qed_link_output *if_link)
1917 {
1918 	struct qed_hwfn *hwfn;
1919 	struct qed_ptt *ptt;
1920 	int i;
1921 
1922 	hwfn = &cdev->hwfns[0];
1923 	if (IS_PF(cdev)) {
1924 		ptt = qed_ptt_acquire(hwfn);
1925 		if (ptt) {
1926 			qed_fill_link(hwfn, ptt, if_link);
1927 			qed_ptt_release(hwfn, ptt);
1928 		} else {
1929 			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
1930 		}
1931 	} else {
1932 		qed_fill_link(hwfn, NULL, if_link);
1933 	}
1934 
1935 	for_each_hwfn(cdev, i)
1936 		qed_inform_vf_link_state(&cdev->hwfns[i]);
1937 }
1938 
1939 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
1940 {
1941 	void *cookie = hwfn->cdev->ops_cookie;
1942 	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
1943 	struct qed_link_output if_link;
1944 
1945 	qed_fill_link(hwfn, ptt, &if_link);
1946 	qed_inform_vf_link_state(hwfn);
1947 
1948 	if (IS_LEAD_HWFN(hwfn) && cookie)
1949 		op->link_update(cookie, &if_link);
1950 }
1951 
1952 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
1953 {
1954 	void *cookie = hwfn->cdev->ops_cookie;
1955 	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
1956 
1957 	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
1958 		op->bw_update(cookie);
1959 }
1960 
1961 static int qed_drain(struct qed_dev *cdev)
1962 {
1963 	struct qed_hwfn *hwfn;
1964 	struct qed_ptt *ptt;
1965 	int i, rc;
1966 
1967 	if (IS_VF(cdev))
1968 		return 0;
1969 
1970 	for_each_hwfn(cdev, i) {
1971 		hwfn = &cdev->hwfns[i];
1972 		ptt = qed_ptt_acquire(hwfn);
1973 		if (!ptt) {
1974 			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
1975 			return -EBUSY;
1976 		}
1977 		rc = qed_mcp_drain(hwfn, ptt);
1978 		qed_ptt_release(hwfn, ptt);
1979 		if (rc)
1980 			return rc;
1981 	}
1982 
1983 	return 0;
1984 }
1985 
1986 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
1987 					  struct qed_nvm_image_att *nvm_image,
1988 					  u32 *crc)
1989 {
1990 	u8 *buf = NULL;
1991 	int rc, j;
1992 	u32 val;
1993 
1994 	/* Allocate a buffer for holding the nvram image */
1995 	buf = kzalloc(nvm_image->length, GFP_KERNEL);
1996 	if (!buf)
1997 		return -ENOMEM;
1998 
1999 	/* Read image into buffer */
2000 	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
2001 			      buf, nvm_image->length);
2002 	if (rc) {
2003 		DP_ERR(cdev, "Failed reading image from nvm\n");
2004 		goto out;
2005 	}
2006 
2007 	/* Convert the buffer into big-endian format (excluding the
2008 	 * closing 4 bytes of CRC).
2009 	 */
2010 	for (j = 0; j < nvm_image->length - 4; j += 4) {
2011 		val = cpu_to_be32(*(u32 *)&buf[j]);
2012 		*(u32 *)&buf[j] = val;
2013 	}
2014 
2015 	/* Calc CRC for the "actual" image buffer, i.e. not including
2016 	 * the last 4 CRC bytes.
2017 	 */
2018 	*crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));
2019 
2020 out:
2021 	kfree(buf);
2022 
2023 	return rc;
2024 }
2025 
2026 /* Binary file format -
2027  *     /----------------------------------------------------------------------\
2028  * 0B  |                       0x4 [command index]                            |
2029  * 4B  | image_type     | Options        |  Number of register settings       |
2030  * 8B  |                       Value                                          |
2031  * 12B |                       Mask                                           |
2032  * 16B |                       Offset                                         |
2033  *     \----------------------------------------------------------------------/
2034  * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
2035  * Options - 0'b - Calculate & Update CRC for image
2036  */
2037 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
2038 				      bool *check_resp)
2039 {
2040 	struct qed_nvm_image_att nvm_image;
2041 	struct qed_hwfn *p_hwfn;
2042 	bool is_crc = false;
2043 	u32 image_type;
2044 	int rc = 0, i;
2045 	u16 len;
2046 
2047 	*data += 4;
2048 	image_type = **data;
2049 	p_hwfn = QED_LEADING_HWFN(cdev);
2050 	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2051 		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
2052 			break;
2053 	if (i == p_hwfn->nvm_info.num_images) {
2054 		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
2055 		       image_type);
2056 		return -ENOENT;
2057 	}
2058 
2059 	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
2060 	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
2061 
2062 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2063 		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
2064 		   **data, image_type, nvm_image.start_addr,
2065 		   nvm_image.start_addr + nvm_image.length - 1);
2066 	(*data)++;
2067 	is_crc = !!(**data & BIT(0));
2068 	(*data)++;
2069 	len = *((u16 *)*data);
2070 	*data += 2;
2071 	if (is_crc) {
2072 		u32 crc = 0;
2073 
2074 		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
2075 		if (rc) {
2076 			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
2077 			goto exit;
2078 		}
2079 
2080 		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2081 				       (nvm_image.start_addr +
2082 					nvm_image.length - 4), (u8 *)&crc, 4);
2083 		if (rc)
2084 			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
2085 			       nvm_image.start_addr + nvm_image.length - 4, rc);
2086 		goto exit;
2087 	}
2088 
2089 	/* Iterate over the values for setting */
2090 	while (len) {
2091 		u32 offset, mask, value, cur_value;
2092 		u8 buf[4];
2093 
2094 		value = *((u32 *)*data);
2095 		*data += 4;
2096 		mask = *((u32 *)*data);
2097 		*data += 4;
2098 		offset = *((u32 *)*data);
2099 		*data += 4;
2100 
2101 		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
2102 				      4);
2103 		if (rc) {
2104 			DP_ERR(cdev, "Failed reading from %08x\n",
2105 			       nvm_image.start_addr + offset);
2106 			goto exit;
2107 		}
2108 
2109 		cur_value = le32_to_cpu(*((__le32 *)buf));
2110 		DP_VERBOSE(cdev, NETIF_MSG_DRV,
2111 			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
2112 			   nvm_image.start_addr + offset, cur_value,
2113 			   (cur_value & ~mask) | (value & mask), value, mask);
2114 		value = (value & mask) | (cur_value & ~mask);
2115 		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2116 				       nvm_image.start_addr + offset,
2117 				       (u8 *)&value, 4);
2118 		if (rc) {
2119 			DP_ERR(cdev, "Failed writing to %08x\n",
2120 			       nvm_image.start_addr + offset);
2121 			goto exit;
2122 		}
2123 
2124 		len--;
2125 	}
2126 exit:
2127 	return rc;
2128 }
2129 
2130 /* Binary file format -
2131  *     /----------------------------------------------------------------------\
2132  * 0B  |                       0x3 [command index]                            |
2133  * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2134  * 8B  | File-type |                   reserved                               |
2135  * 12B |                    Image length in bytes                             |
2136  *     \----------------------------------------------------------------------/
2137  *     Start a new file of the provided type
2138  */
2139 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2140 					  const u8 **data, bool *check_resp)
2141 {
2142 	u32 file_type, file_size = 0;
2143 	int rc;
2144 
2145 	*data += 4;
2146 	*check_resp = !!(**data & BIT(0));
2147 	*data += 4;
2148 	file_type = **data;
2149 
2150 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2151 		   "About to start a new file of type %02x\n", file_type);
2152 	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2153 		*data += 4;
2154 		file_size = *((u32 *)(*data));
2155 	}
2156 
2157 	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2158 			       (u8 *)(&file_size), 4);
2159 	*data += 4;
2160 
2161 	return rc;
2162 }
2163 
2164 /* Binary file format -
2165  *     /----------------------------------------------------------------------\
2166  * 0B  |                       0x2 [command index]                            |
2167  * 4B  |                       Length in bytes                                |
2168  * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2169  * 12B |                       Offset in bytes                                |
2170  * 16B |                       Data ...                                       |
2171  *     \----------------------------------------------------------------------/
2172  *     Write data as part of a file that was previously started. Data should be
2173  *     of length equal to that provided in the message
2174  */
2175 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2176 					 const u8 **data, bool *check_resp)
2177 {
2178 	u32 offset, len;
2179 	int rc;
2180 
2181 	*data += 4;
2182 	len = *((u32 *)(*data));
2183 	*data += 4;
2184 	*check_resp = !!(**data & BIT(0));
2185 	*data += 4;
2186 	offset = *((u32 *)(*data));
2187 	*data += 4;
2188 
2189 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2190 		   "About to write File-data: %08x bytes to offset %08x\n",
2191 		   len, offset);
2192 
2193 	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2194 			       (char *)(*data), len);
2195 	*data += len;
2196 
2197 	return rc;
2198 }
2199 
2200 /* Binary file format [General header] -
2201  *     /----------------------------------------------------------------------\
2202  * 0B  |                       QED_NVM_SIGNATURE                              |
2203  * 4B  |                       Length in bytes                                |
2204  * 8B  | Highest command in this batchfile |          Reserved                |
2205  *     \----------------------------------------------------------------------/
2206  */
2207 static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2208 					const struct firmware *image,
2209 					const u8 **data)
2210 {
2211 	u32 signature, len;
2212 
2213 	/* Check minimum size */
2214 	if (image->size < 12) {
2215 		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2216 		return -EINVAL;
2217 	}
2218 
2219 	/* Check signature */
2220 	signature = *((u32 *)(*data));
2221 	if (signature != QED_NVM_SIGNATURE) {
2222 		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2223 		return -EINVAL;
2224 	}
2225 
2226 	*data += 4;
2227 	/* Validate internal size equals the image-size */
2228 	len = *((u32 *)(*data));
2229 	if (len != image->size) {
2230 		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2231 		       len, (u32)image->size);
2232 		return -EINVAL;
2233 	}
2234 
2235 	*data += 4;
2236 	/* Make sure driver familiar with all commands necessary for this */
2237 	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2238 		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2239 		       *((u16 *)(*data)));
2240 		return -EINVAL;
2241 	}
2242 
2243 	*data += 4;
2244 
2245 	return 0;
2246 }
2247 
2248 /* Binary file format -
2249  *     /----------------------------------------------------------------------\
2250  * 0B  |                       0x5 [command index]                            |
2251  * 4B  | Number of config attributes     |          Reserved                  |
2252  * 4B  | Config ID                       | Entity ID      | Length            |
2253  * 4B  | Value                                                                |
2254  *     |                                                                      |
2255  *     \----------------------------------------------------------------------/
2256  * There can be several cfg_id-entity_id-Length-Value sets as specified by
2257  * 'Number of config attributes'.
2258  *
2259  * The API parses config attributes from the user provided buffer and flashes
2260  * them to the respective NVM path using Management FW inerface.
2261  */
2262 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2263 {
2264 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2265 	u8 entity_id, len, buf[32];
2266 	bool need_nvm_init = true;
2267 	struct qed_ptt *ptt;
2268 	u16 cfg_id, count;
2269 	int rc = 0, i;
2270 	u32 flags;
2271 
2272 	ptt = qed_ptt_acquire(hwfn);
2273 	if (!ptt)
2274 		return -EAGAIN;
2275 
2276 	/* NVM CFG ID attribute header */
2277 	*data += 4;
2278 	count = *((u16 *)*data);
2279 	*data += 4;
2280 
2281 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2282 		   "Read config ids: num_attrs = %0d\n", count);
2283 	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2284 	 * arithmetic operations in the implementation.
2285 	 */
2286 	for (i = 1; i <= count; i++) {
2287 		cfg_id = *((u16 *)*data);
2288 		*data += 2;
2289 		entity_id = **data;
2290 		(*data)++;
2291 		len = **data;
2292 		(*data)++;
2293 		memcpy(buf, *data, len);
2294 		*data += len;
2295 
2296 		flags = 0;
2297 		if (need_nvm_init) {
2298 			flags |= QED_NVM_CFG_OPTION_INIT;
2299 			need_nvm_init = false;
2300 		}
2301 
2302 		/* Commit to flash and free the resources */
2303 		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2304 			flags |= QED_NVM_CFG_OPTION_COMMIT |
2305 				 QED_NVM_CFG_OPTION_FREE;
2306 			need_nvm_init = true;
2307 		}
2308 
2309 		if (entity_id)
2310 			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2311 
2312 		DP_VERBOSE(cdev, NETIF_MSG_DRV,
2313 			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
2314 			   entity_id, len);
2315 		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2316 					 buf, len);
2317 		if (rc) {
2318 			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2319 			break;
2320 		}
2321 	}
2322 
2323 	qed_ptt_release(hwfn, ptt);
2324 
2325 	return rc;
2326 }
2327 
2328 #define QED_MAX_NVM_BUF_LEN	32
2329 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
2330 {
2331 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2332 	u8 buf[QED_MAX_NVM_BUF_LEN];
2333 	struct qed_ptt *ptt;
2334 	u32 len;
2335 	int rc;
2336 
2337 	ptt = qed_ptt_acquire(hwfn);
2338 	if (!ptt)
2339 		return QED_MAX_NVM_BUF_LEN;
2340 
2341 	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
2342 				 &len);
2343 	if (rc || !len) {
2344 		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2345 		len = QED_MAX_NVM_BUF_LEN;
2346 	}
2347 
2348 	qed_ptt_release(hwfn, ptt);
2349 
2350 	return len;
2351 }
2352 
2353 static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
2354 				  u32 cmd, u32 entity_id)
2355 {
2356 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2357 	struct qed_ptt *ptt;
2358 	u32 flags, len;
2359 	int rc = 0;
2360 
2361 	ptt = qed_ptt_acquire(hwfn);
2362 	if (!ptt)
2363 		return -EAGAIN;
2364 
2365 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2366 		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
2367 	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
2368 	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
2369 	if (rc)
2370 		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2371 
2372 	qed_ptt_release(hwfn, ptt);
2373 
2374 	return rc;
2375 }
2376 
2377 static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2378 {
2379 	const struct firmware *image;
2380 	const u8 *data, *data_end;
2381 	u32 cmd_type;
2382 	int rc;
2383 
2384 	rc = request_firmware(&image, name, &cdev->pdev->dev);
2385 	if (rc) {
2386 		DP_ERR(cdev, "Failed to find '%s'\n", name);
2387 		return rc;
2388 	}
2389 
2390 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2391 		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
2392 		   name, image->data, (u32)image->size);
2393 	data = image->data;
2394 	data_end = data + image->size;
2395 
2396 	rc = qed_nvm_flash_image_validate(cdev, image, &data);
2397 	if (rc)
2398 		goto exit;
2399 
2400 	while (data < data_end) {
2401 		bool check_resp = false;
2402 
2403 		/* Parse the actual command */
2404 		cmd_type = *((u32 *)data);
2405 		switch (cmd_type) {
2406 		case QED_NVM_FLASH_CMD_FILE_DATA:
2407 			rc = qed_nvm_flash_image_file_data(cdev, &data,
2408 							   &check_resp);
2409 			break;
2410 		case QED_NVM_FLASH_CMD_FILE_START:
2411 			rc = qed_nvm_flash_image_file_start(cdev, &data,
2412 							    &check_resp);
2413 			break;
2414 		case QED_NVM_FLASH_CMD_NVM_CHANGE:
2415 			rc = qed_nvm_flash_image_access(cdev, &data,
2416 							&check_resp);
2417 			break;
2418 		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
2419 			rc = qed_nvm_flash_cfg_write(cdev, &data);
2420 			break;
2421 		default:
2422 			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2423 			rc = -EINVAL;
2424 			goto exit;
2425 		}
2426 
2427 		if (rc) {
2428 			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2429 			goto exit;
2430 		}
2431 
2432 		/* Check response if needed */
2433 		if (check_resp) {
2434 			u32 mcp_response = 0;
2435 
2436 			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2437 				DP_ERR(cdev, "Failed getting MCP response\n");
2438 				rc = -EINVAL;
2439 				goto exit;
2440 			}
2441 
2442 			switch (mcp_response & FW_MSG_CODE_MASK) {
2443 			case FW_MSG_CODE_OK:
2444 			case FW_MSG_CODE_NVM_OK:
2445 			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2446 			case FW_MSG_CODE_PHY_OK:
2447 				break;
2448 			default:
2449 				DP_ERR(cdev, "MFW returns error: %08x\n",
2450 				       mcp_response);
2451 				rc = -EINVAL;
2452 				goto exit;
2453 			}
2454 		}
2455 	}
2456 
2457 exit:
2458 	release_firmware(image);
2459 
2460 	return rc;
2461 }
2462 
2463 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2464 			     u8 *buf, u16 len)
2465 {
2466 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2467 
2468 	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2469 }
2470 
2471 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2472 {
2473 	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2474 	void *cookie = p_hwfn->cdev->ops_cookie;
2475 
2476 	if (ops && ops->schedule_recovery_handler)
2477 		ops->schedule_recovery_handler(cookie);
2478 }
2479 
2480 char *qed_hw_err_type_descr[] = {
2481 	[QED_HW_ERR_FAN_FAIL]		= "Fan Failure",
2482 	[QED_HW_ERR_MFW_RESP_FAIL]	= "MFW Response Failure",
2483 	[QED_HW_ERR_HW_ATTN]		= "HW Attention",
2484 	[QED_HW_ERR_DMAE_FAIL]		= "DMAE Failure",
2485 	[QED_HW_ERR_RAMROD_FAIL]	= "Ramrod Failure",
2486 	[QED_HW_ERR_FW_ASSERT]		= "FW Assertion",
2487 	[QED_HW_ERR_LAST]		= "Unknown",
2488 };
2489 
2490 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
2491 			   enum qed_hw_err_type err_type)
2492 {
2493 	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2494 	void *cookie = p_hwfn->cdev->ops_cookie;
2495 	char *err_str;
2496 
2497 	if (err_type > QED_HW_ERR_LAST)
2498 		err_type = QED_HW_ERR_LAST;
2499 	err_str = qed_hw_err_type_descr[err_type];
2500 
2501 	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
2502 
2503 	/* Call the HW error handler of the protocol driver.
2504 	 * If it is not available - perform a minimal handling of preventing
2505 	 * HW attentions from being reasserted.
2506 	 */
2507 	if (ops && ops->schedule_hw_err_handler)
2508 		ops->schedule_hw_err_handler(cookie, err_type);
2509 	else
2510 		qed_int_attn_clr_enable(p_hwfn->cdev, true);
2511 }
2512 
2513 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2514 			    void *handle)
2515 {
2516 		return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2517 }
2518 
2519 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2520 {
2521 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2522 	struct qed_ptt *ptt;
2523 	int status = 0;
2524 
2525 	ptt = qed_ptt_acquire(hwfn);
2526 	if (!ptt)
2527 		return -EAGAIN;
2528 
2529 	status = qed_mcp_set_led(hwfn, ptt, mode);
2530 
2531 	qed_ptt_release(hwfn, ptt);
2532 
2533 	return status;
2534 }
2535 
2536 static int qed_recovery_process(struct qed_dev *cdev)
2537 {
2538 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2539 	struct qed_ptt *p_ptt;
2540 	int rc = 0;
2541 
2542 	p_ptt = qed_ptt_acquire(p_hwfn);
2543 	if (!p_ptt)
2544 		return -EAGAIN;
2545 
2546 	rc = qed_start_recovery_process(p_hwfn, p_ptt);
2547 
2548 	qed_ptt_release(p_hwfn, p_ptt);
2549 
2550 	return rc;
2551 }
2552 
2553 static int qed_update_wol(struct qed_dev *cdev, bool enabled)
2554 {
2555 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2556 	struct qed_ptt *ptt;
2557 	int rc = 0;
2558 
2559 	if (IS_VF(cdev))
2560 		return 0;
2561 
2562 	ptt = qed_ptt_acquire(hwfn);
2563 	if (!ptt)
2564 		return -EAGAIN;
2565 
2566 	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
2567 				   : QED_OV_WOL_DISABLED);
2568 	if (rc)
2569 		goto out;
2570 	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2571 
2572 out:
2573 	qed_ptt_release(hwfn, ptt);
2574 	return rc;
2575 }
2576 
2577 static int qed_update_drv_state(struct qed_dev *cdev, bool active)
2578 {
2579 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2580 	struct qed_ptt *ptt;
2581 	int status = 0;
2582 
2583 	if (IS_VF(cdev))
2584 		return 0;
2585 
2586 	ptt = qed_ptt_acquire(hwfn);
2587 	if (!ptt)
2588 		return -EAGAIN;
2589 
2590 	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
2591 						QED_OV_DRIVER_STATE_ACTIVE :
2592 						QED_OV_DRIVER_STATE_DISABLED);
2593 
2594 	qed_ptt_release(hwfn, ptt);
2595 
2596 	return status;
2597 }
2598 
2599 static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
2600 {
2601 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2602 	struct qed_ptt *ptt;
2603 	int status = 0;
2604 
2605 	if (IS_VF(cdev))
2606 		return 0;
2607 
2608 	ptt = qed_ptt_acquire(hwfn);
2609 	if (!ptt)
2610 		return -EAGAIN;
2611 
2612 	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
2613 	if (status)
2614 		goto out;
2615 
2616 	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2617 
2618 out:
2619 	qed_ptt_release(hwfn, ptt);
2620 	return status;
2621 }
2622 
2623 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
2624 {
2625 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2626 	struct qed_ptt *ptt;
2627 	int status = 0;
2628 
2629 	if (IS_VF(cdev))
2630 		return 0;
2631 
2632 	ptt = qed_ptt_acquire(hwfn);
2633 	if (!ptt)
2634 		return -EAGAIN;
2635 
2636 	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
2637 	if (status)
2638 		goto out;
2639 
2640 	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2641 
2642 out:
2643 	qed_ptt_release(hwfn, ptt);
2644 	return status;
2645 }
2646 
2647 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
2648 				  u8 dev_addr, u32 offset, u32 len)
2649 {
2650 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2651 	struct qed_ptt *ptt;
2652 	int rc = 0;
2653 
2654 	if (IS_VF(cdev))
2655 		return 0;
2656 
2657 	ptt = qed_ptt_acquire(hwfn);
2658 	if (!ptt)
2659 		return -EAGAIN;
2660 
2661 	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
2662 				  offset, len, buf);
2663 
2664 	qed_ptt_release(hwfn, ptt);
2665 
2666 	return rc;
2667 }
2668 
2669 static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
2670 {
2671 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2672 	struct qed_ptt *ptt;
2673 	int rc = 0;
2674 
2675 	if (IS_VF(cdev))
2676 		return 0;
2677 
2678 	ptt = qed_ptt_acquire(hwfn);
2679 	if (!ptt)
2680 		return -EAGAIN;
2681 
2682 	rc = qed_dbg_grc_config(hwfn, cfg_id, val);
2683 
2684 	qed_ptt_release(hwfn, ptt);
2685 
2686 	return rc;
2687 }
2688 
2689 static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
2690 {
2691 	return QED_AFFIN_HWFN_IDX(cdev);
2692 }
2693 
2694 static struct qed_selftest_ops qed_selftest_ops_pass = {
2695 	.selftest_memory = &qed_selftest_memory,
2696 	.selftest_interrupt = &qed_selftest_interrupt,
2697 	.selftest_register = &qed_selftest_register,
2698 	.selftest_clock = &qed_selftest_clock,
2699 	.selftest_nvram = &qed_selftest_nvram,
2700 };
2701 
2702 const struct qed_common_ops qed_common_ops_pass = {
2703 	.selftest = &qed_selftest_ops_pass,
2704 	.probe = &qed_probe,
2705 	.remove = &qed_remove,
2706 	.set_power_state = &qed_set_power_state,
2707 	.set_name = &qed_set_name,
2708 	.update_pf_params = &qed_update_pf_params,
2709 	.slowpath_start = &qed_slowpath_start,
2710 	.slowpath_stop = &qed_slowpath_stop,
2711 	.set_fp_int = &qed_set_int_fp,
2712 	.get_fp_int = &qed_get_int_fp,
2713 	.sb_init = &qed_sb_init,
2714 	.sb_release = &qed_sb_release,
2715 	.simd_handler_config = &qed_simd_handler_config,
2716 	.simd_handler_clean = &qed_simd_handler_clean,
2717 	.dbg_grc = &qed_dbg_grc,
2718 	.dbg_grc_size = &qed_dbg_grc_size,
2719 	.can_link_change = &qed_can_link_change,
2720 	.set_link = &qed_set_link,
2721 	.get_link = &qed_get_current_link,
2722 	.drain = &qed_drain,
2723 	.update_msglvl = &qed_init_dp,
2724 	.dbg_all_data = &qed_dbg_all_data,
2725 	.dbg_all_data_size = &qed_dbg_all_data_size,
2726 	.chain_alloc = &qed_chain_alloc,
2727 	.chain_free = &qed_chain_free,
2728 	.nvm_flash = &qed_nvm_flash,
2729 	.nvm_get_image = &qed_nvm_get_image,
2730 	.set_coalesce = &qed_set_coalesce,
2731 	.set_led = &qed_set_led,
2732 	.recovery_process = &qed_recovery_process,
2733 	.recovery_prolog = &qed_recovery_prolog,
2734 	.attn_clr_enable = &qed_int_attn_clr_enable,
2735 	.update_drv_state = &qed_update_drv_state,
2736 	.update_mac = &qed_update_mac,
2737 	.update_mtu = &qed_update_mtu,
2738 	.update_wol = &qed_update_wol,
2739 	.db_recovery_add = &qed_db_recovery_add,
2740 	.db_recovery_del = &qed_db_recovery_del,
2741 	.read_module_eeprom = &qed_read_module_eeprom,
2742 	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
2743 	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
2744 	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
2745 	.set_grc_config = &qed_set_grc_config,
2746 };
2747 
2748 void qed_get_protocol_stats(struct qed_dev *cdev,
2749 			    enum qed_mcp_protocol_type type,
2750 			    union qed_mcp_protocol_stats *stats)
2751 {
2752 	struct qed_eth_stats eth_stats;
2753 
2754 	memset(stats, 0, sizeof(*stats));
2755 
2756 	switch (type) {
2757 	case QED_MCP_LAN_STATS:
2758 		qed_get_vport_stats(cdev, &eth_stats);
2759 		stats->lan_stats.ucast_rx_pkts =
2760 					eth_stats.common.rx_ucast_pkts;
2761 		stats->lan_stats.ucast_tx_pkts =
2762 					eth_stats.common.tx_ucast_pkts;
2763 		stats->lan_stats.fcs_err = -1;
2764 		break;
2765 	case QED_MCP_FCOE_STATS:
2766 		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
2767 		break;
2768 	case QED_MCP_ISCSI_STATS:
2769 		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
2770 		break;
2771 	default:
2772 		DP_VERBOSE(cdev, QED_MSG_SP,
2773 			   "Invalid protocol type = %d\n", type);
2774 		return;
2775 	}
2776 }
2777 
2778 int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
2779 {
2780 	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
2781 		   "Scheduling slowpath task [Flag: %d]\n",
2782 		   QED_SLOWPATH_MFW_TLV_REQ);
2783 	smp_mb__before_atomic();
2784 	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
2785 	smp_mb__after_atomic();
2786 	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
2787 
2788 	return 0;
2789 }
2790 
2791 static void
2792 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
2793 {
2794 	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
2795 	struct qed_eth_stats_common *p_common;
2796 	struct qed_generic_tlvs gen_tlvs;
2797 	struct qed_eth_stats stats;
2798 	int i;
2799 
2800 	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
2801 	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
2802 
2803 	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
2804 		tlv->flags.ipv4_csum_offload = true;
2805 	if (gen_tlvs.feat_flags & QED_TLV_LSO)
2806 		tlv->flags.lso_supported = true;
2807 	tlv->flags.b_set = true;
2808 
2809 	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
2810 		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
2811 			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
2812 			tlv->mac_set[i] = true;
2813 		}
2814 	}
2815 
2816 	qed_get_vport_stats(cdev, &stats);
2817 	p_common = &stats.common;
2818 	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
2819 			 p_common->rx_bcast_pkts;
2820 	tlv->rx_frames_set = true;
2821 	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
2822 			p_common->rx_bcast_bytes;
2823 	tlv->rx_bytes_set = true;
2824 	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
2825 			 p_common->tx_bcast_pkts;
2826 	tlv->tx_frames_set = true;
2827 	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
2828 			p_common->tx_bcast_bytes;
2829 	tlv->rx_bytes_set = true;
2830 }
2831 
2832 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
2833 			  union qed_mfw_tlv_data *tlv_buf)
2834 {
2835 	struct qed_dev *cdev = hwfn->cdev;
2836 	struct qed_common_cb_ops *ops;
2837 
2838 	ops = cdev->protocol_ops.common;
2839 	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
2840 		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
2841 		return -EINVAL;
2842 	}
2843 
2844 	switch (type) {
2845 	case QED_MFW_TLV_GENERIC:
2846 		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
2847 		break;
2848 	case QED_MFW_TLV_ETH:
2849 		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
2850 		break;
2851 	case QED_MFW_TLV_FCOE:
2852 		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
2853 		break;
2854 	case QED_MFW_TLV_ISCSI:
2855 		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
2856 		break;
2857 	default:
2858 		break;
2859 	}
2860 
2861 	return 0;
2862 }
2863