/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS                   QED_ROCE_QPS

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);
/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initialization as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

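/* Attempt to enable the requested number of MSI-X vectors. If fewer are
 * available, any count down to min_msix_cnt is accepted, rounded down to
 * a multiple of the number of hwfns so vectors split evenly across
 * engines.
 */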
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. The new requested
		 * number should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

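/* Fastpath handlers are interleaved across hwfns: (index % num_hwfns)
 * selects the engine, and (index / num_hwfns) the handler slot within it.
 */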
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void(*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

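/* Shared handler for MSI/INTa. The per-hwfn IGU SISR status bitmap is
 * polled: bit 0 signals a slowpath interrupt and schedules the sp
 * tasklet, while bit (j + 1) invokes fastpath handler j as registered
 * via qed_simd_handler_config().
 */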
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

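/* Request the slowpath interrupt for a hwfn. In MSI-X mode each hwfn
 * gets a dedicated vector wired to its sp tasklet; in MSI/INTa mode a
 * single IRQ is requested for the whole device and demultiplexed by
 * qed_single_int().
 */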
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

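/* Reserve 'cnt' interrupt lines for fastpath use. Returns the number of
 * vectors actually usable - capped at the MSI-X vectors left over for
 * fastpath, or at 63 per hwfn in MSI/INTa mode.
 */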
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

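	/* The first num_hwfns vectors are reserved for slowpath interrupts;
	 * whatever remains is handed to the fastpath queues.
	 */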
	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

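/* Inflate a zlib-compressed firmware buffer into 'unzip_buf'. Returns
 * the decompressed size in dwords, or 0 on any zlib failure.
 */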
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		flush_workqueue(cdev->hwfns[i].slowpath_wq);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

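/* Delayed-work handler for per-hwfn slowpath requests such as MFW TLV
 * queries. If no PTT entry is currently free, the work re-queues itself
 * and retries.
 */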
static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
	}

	return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

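/* Initialize a status block on behalf of a protocol driver. The absolute
 * sb_id is spread across the participating hwfns (sb_id % n_hwfns) and
 * translated into a hwfn-relative id (sb_id / n_hwfns).
 */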
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses a single engine while CMT uses two. When both are in
	 * use we force a single engine. Storage also uses only engine 0.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

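/* Translate the protocol driver's link overrides [speeds, pause,
 * loopback, EEE] into MCP link parameters and request the MFW to apply
 * them.
 */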
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	u32 sup_caps;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When a VF wants to set the link, force it to read the bulletin
	 * instead. This mimics the PF behavior, where a notification [both
	 * immediate and possibly later] would be generated when changing
	 * properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		sup_caps = QED_LM_1000baseT_Full_BIT |
			   QED_LM_1000baseKX_Full_BIT |
			   QED_LM_1000baseX_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		sup_caps = QED_LM_10000baseT_Full_BIT |
			   QED_LM_10000baseKR_Full_BIT |
			   QED_LM_10000baseKX4_Full_BIT |
			   QED_LM_10000baseR_FEC_BIT |
			   QED_LM_10000baseCR_Full_BIT |
			   QED_LM_10000baseSR_Full_BIT |
			   QED_LM_10000baseLR_Full_BIT |
			   QED_LM_10000baseLRM_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
		sup_caps = QED_LM_25000baseKR_Full_BIT |
			   QED_LM_25000baseCR_Full_BIT |
			   QED_LM_25000baseSR_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		sup_caps = QED_LM_40000baseLR4_Full_BIT |
			   QED_LM_40000baseKR4_Full_BIT |
			   QED_LM_40000baseCR4_Full_BIT |
			   QED_LM_40000baseSR4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		sup_caps = QED_LM_50000baseKR2_Full_BIT |
			   QED_LM_50000baseCR2_Full_BIT |
			   QED_LM_50000baseSR2_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		sup_caps = QED_LM_100000baseKR4_Full_BIT |
			   QED_LM_100000baseSR4_Full_BIT |
			   QED_LM_100000baseCR4_Full_BIT |
			   QED_LM_100000baseLR4_ER4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

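/* Convert an NVM speed-capability mask into QED_LM_* link-mode bits,
 * refined by the reported media type, transceiver type and board
 * configuration.
 */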
static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     u32 *if_capability)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		/* For DAC media multiple speed capabilities are supported */
		capability = capability & speed_mask;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseCR4_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseCR2_Full_BIT;
		if (capability &
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseCR4_Full_BIT;
		break;
	case MEDIA_BASE_T:
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			}
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
				*if_capability |= QED_LM_10000baseT_Full_BIT;
			}
		}
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
				*if_capability |= QED_LM_10000baseT_Full_BIT;
		}
		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
			if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
			    (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
				*if_capability |= QED_LM_1000baseKX_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
				*if_capability |= QED_LM_10000baseSR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
				*if_capability |= QED_LM_10000baseLR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
				*if_capability |= QED_LM_10000baseLRM_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
				*if_capability |= QED_LM_10000baseR_FEC_BIT;
		}
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
				*if_capability |= QED_LM_25000baseSR_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
				*if_capability |= QED_LM_40000baseLR4_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
				*if_capability |= QED_LM_40000baseSR4_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
				*if_capability |= QED_LM_100000baseSR4_Full_BIT;
		}

		break;
	case MEDIA_KR:
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseKR4_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseKR4_Full_BIT;
		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (link_caps.default_speed_autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.autoneg)
		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
	else
		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 &if_link->advertised_caps);
	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 &if_link->supported_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed &
	    QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
		if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

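/* Issue an MCP drain request on every hwfn; PF-only, VFs return
 * immediately.
 */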
static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}

static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc, j;
	u32 val;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	for (j = 0; j < nvm_image->length - 4; j += 4) {
		val = cpu_to_be32(*(u32 *)&buf[j]);
		*(u32 *)&buf[j] = val;
	}

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));

out:
	kfree(buf);

	return rc;
}

1833 /* Binary file format -
1834  *     /----------------------------------------------------------------------\
1835  * 0B  |                       0x4 [command index]                            |
1836  * 4B  | image_type     | Options        |  Number of register settings       |
1837  * 8B  |                       Value                                          |
1838  * 12B |                       Mask                                           |
1839  * 16B |                       Offset                                         |
1840  *     \----------------------------------------------------------------------/
1841  * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - b'0 - Calculate & Update CRC for image
1843  */
1844 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
1845 				      bool *check_resp)
1846 {
1847 	struct qed_nvm_image_att nvm_image;
1848 	struct qed_hwfn *p_hwfn;
1849 	bool is_crc = false;
1850 	u32 image_type;
1851 	int rc = 0, i;
1852 	u16 len;
1853 
1854 	*data += 4;
1855 	image_type = **data;
1856 	p_hwfn = QED_LEADING_HWFN(cdev);
1857 	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
1858 		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
1859 			break;
1860 	if (i == p_hwfn->nvm_info.num_images) {
1861 		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
1862 		       image_type);
1863 		return -ENOENT;
1864 	}
1865 
1866 	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
1867 	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
1868 
1869 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
1870 		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
1871 		   **data, image_type, nvm_image.start_addr,
1872 		   nvm_image.start_addr + nvm_image.length - 1);
1873 	(*data)++;
1874 	is_crc = !!(**data & BIT(0));
1875 	(*data)++;
1876 	len = *((u16 *)*data);
1877 	*data += 2;
1878 	if (is_crc) {
1879 		u32 crc = 0;
1880 
1881 		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
1882 		if (rc) {
1883 			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
1884 			goto exit;
1885 		}
1886 
1887 		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
1888 				       (nvm_image.start_addr +
1889 					nvm_image.length - 4), (u8 *)&crc, 4);
1890 		if (rc)
1891 			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
1892 			       nvm_image.start_addr + nvm_image.length - 4, rc);
1893 		goto exit;
1894 	}
1895 
1896 	/* Iterate over the values for setting */
1897 	while (len) {
1898 		u32 offset, mask, value, cur_value;
1899 		u8 buf[4];
1900 
1901 		value = *((u32 *)*data);
1902 		*data += 4;
1903 		mask = *((u32 *)*data);
1904 		*data += 4;
1905 		offset = *((u32 *)*data);
1906 		*data += 4;
1907 
1908 		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
1909 				      4);
1910 		if (rc) {
1911 			DP_ERR(cdev, "Failed reading from %08x\n",
1912 			       nvm_image.start_addr + offset);
1913 			goto exit;
1914 		}
1915 
1916 		cur_value = le32_to_cpu(*((__le32 *)buf));
1917 		DP_VERBOSE(cdev, NETIF_MSG_DRV,
1918 			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
1919 			   nvm_image.start_addr + offset, cur_value,
1920 			   (cur_value & ~mask) | (value & mask), value, mask);
1921 		value = (value & mask) | (cur_value & ~mask);
1922 		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
1923 				       nvm_image.start_addr + offset,
1924 				       (u8 *)&value, 4);
1925 		if (rc) {
1926 			DP_ERR(cdev, "Failed writing to %08x\n",
1927 			       nvm_image.start_addr + offset);
1928 			goto exit;
1929 		}
1930 
1931 		len--;
1932 	}
1933 exit:
1934 	return rc;
1935 }
1936 
1937 /* Binary file format -
1938  *     /----------------------------------------------------------------------\
1939  * 0B  |                       0x3 [command index]                            |
1940  * 4B  | b'0: check_response?   | b'1-31  reserved                            |
1941  * 8B  | File-type |                   reserved                               |
1942  *     \----------------------------------------------------------------------/
1943  *     Start a new file of the provided type
1944  */
1945 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
1946 					  const u8 **data, bool *check_resp)
1947 {
1948 	int rc;
1949 
1950 	*data += 4;
1951 	*check_resp = !!(**data & BIT(0));
1952 	*data += 4;
1953 
1954 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
1955 		   "About to start a new file of type %02x\n", **data);
1956 	rc = qed_mcp_nvm_put_file_begin(cdev, **data);
1957 	*data += 4;
1958 
1959 	return rc;
1960 }
1961 
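/* Illustrative layout of the file-data command parsed below (not a
 * struct the driver defines; fields are read in CPU byte order and the
 * data tail immediately follows the 16-byte header):
 *
 *	u32 cmd_index;		// always 0x2
 *	u32 len;		// length of the data tail, in bytes
 *	u32 flags;		// b'0 - check MCP response
 *	u32 offset;		// destination offset within the file
 *	u8  data[len];
 */
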
1962 /* Binary file format -
1963  *     /----------------------------------------------------------------------\
1964  * 0B  |                       0x2 [command index]                            |
1965  * 4B  |                       Length in bytes                                |
1966  * 8B  | b'0: check_response?   | b'1-31  reserved                            |
1967  * 12B |                       Offset in bytes                                |
1968  * 16B |                       Data ...                                       |
1969  *     \----------------------------------------------------------------------/
1970  *     Write data as part of a file that was previously started. Data should be
1971  *     of length equal to that provided in the message
1972  */
1973 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
1974 					 const u8 **data, bool *check_resp)
1975 {
1976 	u32 offset, len;
1977 	int rc;
1978 
1979 	*data += 4;
1980 	len = *((u32 *)(*data));
1981 	*data += 4;
1982 	*check_resp = !!(**data & BIT(0));
1983 	*data += 4;
1984 	offset = *((u32 *)(*data));
1985 	*data += 4;
1986 
1987 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
1988 		   "About to write File-data: %08x bytes to offset %08x\n",
1989 		   len, offset);
1990 
1991 	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
1992 			       (char *)(*data), len);
1993 	*data += len;
1994 
1995 	return rc;
1996 }
1997 
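/* Illustrative view of the general header validated below (fields are
 * read in CPU byte order; the "highest command" field is 16 bits wide
 * but occupies a 32-bit slot):
 *
 *	u32 signature;		// must equal QED_NVM_SIGNATURE
 *	u32 len;		// must equal the firmware image size
 *	u16 max_cmd;		// must be < QED_NVM_FLASH_CMD_NVM_MAX
 *	u16 reserved;
 */
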
1998 /* Binary file format [General header] -
1999  *     /----------------------------------------------------------------------\
2000  * 0B  |                       QED_NVM_SIGNATURE                              |
2001  * 4B  |                       Length in bytes                                |
2002  * 8B  | Highest command in this batchfile |          Reserved                |
2003  *     \----------------------------------------------------------------------/
2004  */
2005 static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2006 					const struct firmware *image,
2007 					const u8 **data)
2008 {
2009 	u32 signature, len;
2010 
2011 	/* Check minimum size */
2012 	if (image->size < 12) {
2013 		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2014 		return -EINVAL;
2015 	}
2016 
2017 	/* Check signature */
2018 	signature = *((u32 *)(*data));
2019 	if (signature != QED_NVM_SIGNATURE) {
2020 		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2021 		return -EINVAL;
2022 	}
2023 
2024 	*data += 4;
2025 	/* Validate internal size equals the image-size */
2026 	len = *((u32 *)(*data));
2027 	if (len != image->size) {
2028 		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2029 		       len, (u32)image->size);
2030 		return -EINVAL;
2031 	}
2032 
2033 	*data += 4;
	/* Make sure the driver supports all commands used by this file */
2035 	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2036 		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2037 		       *((u16 *)(*data)));
2038 		return -EINVAL;
2039 	}
2040 
2041 	*data += 4;
2042 
2043 	return 0;
2044 }
2045 
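/* Entry point for flashing an NVM batchfile. This is typically reached
 * from user space via "ethtool -f <ifname> <image.bin>", which the
 * protocol driver (e.g. qede) forwards through the .nvm_flash callback
 * below; 'name' is resolved against the firmware search path by
 * request_firmware().
 */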
2046 static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2047 {
2048 	const struct firmware *image;
2049 	const u8 *data, *data_end;
2050 	u32 cmd_type;
2051 	int rc;
2052 
2053 	rc = request_firmware(&image, name, &cdev->pdev->dev);
2054 	if (rc) {
2055 		DP_ERR(cdev, "Failed to find '%s'\n", name);
2056 		return rc;
2057 	}
2058 
2059 	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2060 		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
2061 		   name, image->data, (u32)image->size);
2062 	data = image->data;
2063 	data_end = data + image->size;
2064 
2065 	rc = qed_nvm_flash_image_validate(cdev, image, &data);
2066 	if (rc)
2067 		goto exit;
2068 
2069 	while (data < data_end) {
2070 		bool check_resp = false;
2071 
2072 		/* Parse the actual command */
2073 		cmd_type = *((u32 *)data);
2074 		switch (cmd_type) {
2075 		case QED_NVM_FLASH_CMD_FILE_DATA:
2076 			rc = qed_nvm_flash_image_file_data(cdev, &data,
2077 							   &check_resp);
2078 			break;
2079 		case QED_NVM_FLASH_CMD_FILE_START:
2080 			rc = qed_nvm_flash_image_file_start(cdev, &data,
2081 							    &check_resp);
2082 			break;
2083 		case QED_NVM_FLASH_CMD_NVM_CHANGE:
2084 			rc = qed_nvm_flash_image_access(cdev, &data,
2085 							&check_resp);
2086 			break;
2087 		default:
2088 			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2089 			rc = -EINVAL;
2090 			goto exit;
2091 		}
2092 
2093 		if (rc) {
2094 			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2095 			goto exit;
2096 		}
2097 
2098 		/* Check response if needed */
2099 		if (check_resp) {
2100 			u32 mcp_response = 0;
2101 
2102 			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2103 				DP_ERR(cdev, "Failed getting MCP response\n");
2104 				rc = -EINVAL;
2105 				goto exit;
2106 			}
2107 
2108 			switch (mcp_response & FW_MSG_CODE_MASK) {
2109 			case FW_MSG_CODE_OK:
2110 			case FW_MSG_CODE_NVM_OK:
2111 			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2112 			case FW_MSG_CODE_PHY_OK:
2113 				break;
2114 			default:
2115 				DP_ERR(cdev, "MFW returns error: %08x\n",
2116 				       mcp_response);
2117 				rc = -EINVAL;
2118 				goto exit;
2119 			}
2120 		}
2121 	}
2122 
2123 exit:
2124 	release_firmware(image);
2125 
2126 	return rc;
2127 }
2128 
2129 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2130 			     u8 *buf, u16 len)
2131 {
2132 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2133 
2134 	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2135 }
2136 
2137 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2138 			    void *handle)
2139 {
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2141 }
2142 
2143 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2144 {
2145 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2146 	struct qed_ptt *ptt;
2147 	int status = 0;
2148 
2149 	ptt = qed_ptt_acquire(hwfn);
2150 	if (!ptt)
2151 		return -EAGAIN;
2152 
2153 	status = qed_mcp_set_led(hwfn, ptt, mode);
2154 
2155 	qed_ptt_release(hwfn, ptt);
2156 
2157 	return status;
2158 }
2159 
2160 static int qed_update_wol(struct qed_dev *cdev, bool enabled)
2161 {
2162 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2163 	struct qed_ptt *ptt;
2164 	int rc = 0;
2165 
2166 	if (IS_VF(cdev))
2167 		return 0;
2168 
2169 	ptt = qed_ptt_acquire(hwfn);
2170 	if (!ptt)
2171 		return -EAGAIN;
2172 
2173 	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
2174 				   : QED_OV_WOL_DISABLED);
2175 	if (rc)
2176 		goto out;
2177 	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2178 
2179 out:
2180 	qed_ptt_release(hwfn, ptt);
2181 	return rc;
2182 }
2183 
2184 static int qed_update_drv_state(struct qed_dev *cdev, bool active)
2185 {
2186 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2187 	struct qed_ptt *ptt;
2188 	int status = 0;
2189 
2190 	if (IS_VF(cdev))
2191 		return 0;
2192 
2193 	ptt = qed_ptt_acquire(hwfn);
2194 	if (!ptt)
2195 		return -EAGAIN;
2196 
2197 	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
2198 						QED_OV_DRIVER_STATE_ACTIVE :
2199 						QED_OV_DRIVER_STATE_DISABLED);
2200 
2201 	qed_ptt_release(hwfn, ptt);
2202 
2203 	return status;
2204 }
2205 
2206 static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
2207 {
2208 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2209 	struct qed_ptt *ptt;
2210 	int status = 0;
2211 
2212 	if (IS_VF(cdev))
2213 		return 0;
2214 
2215 	ptt = qed_ptt_acquire(hwfn);
2216 	if (!ptt)
2217 		return -EAGAIN;
2218 
2219 	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
2220 	if (status)
2221 		goto out;
2222 
2223 	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2224 
2225 out:
2226 	qed_ptt_release(hwfn, ptt);
2227 	return status;
2228 }
2229 
2230 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
2231 {
2232 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2233 	struct qed_ptt *ptt;
2234 	int status = 0;
2235 
2236 	if (IS_VF(cdev))
2237 		return 0;
2238 
2239 	ptt = qed_ptt_acquire(hwfn);
2240 	if (!ptt)
2241 		return -EAGAIN;
2242 
2243 	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
2244 	if (status)
2245 		goto out;
2246 
2247 	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2248 
2249 out:
2250 	qed_ptt_release(hwfn, ptt);
2251 	return status;
2252 }
2253 
2254 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
2255 				  u8 dev_addr, u32 offset, u32 len)
2256 {
2257 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2258 	struct qed_ptt *ptt;
2259 	int rc = 0;
2260 
2261 	if (IS_VF(cdev))
2262 		return 0;
2263 
2264 	ptt = qed_ptt_acquire(hwfn);
2265 	if (!ptt)
2266 		return -EAGAIN;
2267 
2268 	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
2269 				  offset, len, buf);
2270 
2271 	qed_ptt_release(hwfn, ptt);
2272 
2273 	return rc;
2274 }
2275 
2276 static struct qed_selftest_ops qed_selftest_ops_pass = {
2277 	.selftest_memory = &qed_selftest_memory,
2278 	.selftest_interrupt = &qed_selftest_interrupt,
2279 	.selftest_register = &qed_selftest_register,
2280 	.selftest_clock = &qed_selftest_clock,
2281 	.selftest_nvram = &qed_selftest_nvram,
2282 };
2283 
2284 const struct qed_common_ops qed_common_ops_pass = {
2285 	.selftest = &qed_selftest_ops_pass,
2286 	.probe = &qed_probe,
2287 	.remove = &qed_remove,
2288 	.set_power_state = &qed_set_power_state,
2289 	.set_name = &qed_set_name,
2290 	.update_pf_params = &qed_update_pf_params,
2291 	.slowpath_start = &qed_slowpath_start,
2292 	.slowpath_stop = &qed_slowpath_stop,
2293 	.set_fp_int = &qed_set_int_fp,
2294 	.get_fp_int = &qed_get_int_fp,
2295 	.sb_init = &qed_sb_init,
2296 	.sb_release = &qed_sb_release,
2297 	.simd_handler_config = &qed_simd_handler_config,
2298 	.simd_handler_clean = &qed_simd_handler_clean,
2299 	.dbg_grc = &qed_dbg_grc,
2300 	.dbg_grc_size = &qed_dbg_grc_size,
2301 	.can_link_change = &qed_can_link_change,
2302 	.set_link = &qed_set_link,
2303 	.get_link = &qed_get_current_link,
2304 	.drain = &qed_drain,
2305 	.update_msglvl = &qed_init_dp,
2306 	.dbg_all_data = &qed_dbg_all_data,
2307 	.dbg_all_data_size = &qed_dbg_all_data_size,
2308 	.chain_alloc = &qed_chain_alloc,
2309 	.chain_free = &qed_chain_free,
2310 	.nvm_flash = &qed_nvm_flash,
2311 	.nvm_get_image = &qed_nvm_get_image,
2312 	.set_coalesce = &qed_set_coalesce,
2313 	.set_led = &qed_set_led,
2314 	.update_drv_state = &qed_update_drv_state,
2315 	.update_mac = &qed_update_mac,
2316 	.update_mtu = &qed_update_mtu,
2317 	.update_wol = &qed_update_wol,
2318 	.read_module_eeprom = &qed_read_module_eeprom,
2319 };
2320 
2321 void qed_get_protocol_stats(struct qed_dev *cdev,
2322 			    enum qed_mcp_protocol_type type,
2323 			    union qed_mcp_protocol_stats *stats)
2324 {
2325 	struct qed_eth_stats eth_stats;
2326 
2327 	memset(stats, 0, sizeof(*stats));
2328 
2329 	switch (type) {
2330 	case QED_MCP_LAN_STATS:
2331 		qed_get_vport_stats(cdev, &eth_stats);
2332 		stats->lan_stats.ucast_rx_pkts =
2333 					eth_stats.common.rx_ucast_pkts;
2334 		stats->lan_stats.ucast_tx_pkts =
2335 					eth_stats.common.tx_ucast_pkts;
2336 		stats->lan_stats.fcs_err = -1;
2337 		break;
2338 	case QED_MCP_FCOE_STATS:
2339 		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
2340 		break;
2341 	case QED_MCP_ISCSI_STATS:
2342 		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
2343 		break;
2344 	default:
2345 		DP_VERBOSE(cdev, QED_MSG_SP,
2346 			   "Invalid protocol type = %d\n", type);
2347 		return;
2348 	}
2349 }
2350 
2351 int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
2352 {
2353 	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
2354 		   "Scheduling slowpath task [Flag: %d]\n",
2355 		   QED_SLOWPATH_MFW_TLV_REQ);
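	/* Order the flag update with respect to queueing the work so a
	 * concurrently running slowpath task cannot miss the request.
	 */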
2356 	smp_mb__before_atomic();
2357 	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
2358 	smp_mb__after_atomic();
2359 	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
2360 
2361 	return 0;
2362 }
2363 
2364 static void
2365 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
2366 {
2367 	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
2368 	struct qed_eth_stats_common *p_common;
2369 	struct qed_generic_tlvs gen_tlvs;
2370 	struct qed_eth_stats stats;
2371 	int i;
2372 
2373 	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
2374 	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
2375 
2376 	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
2377 		tlv->flags.ipv4_csum_offload = true;
2378 	if (gen_tlvs.feat_flags & QED_TLV_LSO)
2379 		tlv->flags.lso_supported = true;
2380 	tlv->flags.b_set = true;
2381 
2382 	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
2383 		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
2384 			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
2385 			tlv->mac_set[i] = true;
2386 		}
2387 	}
2388 
2389 	qed_get_vport_stats(cdev, &stats);
2390 	p_common = &stats.common;
2391 	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
2392 			 p_common->rx_bcast_pkts;
2393 	tlv->rx_frames_set = true;
2394 	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
2395 			p_common->rx_bcast_bytes;
2396 	tlv->rx_bytes_set = true;
2397 	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
2398 			 p_common->tx_bcast_pkts;
2399 	tlv->tx_frames_set = true;
2400 	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
2401 			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
2403 }
2404 
2405 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
2406 			  union qed_mfw_tlv_data *tlv_buf)
2407 {
2408 	struct qed_dev *cdev = hwfn->cdev;
2409 	struct qed_common_cb_ops *ops;
2410 
2411 	ops = cdev->protocol_ops.common;
2412 	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
2413 		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
2414 		return -EINVAL;
2415 	}
2416 
2417 	switch (type) {
2418 	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(cdev, &tlv_buf->generic);
2420 		break;
2421 	case QED_MFW_TLV_ETH:
2422 		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
2423 		break;
2424 	case QED_MFW_TLV_FCOE:
2425 		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
2426 		break;
2427 	case QED_MFW_TLV_ISCSI:
2428 		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
2429 		break;
2430 	default:
2431 		break;
2432 	}
2433 
2434 	return 0;
2435 }
2436