xref: /openbmc/linux/drivers/net/ethernet/qlogic/qed/qed_main.c (revision e4781421e883340b796da5a724bda7226817990b)
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/stddef.h>
34 #include <linux/pci.h>
35 #include <linux/kernel.h>
36 #include <linux/slab.h>
37 #include <linux/version.h>
38 #include <linux/delay.h>
39 #include <asm/byteorder.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/string.h>
42 #include <linux/module.h>
43 #include <linux/interrupt.h>
44 #include <linux/workqueue.h>
45 #include <linux/ethtool.h>
46 #include <linux/etherdevice.h>
47 #include <linux/vmalloc.h>
48 #include <linux/qed/qed_if.h>
49 #include <linux/qed/qed_ll2_if.h>
50 
51 #include "qed.h"
52 #include "qed_sriov.h"
53 #include "qed_sp.h"
54 #include "qed_dev_api.h"
55 #include "qed_ll2.h"
56 #include "qed_mcp.h"
57 #include "qed_hw.h"
58 #include "qed_selftest.h"
59 
60 #define QED_ROCE_QPS			(8192)
61 #define QED_ROCE_DPIS			(8)
62 
63 static char version[] =
64 	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
65 
66 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
67 MODULE_LICENSE("GPL");
68 MODULE_VERSION(DRV_MODULE_VERSION);
69 
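/* The firmware blob is looked up via request_firmware() by a version-stamped
 * name of the form qed/qed_init_values_zipped-<maj>.<min>.<rev>.<eng>.bin,
 * typically installed under /lib/firmware.
 */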
70 #define FW_FILE_VERSION				\
71 	__stringify(FW_MAJOR_VERSION) "."	\
72 	__stringify(FW_MINOR_VERSION) "."	\
73 	__stringify(FW_REVISION_VERSION) "."	\
74 	__stringify(FW_ENGINEERING_VERSION)
75 
76 #define QED_FW_FILE_NAME	\
77 	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
78 
79 MODULE_FIRMWARE(QED_FW_FILE_NAME);
80 
81 static int __init qed_init(void)
82 {
83 	pr_info("%s", version);
84 
85 	return 0;
86 }
87 
88 static void __exit qed_cleanup(void)
89 {
90 	pr_notice("qed_cleanup called\n");
91 }
92 
93 module_init(qed_init);
94 module_exit(qed_cleanup);
95 
96 /* Check if the DMA controller on the machine can properly handle the DMA
97  * addressing required by the device.
98  */
99 static int qed_set_coherency_mask(struct qed_dev *cdev)
100 {
101 	struct device *dev = &cdev->pdev->dev;
102 
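	/* Prefer 64-bit streaming and coherent DMA masks; fall back to a
	 * 32-bit streaming mask if the platform cannot handle 64-bit
	 * addressing.
	 */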
103 	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
104 		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
105 			DP_NOTICE(cdev,
106 				  "Can't request 64-bit consistent allocations\n");
107 			return -EIO;
108 		}
109 	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
110 		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
111 		return -EIO;
112 	}
113 
114 	return 0;
115 }
116 
117 static void qed_free_pci(struct qed_dev *cdev)
118 {
119 	struct pci_dev *pdev = cdev->pdev;
120 
121 	if (cdev->doorbells)
122 		iounmap(cdev->doorbells);
123 	if (cdev->regview)
124 		iounmap(cdev->regview);
125 	if (atomic_read(&pdev->enable_cnt) == 1)
126 		pci_release_regions(pdev);
127 
128 	pci_disable_device(pdev);
129 }
130 
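/* An all-ones revision ID typically means the config space read failed,
 * e.g. because the device already signalled an error on the bus.
 */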
131 #define PCI_REVISION_ID_ERROR_VAL	0xff
132 
133 /* Performs PCI initialization and sets up PCI-related parameters in the
134  * device structure. Returns 0 on success.
135  */
136 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
137 {
138 	u8 rev_id;
139 	int rc;
140 
141 	cdev->pdev = pdev;
142 
143 	rc = pci_enable_device(pdev);
144 	if (rc) {
145 		DP_NOTICE(cdev, "Cannot enable PCI device\n");
146 		goto err0;
147 	}
148 
149 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
150 		DP_NOTICE(cdev, "No memory region found in bar #0\n");
151 		rc = -EIO;
152 		goto err1;
153 	}
154 
155 	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
156 		DP_NOTICE(cdev, "No memory region found in bar #2\n");
157 		rc = -EIO;
158 		goto err1;
159 	}
160 
161 	if (atomic_read(&pdev->enable_cnt) == 1) {
162 		rc = pci_request_regions(pdev, "qed");
163 		if (rc) {
164 			DP_NOTICE(cdev,
165 				  "Failed to request PCI memory resources\n");
166 			goto err1;
167 		}
168 		pci_set_master(pdev);
169 		pci_save_state(pdev);
170 	}
171 
172 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
173 	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
174 		DP_NOTICE(cdev,
175 			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
176 			  rev_id);
177 		rc = -ENODEV;
178 		goto err2;
179 	}
180 	if (!pci_is_pcie(pdev)) {
181 		DP_NOTICE(cdev, "The bus is not PCI Express\n");
182 		rc = -EIO;
183 		goto err2;
184 	}
185 
186 	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
187 	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
188 		DP_NOTICE(cdev, "Cannot find power management capability\n");
189 
190 	rc = qed_set_coherency_mask(cdev);
191 	if (rc)
192 		goto err2;
193 
194 	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
195 	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
196 	cdev->pci_params.irq = pdev->irq;
197 
198 	cdev->regview = pci_ioremap_bar(pdev, 0);
199 	if (!cdev->regview) {
200 		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
201 		rc = -ENOMEM;
202 		goto err2;
203 	}
204 
205 	if (IS_PF(cdev)) {
206 		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
207 		cdev->db_size = pci_resource_len(cdev->pdev, 2);
208 		cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
209 		if (!cdev->doorbells) {
210 			DP_NOTICE(cdev, "Cannot map doorbell space\n");
211 			return -ENOMEM;
212 		}
213 	}
214 
215 	return 0;
216 
217 err2:
218 	pci_release_regions(pdev);
219 err1:
220 	pci_disable_device(pdev);
221 err0:
222 	return rc;
223 }
224 
225 int qed_fill_dev_info(struct qed_dev *cdev,
226 		      struct qed_dev_info *dev_info)
227 {
228 	struct qed_ptt  *ptt;
229 
230 	memset(dev_info, 0, sizeof(struct qed_dev_info));
231 
232 	dev_info->num_hwfns = cdev->num_hwfns;
233 	dev_info->pci_mem_start = cdev->pci_params.mem_start;
234 	dev_info->pci_mem_end = cdev->pci_params.mem_end;
235 	dev_info->pci_irq = cdev->pci_params.irq;
236 	dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
237 				    QED_PCI_ETH_ROCE);
238 	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
239 	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
240 
241 	if (IS_PF(cdev)) {
242 		dev_info->fw_major = FW_MAJOR_VERSION;
243 		dev_info->fw_minor = FW_MINOR_VERSION;
244 		dev_info->fw_rev = FW_REVISION_VERSION;
245 		dev_info->fw_eng = FW_ENGINEERING_VERSION;
246 		dev_info->mf_mode = cdev->mf_mode;
247 		dev_info->tx_switching = true;
248 
249 		if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
250 		    QED_WOL_SUPPORT_PME)
251 			dev_info->wol_support = true;
252 	} else {
253 		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
254 				      &dev_info->fw_minor, &dev_info->fw_rev,
255 				      &dev_info->fw_eng);
256 	}
257 
258 	if (IS_PF(cdev)) {
259 		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
260 		if (ptt) {
261 			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
262 					    &dev_info->mfw_rev, NULL);
263 
264 			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
265 					       &dev_info->flash_size);
266 
267 			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
268 		}
269 	} else {
270 		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
271 				    &dev_info->mfw_rev, NULL);
272 	}
273 
274 	dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
275 
276 	return 0;
277 }
278 
279 static void qed_free_cdev(struct qed_dev *cdev)
280 {
281 	kfree((void *)cdev);
282 }
283 
284 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
285 {
286 	struct qed_dev *cdev;
287 
288 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
289 	if (!cdev)
290 		return cdev;
291 
292 	qed_init_struct(cdev);
293 
294 	return cdev;
295 }
296 
297 /* Sets the requested power state */
298 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
299 {
300 	if (!cdev)
301 		return -ENODEV;
302 
303 	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
304 	return 0;
305 }
306 
307 /* probing */
308 static struct qed_dev *qed_probe(struct pci_dev *pdev,
309 				 struct qed_probe_params *params)
310 {
311 	struct qed_dev *cdev;
312 	int rc;
313 
314 	cdev = qed_alloc_cdev(pdev);
315 	if (!cdev)
316 		goto err0;
317 
318 	cdev->protocol = params->protocol;
319 
320 	if (params->is_vf)
321 		cdev->b_is_vf = true;
322 
323 	qed_init_dp(cdev, params->dp_module, params->dp_level);
324 
325 	rc = qed_init_pci(cdev, pdev);
326 	if (rc) {
327 		DP_ERR(cdev, "init pci failed\n");
328 		goto err1;
329 	}
330 	DP_INFO(cdev, "PCI init completed successfully\n");
331 
332 	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
333 	if (rc) {
334 		DP_ERR(cdev, "hw prepare failed\n");
335 		goto err2;
336 	}
337 
338 	DP_INFO(cdev, "qed_probe completed successfully\n");
339 
340 	return cdev;
341 
342 err2:
343 	qed_free_pci(cdev);
344 err1:
345 	qed_free_cdev(cdev);
346 err0:
347 	return NULL;
348 }
349 
350 static void qed_remove(struct qed_dev *cdev)
351 {
352 	if (!cdev)
353 		return;
354 
355 	qed_hw_remove(cdev);
356 
357 	qed_free_pci(cdev);
358 
359 	qed_set_power_state(cdev, PCI_D3hot);
360 
361 	qed_free_cdev(cdev);
362 }
363 
364 static void qed_disable_msix(struct qed_dev *cdev)
365 {
366 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
367 		pci_disable_msix(cdev->pdev);
368 		kfree(cdev->int_params.msix_table);
369 	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
370 		pci_disable_msi(cdev->pdev);
371 	}
372 
373 	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
374 }
375 
376 static int qed_enable_msix(struct qed_dev *cdev,
377 			   struct qed_int_params *int_params)
378 {
379 	int i, rc, cnt;
380 
381 	cnt = int_params->in.num_vectors;
382 
383 	for (i = 0; i < cnt; i++)
384 		int_params->msix_table[i].entry = i;
385 
386 	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
387 				   int_params->in.min_msix_cnt, cnt);
388 	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
389 	    (rc % cdev->num_hwfns)) {
390 		pci_disable_msix(cdev->pdev);
391 
392 		/* If fastpath is initialized, we need at least one interrupt
393 		 * per hwfn [and the slow path interrupts]. The new requested
394 		 * number should be a multiple of the number of hwfns.
395 		 */
396 		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
397 		DP_NOTICE(cdev,
398 			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
399 			  cnt, int_params->in.num_vectors);
400 		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
401 					   cnt);
402 		if (!rc)
403 			rc = cnt;
404 	}
405 
406 	if (rc > 0) {
407 		/* MSI-X configuration was achieved */
408 		int_params->out.int_mode = QED_INT_MODE_MSIX;
409 		int_params->out.num_vectors = rc;
410 		rc = 0;
411 	} else {
412 		DP_NOTICE(cdev,
413 			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
414 			  cnt, rc);
415 	}
416 
417 	return rc;
418 }
419 
420 /* This function outputs the int mode and the number of enabled MSI-X vectors */
421 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
422 {
423 	struct qed_int_params *int_params = &cdev->int_params;
424 	struct msix_entry *tbl;
425 	int rc = 0, cnt;
426 
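	/* Try the requested mode first; unless force_mode is set, degrade
	 * from MSI-X to MSI (single-hwfn devices only) and finally to INTA.
	 */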
427 	switch (int_params->in.int_mode) {
428 	case QED_INT_MODE_MSIX:
429 		/* Allocate MSI-X table */
430 		cnt = int_params->in.num_vectors;
431 		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
432 		if (!int_params->msix_table) {
433 			rc = -ENOMEM;
434 			goto out;
435 		}
436 
437 		/* Enable MSIX */
438 		rc = qed_enable_msix(cdev, int_params);
439 		if (!rc)
440 			goto out;
441 
442 		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
443 		kfree(int_params->msix_table);
444 		if (force_mode)
445 			goto out;
446 		/* Fallthrough */
447 
448 	case QED_INT_MODE_MSI:
449 		if (cdev->num_hwfns == 1) {
450 			rc = pci_enable_msi(cdev->pdev);
451 			if (!rc) {
452 				int_params->out.int_mode = QED_INT_MODE_MSI;
453 				goto out;
454 			}
455 
456 			DP_NOTICE(cdev, "Failed to enable MSI\n");
457 			if (force_mode)
458 				goto out;
459 		}
460 		/* Fallthrough */
461 
462 	case QED_INT_MODE_INTA:
463 			int_params->out.int_mode = QED_INT_MODE_INTA;
464 			rc = 0;
465 			goto out;
466 	default:
467 		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
468 			  int_params->in.int_mode);
469 		rc = -EINVAL;
470 	}
471 
472 out:
473 	if (!rc)
474 		DP_INFO(cdev, "Using %s interrupts\n",
475 			int_params->out.int_mode == QED_INT_MODE_INTA ?
476 			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
477 			"MSI" : "MSIX");
478 	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
479 
480 	return rc;
481 }
482 
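/* Fastpath SIMD handlers are indexed globally and interleaved across the
 * hwfns: index % num_hwfns selects the engine, index / num_hwfns the
 * engine-relative slot.
 */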
483 static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
484 				    int index, void(*handler)(void *))
485 {
486 	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
487 	int relative_idx = index / cdev->num_hwfns;
488 
489 	hwfn->simd_proto_handler[relative_idx].func = handler;
490 	hwfn->simd_proto_handler[relative_idx].token = token;
491 }
492 
493 static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
494 {
495 	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
496 	int relative_idx = index / cdev->num_hwfns;
497 
498 	memset(&hwfn->simd_proto_handler[relative_idx], 0,
499 	       sizeof(struct qed_simd_fp_handler));
500 }
501 
502 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
503 {
504 	tasklet_schedule((struct tasklet_struct *)tasklet);
505 	return IRQ_HANDLED;
506 }
507 
508 static irqreturn_t qed_single_int(int irq, void *dev_instance)
509 {
510 	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
511 	struct qed_hwfn *hwfn;
512 	irqreturn_t rc = IRQ_NONE;
513 	u64 status;
514 	int i, j;
515 
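	/* A single ISR serves all hwfns in INTA/MSI mode: bit 0 of the IGU
	 * status is the slowpath indication, while the remaining bits map to
	 * the registered SIMD fastpath handlers.
	 */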
516 	for (i = 0; i < cdev->num_hwfns; i++) {
517 		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
518 
519 		if (!status)
520 			continue;
521 
522 		hwfn = &cdev->hwfns[i];
523 
524 		/* Slowpath interrupt */
525 		if (unlikely(status & 0x1)) {
526 			tasklet_schedule(hwfn->sp_dpc);
527 			status &= ~0x1;
528 			rc = IRQ_HANDLED;
529 		}
530 
531 		/* Fastpath interrupts */
532 		for (j = 0; j < 64; j++) {
533 			if ((0x2ULL << j) & status) {
534 				hwfn->simd_proto_handler[j].func(
535 					hwfn->simd_proto_handler[j].token);
536 				status &= ~(0x2ULL << j);
537 				rc = IRQ_HANDLED;
538 			}
539 		}
540 
541 		if (unlikely(status))
542 			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
543 				   "got an unknown interrupt status 0x%llx\n",
544 				   status);
545 	}
546 
547 	return rc;
548 }
549 
550 int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
551 {
552 	struct qed_dev *cdev = hwfn->cdev;
553 	u32 int_mode;
554 	int rc = 0;
555 	u8 id;
556 
557 	int_mode = cdev->int_params.out.int_mode;
558 	if (int_mode == QED_INT_MODE_MSIX) {
559 		id = hwfn->my_id;
560 		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
561 			 id, cdev->pdev->bus->number,
562 			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
563 		rc = request_irq(cdev->int_params.msix_table[id].vector,
564 				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
565 	} else {
566 		unsigned long flags = 0;
567 
568 		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
569 			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
570 			 PCI_FUNC(cdev->pdev->devfn));
571 
572 		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
573 			flags |= IRQF_SHARED;
574 
575 		rc = request_irq(cdev->pdev->irq, qed_single_int,
576 				 flags, cdev->name, cdev);
577 	}
578 
579 	if (rc)
580 		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
581 	else
582 		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
583 			   "Requested slowpath %s\n",
584 			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
585 
586 	return rc;
587 }
588 
589 static void qed_slowpath_irq_free(struct qed_dev *cdev)
590 {
591 	int i;
592 
593 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
594 		for_each_hwfn(cdev, i) {
595 			if (!cdev->hwfns[i].b_int_requested)
596 				break;
597 			synchronize_irq(cdev->int_params.msix_table[i].vector);
598 			free_irq(cdev->int_params.msix_table[i].vector,
599 				 cdev->hwfns[i].sp_dpc);
600 		}
601 	} else {
602 		if (QED_LEADING_HWFN(cdev)->b_int_requested)
603 			free_irq(cdev->pdev->irq, cdev);
604 	}
605 	qed_int_disable_post_isr_release(cdev);
606 }
607 
608 static int qed_nic_stop(struct qed_dev *cdev)
609 {
610 	int i, rc;
611 
612 	rc = qed_hw_stop(cdev);
613 
614 	for (i = 0; i < cdev->num_hwfns; i++) {
615 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
616 
617 		if (p_hwfn->b_sp_dpc_enabled) {
618 			tasklet_disable(p_hwfn->sp_dpc);
619 			p_hwfn->b_sp_dpc_enabled = false;
620 			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
621 				   "Disabled sp tasklet [hwfn %d] at %p\n",
622 				   i, p_hwfn->sp_dpc);
623 		}
624 	}
625 
626 	qed_dbg_pf_exit(cdev);
627 
628 	return rc;
629 }
630 
631 static int qed_nic_reset(struct qed_dev *cdev)
632 {
633 	int rc;
634 
635 	rc = qed_hw_reset(cdev);
636 	if (rc)
637 		return rc;
638 
639 	qed_resc_free(cdev);
640 
641 	return 0;
642 }
643 
644 static int qed_nic_setup(struct qed_dev *cdev)
645 {
646 	int rc, i;
647 
648 	/* Determine if interface is going to require LL2 */
649 	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
650 		for (i = 0; i < cdev->num_hwfns; i++) {
651 			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
652 
653 			p_hwfn->using_ll2 = true;
654 		}
655 	}
656 
657 	rc = qed_resc_alloc(cdev);
658 	if (rc)
659 		return rc;
660 
661 	DP_INFO(cdev, "Allocated qed resources\n");
662 
663 	qed_resc_setup(cdev);
664 
665 	return rc;
666 }
667 
668 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
669 {
670 	int limit = 0;
671 
672 	/* Mark the fastpath as free/used */
673 	cdev->int_params.fp_initialized = cnt ? true : false;
674 
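	/* Without MSI-X each hwfn is limited to the 63 fastpath bits of its
	 * single-interrupt status word; with MSI-X the limit is the number of
	 * vectors reserved for fastpath use.
	 */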
675 	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
676 		limit = cdev->num_hwfns * 63;
677 	else if (cdev->int_params.fp_msix_cnt)
678 		limit = cdev->int_params.fp_msix_cnt;
679 
680 	if (!limit)
681 		return -ENOMEM;
682 
683 	return min_t(int, cnt, limit);
684 }
685 
686 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
687 {
688 	memset(info, 0, sizeof(struct qed_int_info));
689 
690 	if (!cdev->int_params.fp_initialized) {
691 		DP_INFO(cdev,
692 			"Protocol driver requested interrupt information, but its support is not yet configured\n");
693 		return -EINVAL;
694 	}
695 
696 	/* Need to expose only MSI-X information; Single IRQ is handled solely
697 	 * by qed.
698 	 */
699 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
700 		int msix_base = cdev->int_params.fp_msix_base;
701 
702 		info->msix_cnt = cdev->int_params.fp_msix_cnt;
703 		info->msix = &cdev->int_params.msix_table[msix_base];
704 	}
705 
706 	return 0;
707 }
708 
709 static int qed_slowpath_setup_int(struct qed_dev *cdev,
710 				  enum qed_int_mode int_mode)
711 {
712 	struct qed_sb_cnt_info sb_cnt_info;
713 	int num_l2_queues = 0;
714 	int rc;
715 	int i;
716 
717 	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
718 		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
719 		return -EINVAL;
720 	}
721 
722 	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
723 	cdev->int_params.in.int_mode = int_mode;
724 	for_each_hwfn(cdev, i) {
725 		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
726 		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
727 		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
728 		cdev->int_params.in.num_vectors++; /* slowpath */
729 	}
730 
731 	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
732 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
733 
734 	rc = qed_set_int_mode(cdev, false);
735 	if (rc)  {
736 		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
737 		return rc;
738 	}
739 
740 	cdev->int_params.fp_msix_base = cdev->num_hwfns;
741 	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
742 				       cdev->num_hwfns;
743 
744 	if (!IS_ENABLED(CONFIG_QED_RDMA))
745 		return 0;
746 
747 	for_each_hwfn(cdev, i)
748 		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
749 
750 	DP_VERBOSE(cdev, QED_MSG_RDMA,
751 		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
752 		   cdev->int_params.fp_msix_cnt, num_l2_queues);
753 
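	/* Fastpath vectors left over after serving the L2 queues are handed
	 * to RDMA, split evenly between the hwfns.
	 */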
754 	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
755 		cdev->int_params.rdma_msix_cnt =
756 			(cdev->int_params.fp_msix_cnt - num_l2_queues)
757 			/ cdev->num_hwfns;
758 		cdev->int_params.rdma_msix_base =
759 			cdev->int_params.fp_msix_base + num_l2_queues;
760 		cdev->int_params.fp_msix_cnt = num_l2_queues;
761 	} else {
762 		cdev->int_params.rdma_msix_cnt = 0;
763 	}
764 
765 	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
766 		   cdev->int_params.rdma_msix_cnt,
767 		   cdev->int_params.rdma_msix_base);
768 
769 	return 0;
770 }
771 
772 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
773 {
774 	int rc;
775 
776 	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
777 	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
778 
779 	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
780 			    &cdev->int_params.in.num_vectors);
781 	if (cdev->num_hwfns > 1) {
782 		u8 vectors = 0;
783 
784 		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
785 		cdev->int_params.in.num_vectors += vectors;
786 	}
787 
788 	/* We want a minimum of one fastpath vector per vf hwfn */
789 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
790 
791 	rc = qed_set_int_mode(cdev, true);
792 	if (rc)
793 		return rc;
794 
795 	cdev->int_params.fp_msix_base = 0;
796 	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
797 
798 	return 0;
799 }
800 
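/* Inflate a zlib-compressed firmware buffer into unzip_buf. Returns the
 * number of output dwords, or 0 on failure.
 */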
801 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
802 		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
803 {
804 	int rc;
805 
806 	p_hwfn->stream->next_in = input_buf;
807 	p_hwfn->stream->avail_in = input_len;
808 	p_hwfn->stream->next_out = unzip_buf;
809 	p_hwfn->stream->avail_out = max_size;
810 
811 	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
812 
813 	if (rc != Z_OK) {
814 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
815 			   rc);
816 		return 0;
817 	}
818 
819 	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
820 	zlib_inflateEnd(p_hwfn->stream);
821 
822 	if (rc != Z_OK && rc != Z_STREAM_END) {
823 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
824 			   p_hwfn->stream->msg, rc);
825 		return 0;
826 	}
827 
828 	return p_hwfn->stream->total_out / 4;
829 }
830 
831 static int qed_alloc_stream_mem(struct qed_dev *cdev)
832 {
833 	int i;
834 	void *workspace;
835 
836 	for_each_hwfn(cdev, i) {
837 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
838 
839 		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
840 		if (!p_hwfn->stream)
841 			return -ENOMEM;
842 
843 		workspace = vzalloc(zlib_inflate_workspacesize());
844 		if (!workspace)
845 			return -ENOMEM;
846 		p_hwfn->stream->workspace = workspace;
847 	}
848 
849 	return 0;
850 }
851 
852 static void qed_free_stream_mem(struct qed_dev *cdev)
853 {
854 	int i;
855 
856 	for_each_hwfn(cdev, i) {
857 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
858 
859 		if (!p_hwfn->stream)
860 			return;
861 
862 		vfree(p_hwfn->stream->workspace);
863 		kfree(p_hwfn->stream);
864 	}
865 }
866 
867 static void qed_update_pf_params(struct qed_dev *cdev,
868 				 struct qed_pf_params *params)
869 {
870 	int i;
871 
872 	if (IS_ENABLED(CONFIG_QED_RDMA)) {
873 		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
874 		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
875 		/* Divide the MRs by 3 to avoid MF ILT overflow */
876 		params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
877 		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
878 	}
879 
880 	/* In case we might support RDMA, don't allow qede to be greedy
881 	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
882 	 */
883 	if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
884 	    QED_PCI_ETH_ROCE) {
885 		u16 *num_cons;
886 
887 		num_cons = &params->eth_pf_params.num_cons;
888 		*num_cons = min_t(u16, *num_cons, 192);
889 	}
890 
891 	for (i = 0; i < cdev->num_hwfns; i++) {
892 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
893 
894 		p_hwfn->pf_params = *params;
895 	}
896 }
897 
898 static int qed_slowpath_start(struct qed_dev *cdev,
899 			      struct qed_slowpath_params *params)
900 {
901 	struct qed_tunn_start_params tunn_info;
902 	struct qed_mcp_drv_version drv_version;
903 	const u8 *data = NULL;
904 	struct qed_hwfn *hwfn;
905 	int rc = -EINVAL;
906 
907 	if (qed_iov_wq_start(cdev))
908 		goto err;
909 
910 	if (IS_PF(cdev)) {
911 		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
912 				      &cdev->pdev->dev);
913 		if (rc) {
914 			DP_NOTICE(cdev,
915 				  "Failed to find fw file - /lib/firmware/%s\n",
916 				  QED_FW_FILE_NAME);
917 			goto err;
918 		}
919 	}
920 
921 	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
922 	rc = qed_nic_setup(cdev);
923 	if (rc)
924 		goto err;
925 
926 	if (IS_PF(cdev))
927 		rc = qed_slowpath_setup_int(cdev, params->int_mode);
928 	else
929 		rc = qed_slowpath_vf_setup_int(cdev);
930 	if (rc)
931 		goto err1;
932 
933 	if (IS_PF(cdev)) {
934 		/* Allocate stream for unzipping */
935 		rc = qed_alloc_stream_mem(cdev);
936 		if (rc)
937 			goto err2;
938 
939 		/* First dword is used to differentiate between various sources */
940 		data = cdev->firmware->data + sizeof(u32);
941 
942 		qed_dbg_pf_init(cdev);
943 	}
944 
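	/* Enable VXLAN, GRE and GENEVE tunnel modes by default and classify
	 * tunnelled traffic by MAC and VLAN.
	 */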
945 	memset(&tunn_info, 0, sizeof(tunn_info));
946 	tunn_info.tunn_mode |=  1 << QED_MODE_VXLAN_TUNN |
947 				1 << QED_MODE_L2GRE_TUNN |
948 				1 << QED_MODE_IPGRE_TUNN |
949 				1 << QED_MODE_L2GENEVE_TUNN |
950 				1 << QED_MODE_IPGENEVE_TUNN;
951 
952 	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
953 	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
954 	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
955 
956 	/* Start the slowpath */
957 	rc = qed_hw_init(cdev, &tunn_info, true,
958 			 cdev->int_params.out.int_mode,
959 			 true, data);
960 	if (rc)
961 		goto err2;
962 
963 	DP_INFO(cdev,
964 		"HW initialization and function start completed successfully\n");
965 
966 	/* Allocate LL2 interface if needed */
967 	if (QED_LEADING_HWFN(cdev)->using_ll2) {
968 		rc = qed_ll2_alloc_if(cdev);
969 		if (rc)
970 			goto err3;
971 	}
972 	if (IS_PF(cdev)) {
973 		hwfn = QED_LEADING_HWFN(cdev);
974 		drv_version.version = (params->drv_major << 24) |
975 				      (params->drv_minor << 16) |
976 				      (params->drv_rev << 8) |
977 				      (params->drv_eng);
978 		strlcpy(drv_version.name, params->name,
979 			MCP_DRV_VER_STR_SIZE - 4);
980 		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
981 					      &drv_version);
982 		if (rc) {
983 			DP_NOTICE(cdev, "Failed sending drv version command\n");
984 			return rc;
985 		}
986 	}
987 
988 	qed_reset_vport_stats(cdev);
989 
990 	return 0;
991 
992 err3:
993 	qed_hw_stop(cdev);
994 err2:
995 	qed_hw_timers_stop_all(cdev);
996 	if (IS_PF(cdev))
997 		qed_slowpath_irq_free(cdev);
998 	qed_free_stream_mem(cdev);
999 	qed_disable_msix(cdev);
1000 err1:
1001 	qed_resc_free(cdev);
1002 err:
1003 	if (IS_PF(cdev))
1004 		release_firmware(cdev->firmware);
1005 
1006 	qed_iov_wq_stop(cdev, false);
1007 
1008 	return rc;
1009 }
1010 
1011 static int qed_slowpath_stop(struct qed_dev *cdev)
1012 {
1013 	if (!cdev)
1014 		return -ENODEV;
1015 
1016 	qed_ll2_dealloc_if(cdev);
1017 
1018 	if (IS_PF(cdev)) {
1019 		qed_free_stream_mem(cdev);
1020 		if (IS_QED_ETH_IF(cdev))
1021 			qed_sriov_disable(cdev, true);
1022 
1023 		qed_nic_stop(cdev);
1024 		qed_slowpath_irq_free(cdev);
1025 	}
1026 
1027 	qed_disable_msix(cdev);
1028 	qed_nic_reset(cdev);
1029 
1030 	qed_iov_wq_stop(cdev, true);
1031 
1032 	if (IS_PF(cdev))
1033 		release_firmware(cdev->firmware);
1034 
1035 	return 0;
1036 }
1037 
1038 static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
1039 		       char ver_str[VER_SIZE])
1040 {
1041 	int i;
1042 
1043 	memcpy(cdev->name, name, NAME_SIZE);
1044 	for_each_hwfn(cdev, i)
1045 		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
1046 
1047 	memcpy(cdev->ver_str, ver_str, VER_SIZE);
1048 	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
1049 }
1050 
1051 static u32 qed_sb_init(struct qed_dev *cdev,
1052 		       struct qed_sb_info *sb_info,
1053 		       void *sb_virt_addr,
1054 		       dma_addr_t sb_phy_addr, u16 sb_id,
1055 		       enum qed_sb_type type)
1056 {
1057 	struct qed_hwfn *p_hwfn;
1058 	int hwfn_index;
1059 	u16 rel_sb_id;
1060 	u8 n_hwfns;
1061 	u32 rc;
1062 
1063 	/* RoCE uses a single engine and CMT uses two engines. When using both
1064 	 * we force only a single engine. Storage uses only engine 0 too.
1065 	 */
1066 	if (type == QED_SB_TYPE_L2_QUEUE)
1067 		n_hwfns = cdev->num_hwfns;
1068 	else
1069 		n_hwfns = 1;
1070 
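	/* Status blocks are interleaved across the participating hwfns:
	 * sb_id % n_hwfns selects the engine and sb_id / n_hwfns is the
	 * engine-relative status block index.
	 */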
1071 	hwfn_index = sb_id % n_hwfns;
1072 	p_hwfn = &cdev->hwfns[hwfn_index];
1073 	rel_sb_id = sb_id / n_hwfns;
1074 
1075 	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1076 		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1077 		   hwfn_index, rel_sb_id, sb_id);
1078 
1079 	rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
1080 			     sb_virt_addr, sb_phy_addr, rel_sb_id);
1081 
1082 	return rc;
1083 }
1084 
1085 static u32 qed_sb_release(struct qed_dev *cdev,
1086 			  struct qed_sb_info *sb_info, u16 sb_id)
1087 {
1088 	struct qed_hwfn *p_hwfn;
1089 	int hwfn_index;
1090 	u16 rel_sb_id;
1091 	u32 rc;
1092 
1093 	hwfn_index = sb_id % cdev->num_hwfns;
1094 	p_hwfn = &cdev->hwfns[hwfn_index];
1095 	rel_sb_id = sb_id / cdev->num_hwfns;
1096 
1097 	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1098 		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1099 		   hwfn_index, rel_sb_id, sb_id);
1100 
1101 	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1102 
1103 	return rc;
1104 }
1105 
1106 static bool qed_can_link_change(struct qed_dev *cdev)
1107 {
1108 	return true;
1109 }
1110 
1111 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1112 {
1113 	struct qed_hwfn *hwfn;
1114 	struct qed_mcp_link_params *link_params;
1115 	struct qed_ptt *ptt;
1116 	int rc;
1117 
1118 	if (!cdev)
1119 		return -ENODEV;
1120 
1121 	if (IS_VF(cdev))
1122 		return 0;
1123 
1124 	/* The link should be set only once per PF */
1125 	hwfn = &cdev->hwfns[0];
1126 
1127 	ptt = qed_ptt_acquire(hwfn);
1128 	if (!ptt)
1129 		return -EBUSY;
1130 
1131 	link_params = qed_mcp_get_link_params(hwfn);
1132 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1133 		link_params->speed.autoneg = params->autoneg;
1134 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1135 		link_params->speed.advertised_speeds = 0;
1136 		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
1137 		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
1138 			link_params->speed.advertised_speeds |=
1139 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
1140 		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
1141 			link_params->speed.advertised_speeds |=
1142 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
1143 		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
1144 			link_params->speed.advertised_speeds |=
1145 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
1146 		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
1147 			link_params->speed.advertised_speeds |=
1148 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
1149 		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
1150 			link_params->speed.advertised_speeds |=
1151 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
1152 		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
1153 			link_params->speed.advertised_speeds |=
1154 			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
1155 	}
1156 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1157 		link_params->speed.forced_speed = params->forced_speed;
1158 	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1159 		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1160 			link_params->pause.autoneg = true;
1161 		else
1162 			link_params->pause.autoneg = false;
1163 		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1164 			link_params->pause.forced_rx = true;
1165 		else
1166 			link_params->pause.forced_rx = false;
1167 		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1168 			link_params->pause.forced_tx = true;
1169 		else
1170 			link_params->pause.forced_tx = false;
1171 	}
1172 	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1173 		switch (params->loopback_mode) {
1174 		case QED_LINK_LOOPBACK_INT_PHY:
1175 			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1176 			break;
1177 		case QED_LINK_LOOPBACK_EXT_PHY:
1178 			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1179 			break;
1180 		case QED_LINK_LOOPBACK_EXT:
1181 			link_params->loopback_mode = ETH_LOOPBACK_EXT;
1182 			break;
1183 		case QED_LINK_LOOPBACK_MAC:
1184 			link_params->loopback_mode = ETH_LOOPBACK_MAC;
1185 			break;
1186 		default:
1187 			link_params->loopback_mode = ETH_LOOPBACK_NONE;
1188 			break;
1189 		}
1190 	}
1191 
1192 	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1193 
1194 	qed_ptt_release(hwfn, ptt);
1195 
1196 	return rc;
1197 }
1198 
1199 static int qed_get_port_type(u32 media_type)
1200 {
1201 	int port_type;
1202 
1203 	switch (media_type) {
1204 	case MEDIA_SFPP_10G_FIBER:
1205 	case MEDIA_SFP_1G_FIBER:
1206 	case MEDIA_XFP_FIBER:
1207 	case MEDIA_MODULE_FIBER:
1208 	case MEDIA_KR:
1209 		port_type = PORT_FIBRE;
1210 		break;
1211 	case MEDIA_DA_TWINAX:
1212 		port_type = PORT_DA;
1213 		break;
1214 	case MEDIA_BASE_T:
1215 		port_type = PORT_TP;
1216 		break;
1217 	case MEDIA_NOT_PRESENT:
1218 		port_type = PORT_NONE;
1219 		break;
1220 	case MEDIA_UNSPECIFIED:
1221 	default:
1222 		port_type = PORT_OTHER;
1223 		break;
1224 	}
1225 	return port_type;
1226 }
1227 
1228 static int qed_get_link_data(struct qed_hwfn *hwfn,
1229 			     struct qed_mcp_link_params *params,
1230 			     struct qed_mcp_link_state *link,
1231 			     struct qed_mcp_link_capabilities *link_caps)
1232 {
1233 	void *p;
1234 
1235 	if (!IS_PF(hwfn->cdev)) {
1236 		qed_vf_get_link_params(hwfn, params);
1237 		qed_vf_get_link_state(hwfn, link);
1238 		qed_vf_get_link_caps(hwfn, link_caps);
1239 
1240 		return 0;
1241 	}
1242 
1243 	p = qed_mcp_get_link_params(hwfn);
1244 	if (!p)
1245 		return -ENXIO;
1246 	memcpy(params, p, sizeof(*params));
1247 
1248 	p = qed_mcp_get_link_state(hwfn);
1249 	if (!p)
1250 		return -ENXIO;
1251 	memcpy(link, p, sizeof(*link));
1252 
1253 	p = qed_mcp_get_link_capabilities(hwfn);
1254 	if (!p)
1255 		return -ENXIO;
1256 	memcpy(link_caps, p, sizeof(*link_caps));
1257 
1258 	return 0;
1259 }
1260 
1261 static void qed_fill_link(struct qed_hwfn *hwfn,
1262 			  struct qed_link_output *if_link)
1263 {
1264 	struct qed_mcp_link_params params;
1265 	struct qed_mcp_link_state link;
1266 	struct qed_mcp_link_capabilities link_caps;
1267 	u32 media_type;
1268 
1269 	memset(if_link, 0, sizeof(*if_link));
1270 
1271 	/* Prepare source inputs */
1272 	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
1273 		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
1274 		return;
1275 	}
1276 
1277 	/* Set the link parameters to pass to protocol driver */
1278 	if (link.link_up)
1279 		if_link->link_up = true;
1280 
1281 	/* TODO - at the moment assume supported and advertised speeds are equal */
1282 	if_link->supported_caps = QED_LM_FIBRE_BIT;
1283 	if (params.speed.autoneg)
1284 		if_link->supported_caps |= QED_LM_Autoneg_BIT;
1285 	if (params.pause.autoneg ||
1286 	    (params.pause.forced_rx && params.pause.forced_tx))
1287 		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
1288 	if (params.pause.autoneg || params.pause.forced_rx ||
1289 	    params.pause.forced_tx)
1290 		if_link->supported_caps |= QED_LM_Pause_BIT;
1291 
1292 	if_link->advertised_caps = if_link->supported_caps;
1293 	if (params.speed.advertised_speeds &
1294 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1295 		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
1296 		    QED_LM_1000baseT_Full_BIT;
1297 	if (params.speed.advertised_speeds &
1298 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1299 		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
1300 	if (params.speed.advertised_speeds &
1301 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1302 		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
1303 	if (params.speed.advertised_speeds &
1304 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1305 		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
1306 	if (params.speed.advertised_speeds &
1307 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1308 		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
1309 	if (params.speed.advertised_speeds &
1310 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1311 		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;
1312 
1313 	if (link_caps.speed_capabilities &
1314 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1315 		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
1316 		    QED_LM_1000baseT_Full_BIT;
1317 	if (link_caps.speed_capabilities &
1318 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1319 		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
1320 	if (link_caps.speed_capabilities &
1321 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1322 		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
1323 	if (link_caps.speed_capabilities &
1324 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1325 		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
1326 	if (link_caps.speed_capabilities &
1327 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1328 		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
1329 	if (link_caps.speed_capabilities &
1330 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1331 		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;
1332 
1333 	if (link.link_up)
1334 		if_link->speed = link.speed;
1335 
1336 	/* TODO - fill duplex properly */
1337 	if_link->duplex = DUPLEX_FULL;
1338 	qed_mcp_get_media_type(hwfn->cdev, &media_type);
1339 	if_link->port = qed_get_port_type(media_type);
1340 
1341 	if_link->autoneg = params.speed.autoneg;
1342 
1343 	if (params.pause.autoneg)
1344 		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1345 	if (params.pause.forced_rx)
1346 		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1347 	if (params.pause.forced_tx)
1348 		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1349 
1350 	/* Link partner capabilities */
1351 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
1352 		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
1353 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
1354 		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
1355 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
1356 		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
1357 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
1358 		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
1359 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
1360 		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
1361 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
1362 		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
1363 	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
1364 		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
1365 
1366 	if (link.an_complete)
1367 		if_link->lp_caps |= QED_LM_Autoneg_BIT;
1368 
1369 	if (link.partner_adv_pause)
1370 		if_link->lp_caps |= QED_LM_Pause_BIT;
1371 	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1372 	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
1373 		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
1374 }
1375 
1376 static void qed_get_current_link(struct qed_dev *cdev,
1377 				 struct qed_link_output *if_link)
1378 {
1379 	int i;
1380 
1381 	qed_fill_link(&cdev->hwfns[0], if_link);
1382 
1383 	for_each_hwfn(cdev, i)
1384 		qed_inform_vf_link_state(&cdev->hwfns[i]);
1385 }
1386 
1387 void qed_link_update(struct qed_hwfn *hwfn)
1388 {
1389 	void *cookie = hwfn->cdev->ops_cookie;
1390 	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
1391 	struct qed_link_output if_link;
1392 
1393 	qed_fill_link(hwfn, &if_link);
1394 	qed_inform_vf_link_state(hwfn);
1395 
1396 	if (IS_LEAD_HWFN(hwfn) && cookie)
1397 		op->link_update(cookie, &if_link);
1398 }
1399 
1400 static int qed_drain(struct qed_dev *cdev)
1401 {
1402 	struct qed_hwfn *hwfn;
1403 	struct qed_ptt *ptt;
1404 	int i, rc;
1405 
1406 	if (IS_VF(cdev))
1407 		return 0;
1408 
1409 	for_each_hwfn(cdev, i) {
1410 		hwfn = &cdev->hwfns[i];
1411 		ptt = qed_ptt_acquire(hwfn);
1412 		if (!ptt) {
1413 			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
1414 			return -EBUSY;
1415 		}
1416 		rc = qed_mcp_drain(hwfn, ptt);
1417 		if (rc)
1418 			return rc;
1419 		qed_ptt_release(hwfn, ptt);
1420 	}
1421 
1422 	return 0;
1423 }
1424 
1425 static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
1426 {
1427 	*rx_coal = cdev->rx_coalesce_usecs;
1428 	*tx_coal = cdev->tx_coalesce_usecs;
1429 }
1430 
1431 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
1432 			    u8 qid, u16 sb_id)
1433 {
1434 	struct qed_hwfn *hwfn;
1435 	struct qed_ptt *ptt;
1436 	int hwfn_index;
1437 	int status = 0;
1438 
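	/* Queues are interleaved across hwfns, so translate the global queue
	 * id into an engine and an engine-relative index.
	 */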
1439 	hwfn_index = qid % cdev->num_hwfns;
1440 	hwfn = &cdev->hwfns[hwfn_index];
1441 	ptt = qed_ptt_acquire(hwfn);
1442 	if (!ptt)
1443 		return -EAGAIN;
1444 
1445 	status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
1446 				      qid / cdev->num_hwfns, sb_id);
1447 	if (status)
1448 		goto out;
1449 	status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
1450 				      qid / cdev->num_hwfns, sb_id);
1451 out:
1452 	qed_ptt_release(hwfn, ptt);
1453 
1454 	return status;
1455 }
1456 
1457 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
1458 {
1459 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1460 	struct qed_ptt *ptt;
1461 	int status = 0;
1462 
1463 	ptt = qed_ptt_acquire(hwfn);
1464 	if (!ptt)
1465 		return -EAGAIN;
1466 
1467 	status = qed_mcp_set_led(hwfn, ptt, mode);
1468 
1469 	qed_ptt_release(hwfn, ptt);
1470 
1471 	return status;
1472 }
1473 
1474 static int qed_update_wol(struct qed_dev *cdev, bool enabled)
1475 {
1476 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1477 	struct qed_ptt *ptt;
1478 	int rc = 0;
1479 
1480 	if (IS_VF(cdev))
1481 		return 0;
1482 
1483 	ptt = qed_ptt_acquire(hwfn);
1484 	if (!ptt)
1485 		return -EAGAIN;
1486 
1487 	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
1488 				   : QED_OV_WOL_DISABLED);
1489 	if (rc)
1490 		goto out;
1491 	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
1492 
1493 out:
1494 	qed_ptt_release(hwfn, ptt);
1495 	return rc;
1496 }
1497 
1498 static int qed_update_drv_state(struct qed_dev *cdev, bool active)
1499 {
1500 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1501 	struct qed_ptt *ptt;
1502 	int status = 0;
1503 
1504 	if (IS_VF(cdev))
1505 		return 0;
1506 
1507 	ptt = qed_ptt_acquire(hwfn);
1508 	if (!ptt)
1509 		return -EAGAIN;
1510 
1511 	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
1512 						QED_OV_DRIVER_STATE_ACTIVE :
1513 						QED_OV_DRIVER_STATE_DISABLED);
1514 
1515 	qed_ptt_release(hwfn, ptt);
1516 
1517 	return status;
1518 }
1519 
1520 static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
1521 {
1522 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1523 	struct qed_ptt *ptt;
1524 	int status = 0;
1525 
1526 	if (IS_VF(cdev))
1527 		return 0;
1528 
1529 	ptt = qed_ptt_acquire(hwfn);
1530 	if (!ptt)
1531 		return -EAGAIN;
1532 
1533 	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
1534 	if (status)
1535 		goto out;
1536 
1537 	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
1538 
1539 out:
1540 	qed_ptt_release(hwfn, ptt);
1541 	return status;
1542 }
1543 
1544 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
1545 {
1546 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1547 	struct qed_ptt *ptt;
1548 	int status = 0;
1549 
1550 	if (IS_VF(cdev))
1551 		return 0;
1552 
1553 	ptt = qed_ptt_acquire(hwfn);
1554 	if (!ptt)
1555 		return -EAGAIN;
1556 
1557 	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
1558 	if (status)
1559 		goto out;
1560 
1561 	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
1562 
1563 out:
1564 	qed_ptt_release(hwfn, ptt);
1565 	return status;
1566 }
1567 
1568 static struct qed_selftest_ops qed_selftest_ops_pass = {
1569 	.selftest_memory = &qed_selftest_memory,
1570 	.selftest_interrupt = &qed_selftest_interrupt,
1571 	.selftest_register = &qed_selftest_register,
1572 	.selftest_clock = &qed_selftest_clock,
1573 	.selftest_nvram = &qed_selftest_nvram,
1574 };
1575 
1576 const struct qed_common_ops qed_common_ops_pass = {
1577 	.selftest = &qed_selftest_ops_pass,
1578 	.probe = &qed_probe,
1579 	.remove = &qed_remove,
1580 	.set_power_state = &qed_set_power_state,
1581 	.set_id = &qed_set_id,
1582 	.update_pf_params = &qed_update_pf_params,
1583 	.slowpath_start = &qed_slowpath_start,
1584 	.slowpath_stop = &qed_slowpath_stop,
1585 	.set_fp_int = &qed_set_int_fp,
1586 	.get_fp_int = &qed_get_int_fp,
1587 	.sb_init = &qed_sb_init,
1588 	.sb_release = &qed_sb_release,
1589 	.simd_handler_config = &qed_simd_handler_config,
1590 	.simd_handler_clean = &qed_simd_handler_clean,
1591 	.can_link_change = &qed_can_link_change,
1592 	.set_link = &qed_set_link,
1593 	.get_link = &qed_get_current_link,
1594 	.drain = &qed_drain,
1595 	.update_msglvl = &qed_init_dp,
1596 	.dbg_all_data = &qed_dbg_all_data,
1597 	.dbg_all_data_size = &qed_dbg_all_data_size,
1598 	.chain_alloc = &qed_chain_alloc,
1599 	.chain_free = &qed_chain_free,
1600 	.get_coalesce = &qed_get_coalesce,
1601 	.set_coalesce = &qed_set_coalesce,
1602 	.set_led = &qed_set_led,
1603 	.update_drv_state = &qed_update_drv_state,
1604 	.update_mac = &qed_update_mac,
1605 	.update_mtu = &qed_update_mtu,
1606 	.update_wol = &qed_update_wol,
1607 };
1608 
1609 void qed_get_protocol_stats(struct qed_dev *cdev,
1610 			    enum qed_mcp_protocol_type type,
1611 			    union qed_mcp_protocol_stats *stats)
1612 {
1613 	struct qed_eth_stats eth_stats;
1614 
1615 	memset(stats, 0, sizeof(*stats));
1616 
1617 	switch (type) {
1618 	case QED_MCP_LAN_STATS:
1619 		qed_get_vport_stats(cdev, &eth_stats);
1620 		stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
1621 		stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
1622 		stats->lan_stats.fcs_err = -1;
1623 		break;
1624 	default:
1625 		DP_ERR(cdev, "Invalid protocol type = %d\n", type);
1626 		return;
1627 	}
1628 }
1629