1 /* QLogic qede NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/pci.h>
34 #include <linux/version.h>
35 #include <linux/device.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/errno.h>
40 #include <linux/list.h>
41 #include <linux/string.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/interrupt.h>
44 #include <asm/byteorder.h>
45 #include <asm/param.h>
46 #include <linux/io.h>
47 #include <linux/netdev_features.h>
48 #include <linux/udp.h>
49 #include <linux/tcp.h>
50 #include <net/udp_tunnel.h>
51 #include <linux/ip.h>
52 #include <net/ipv6.h>
53 #include <net/tcp.h>
54 #include <linux/if_ether.h>
55 #include <linux/if_vlan.h>
56 #include <linux/pkt_sched.h>
57 #include <linux/ethtool.h>
58 #include <linux/in.h>
59 #include <linux/random.h>
60 #include <net/ip6_checksum.h>
61 #include <linux/bitops.h>
62 #include <linux/vmalloc.h>
63 #include <linux/qed/qede_roce.h>
64 #include "qede.h"
65 #include "qede_ptp.h"
66 
67 static char version[] =
68 	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
69 
70 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_MODULE_VERSION);
73 
74 static uint debug;
75 module_param(debug, uint, 0);
76 MODULE_PARM_DESC(debug, "Default debug msglevel");
77 
78 static const struct qed_eth_ops *qed_ops;
79 
80 #define CHIP_NUM_57980S_40		0x1634
81 #define CHIP_NUM_57980S_10		0x1666
82 #define CHIP_NUM_57980S_MF		0x1636
83 #define CHIP_NUM_57980S_100		0x1644
84 #define CHIP_NUM_57980S_50		0x1654
85 #define CHIP_NUM_57980S_25		0x1656
86 #define CHIP_NUM_57980S_IOV		0x1664
87 #define CHIP_NUM_AH			0x8070
88 #define CHIP_NUM_AH_IOV			0x8090
89 
90 #ifndef PCI_DEVICE_ID_NX2_57980E
91 #define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
92 #define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
93 #define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
94 #define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
95 #define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
96 #define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
97 #define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
98 #define PCI_DEVICE_ID_AH		CHIP_NUM_AH
99 #define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
100 
101 #endif
102 
103 enum qede_pci_private {
104 	QEDE_PRIVATE_PF,
105 	QEDE_PRIVATE_VF
106 };
107 
108 static const struct pci_device_id qede_pci_tbl[] = {
109 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
110 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
111 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
112 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
113 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
114 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
115 #ifdef CONFIG_QED_SRIOV
116 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
117 #endif
118 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
119 #ifdef CONFIG_QED_SRIOV
120 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
121 #endif
122 	{ 0 }
123 };
124 
125 MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
126 
127 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
128 
129 #define TX_TIMEOUT		(5 * HZ)
130 
131 /* Utilize last protocol index for XDP */
132 #define XDP_PI	11
133 
134 static void qede_remove(struct pci_dev *pdev);
135 static void qede_shutdown(struct pci_dev *pdev);
136 static void qede_link_update(void *dev, struct qed_link_output *link);
137 
138 /* The qede lock is used to protect driver state change and driver flows that
139  * are not reentrant.
140  */
141 void __qede_lock(struct qede_dev *edev)
142 {
143 	mutex_lock(&edev->qede_lock);
144 }
145 
146 void __qede_unlock(struct qede_dev *edev)
147 {
148 	mutex_unlock(&edev->qede_lock);
149 }
150 
151 #ifdef CONFIG_QED_SRIOV
152 static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
153 			    __be16 vlan_proto)
154 {
155 	struct qede_dev *edev = netdev_priv(ndev);
156 
157 	if (vlan > 4095) {
158 		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
159 		return -EINVAL;
160 	}
161 
162 	if (vlan_proto != htons(ETH_P_8021Q))
163 		return -EPROTONOSUPPORT;
164 
165 	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
166 		   vlan, vf);
167 
168 	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
169 }
170 
171 static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
172 {
173 	struct qede_dev *edev = netdev_priv(ndev);
174 
175 	DP_VERBOSE(edev, QED_MSG_IOV,
176 		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
177 		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
178 
179 	if (!is_valid_ether_addr(mac)) {
180 		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
181 		return -EINVAL;
182 	}
183 
184 	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
185 }
186 
187 static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
188 {
189 	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
190 	struct qed_dev_info *qed_info = &edev->dev_info.common;
191 	struct qed_update_vport_params *vport_params;
192 	int rc;
193 
194 	vport_params = vzalloc(sizeof(*vport_params));
195 	if (!vport_params)
196 		return -ENOMEM;
197 	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
198 
199 	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
200 
201 	/* Enable/Disable Tx switching for PF */
202 	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
203 	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
204 		vport_params->vport_id = 0;
205 		vport_params->update_tx_switching_flg = 1;
206 		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
207 		edev->ops->vport_update(edev->cdev, vport_params);
208 	}
209 
210 	vfree(vport_params);
211 	return rc;
212 }
213 #endif
214 
215 static struct pci_driver qede_pci_driver = {
216 	.name = "qede",
217 	.id_table = qede_pci_tbl,
218 	.probe = qede_probe,
219 	.remove = qede_remove,
220 	.shutdown = qede_shutdown,
221 #ifdef CONFIG_QED_SRIOV
222 	.sriov_configure = qede_sriov_configure,
223 #endif
224 };
225 
226 static struct qed_eth_cb_ops qede_ll_ops = {
227 	{
228 #ifdef CONFIG_RFS_ACCEL
229 		.arfs_filter_op = qede_arfs_filter_op,
230 #endif
231 		.link_update = qede_link_update,
232 	},
233 	.force_mac = qede_force_mac,
234 	.ports_update = qede_udp_ports_update,
235 };
236 
237 static int qede_netdev_event(struct notifier_block *this, unsigned long event,
238 			     void *ptr)
239 {
240 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
241 	struct ethtool_drvinfo drvinfo;
242 	struct qede_dev *edev;
243 
244 	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
245 		goto done;
246 
247 	/* Check whether this is a qede device */
248 	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
249 		goto done;
250 
251 	memset(&drvinfo, 0, sizeof(drvinfo));
252 	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
253 	if (strcmp(drvinfo.driver, "qede"))
254 		goto done;
255 	edev = netdev_priv(ndev);
256 
257 	switch (event) {
258 	case NETDEV_CHANGENAME:
259 		/* Notify qed of the name change */
260 		if (!edev->ops || !edev->ops->common)
261 			goto done;
262 		edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
263 		break;
264 	case NETDEV_CHANGEADDR:
265 		edev = netdev_priv(ndev);
266 		qede_roce_event_changeaddr(edev);
267 		break;
268 	}
269 
270 done:
271 	return NOTIFY_DONE;
272 }
273 
274 static struct notifier_block qede_netdev_notifier = {
275 	.notifier_call = qede_netdev_event,
276 };
277 
278 static
279 int __init qede_init(void)
280 {
281 	int ret;
282 
283 	pr_info("qede_init: %s\n", version);
284 
285 	qed_ops = qed_get_eth_ops();
286 	if (!qed_ops) {
287 		pr_notice("Failed to get qed ethernet operations\n");
288 		return -EINVAL;
289 	}
290 
291 	/* Must register notifier before pci ops, since we might miss
292 	 * interface rename after pci probe and netdev registration.
293 	 */
294 	ret = register_netdevice_notifier(&qede_netdev_notifier);
295 	if (ret) {
296 		pr_notice("Failed to register netdevice_notifier\n");
297 		qed_put_eth_ops();
298 		return -EINVAL;
299 	}
300 
301 	ret = pci_register_driver(&qede_pci_driver);
302 	if (ret) {
303 		pr_notice("Failed to register driver\n");
304 		unregister_netdevice_notifier(&qede_netdev_notifier);
305 		qed_put_eth_ops();
306 		return -EINVAL;
307 	}
308 
309 	return 0;
310 }
311 
312 static void __exit qede_cleanup(void)
313 {
314 	if (debug & QED_LOG_INFO_MASK)
315 		pr_info("qede_cleanup called\n");
316 
317 	unregister_netdevice_notifier(&qede_netdev_notifier);
318 	pci_unregister_driver(&qede_pci_driver);
319 	qed_put_eth_ops();
320 }
321 
322 module_init(qede_init);
323 module_exit(qede_cleanup);
324 
325 static int qede_open(struct net_device *ndev);
326 static int qede_close(struct net_device *ndev);
327 
328 void qede_fill_by_demand_stats(struct qede_dev *edev)
329 {
330 	struct qede_stats_common *p_common = &edev->stats.common;
331 	struct qed_eth_stats stats;
332 
333 	edev->ops->get_vport_stats(edev->cdev, &stats);
334 
335 	p_common->no_buff_discards = stats.common.no_buff_discards;
336 	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
337 	p_common->ttl0_discard = stats.common.ttl0_discard;
338 	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
339 	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
340 	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
341 	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
342 	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
343 	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
344 	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
345 	p_common->mac_filter_discards = stats.common.mac_filter_discards;
346 
347 	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
348 	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
349 	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
350 	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
351 	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
352 	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
353 	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
354 	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
355 	p_common->coalesced_events = stats.common.tpa_coalesced_events;
356 	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
357 	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
358 	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
359 
360 	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
361 	p_common->rx_65_to_127_byte_packets =
362 	    stats.common.rx_65_to_127_byte_packets;
363 	p_common->rx_128_to_255_byte_packets =
364 	    stats.common.rx_128_to_255_byte_packets;
365 	p_common->rx_256_to_511_byte_packets =
366 	    stats.common.rx_256_to_511_byte_packets;
367 	p_common->rx_512_to_1023_byte_packets =
368 	    stats.common.rx_512_to_1023_byte_packets;
369 	p_common->rx_1024_to_1518_byte_packets =
370 	    stats.common.rx_1024_to_1518_byte_packets;
371 	p_common->rx_crc_errors = stats.common.rx_crc_errors;
372 	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
373 	p_common->rx_pause_frames = stats.common.rx_pause_frames;
374 	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
375 	p_common->rx_align_errors = stats.common.rx_align_errors;
376 	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
377 	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
378 	p_common->rx_jabbers = stats.common.rx_jabbers;
379 	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
380 	p_common->rx_fragments = stats.common.rx_fragments;
381 	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
382 	p_common->tx_65_to_127_byte_packets =
383 	    stats.common.tx_65_to_127_byte_packets;
384 	p_common->tx_128_to_255_byte_packets =
385 	    stats.common.tx_128_to_255_byte_packets;
386 	p_common->tx_256_to_511_byte_packets =
387 	    stats.common.tx_256_to_511_byte_packets;
388 	p_common->tx_512_to_1023_byte_packets =
389 	    stats.common.tx_512_to_1023_byte_packets;
390 	p_common->tx_1024_to_1518_byte_packets =
391 	    stats.common.tx_1024_to_1518_byte_packets;
392 	p_common->tx_pause_frames = stats.common.tx_pause_frames;
393 	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
394 	p_common->brb_truncates = stats.common.brb_truncates;
395 	p_common->brb_discards = stats.common.brb_discards;
396 	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
397 
398 	if (QEDE_IS_BB(edev)) {
399 		struct qede_stats_bb *p_bb = &edev->stats.bb;
400 
401 		p_bb->rx_1519_to_1522_byte_packets =
402 		    stats.bb.rx_1519_to_1522_byte_packets;
403 		p_bb->rx_1519_to_2047_byte_packets =
404 		    stats.bb.rx_1519_to_2047_byte_packets;
405 		p_bb->rx_2048_to_4095_byte_packets =
406 		    stats.bb.rx_2048_to_4095_byte_packets;
407 		p_bb->rx_4096_to_9216_byte_packets =
408 		    stats.bb.rx_4096_to_9216_byte_packets;
409 		p_bb->rx_9217_to_16383_byte_packets =
410 		    stats.bb.rx_9217_to_16383_byte_packets;
411 		p_bb->tx_1519_to_2047_byte_packets =
412 		    stats.bb.tx_1519_to_2047_byte_packets;
413 		p_bb->tx_2048_to_4095_byte_packets =
414 		    stats.bb.tx_2048_to_4095_byte_packets;
415 		p_bb->tx_4096_to_9216_byte_packets =
416 		    stats.bb.tx_4096_to_9216_byte_packets;
417 		p_bb->tx_9217_to_16383_byte_packets =
418 		    stats.bb.tx_9217_to_16383_byte_packets;
419 		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
420 		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
421 	} else {
422 		struct qede_stats_ah *p_ah = &edev->stats.ah;
423 
424 		p_ah->rx_1519_to_max_byte_packets =
425 		    stats.ah.rx_1519_to_max_byte_packets;
426 		p_ah->tx_1519_to_max_byte_packets =
427 		    stats.ah.tx_1519_to_max_byte_packets;
428 	}
429 }
430 
431 static void qede_get_stats64(struct net_device *dev,
432 			     struct rtnl_link_stats64 *stats)
433 {
434 	struct qede_dev *edev = netdev_priv(dev);
435 	struct qede_stats_common *p_common;
436 
437 	qede_fill_by_demand_stats(edev);
438 	p_common = &edev->stats.common;
439 
440 	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
441 			    p_common->rx_bcast_pkts;
442 	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
443 			    p_common->tx_bcast_pkts;
444 
445 	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
446 			  p_common->rx_bcast_bytes;
447 	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
448 			  p_common->tx_bcast_bytes;
449 
450 	stats->tx_errors = p_common->tx_err_drop_pkts;
451 	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
452 
453 	stats->rx_fifo_errors = p_common->no_buff_discards;
454 
455 	if (QEDE_IS_BB(edev))
456 		stats->collisions = edev->stats.bb.tx_total_collisions;
457 	stats->rx_crc_errors = p_common->rx_crc_errors;
458 	stats->rx_frame_errors = p_common->rx_align_errors;
459 }
460 
461 #ifdef CONFIG_QED_SRIOV
462 static int qede_get_vf_config(struct net_device *dev, int vfidx,
463 			      struct ifla_vf_info *ivi)
464 {
465 	struct qede_dev *edev = netdev_priv(dev);
466 
467 	if (!edev->ops)
468 		return -EINVAL;
469 
470 	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
471 }
472 
473 static int qede_set_vf_rate(struct net_device *dev, int vfidx,
474 			    int min_tx_rate, int max_tx_rate)
475 {
476 	struct qede_dev *edev = netdev_priv(dev);
477 
478 	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
479 					max_tx_rate);
480 }
481 
482 static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
483 {
484 	struct qede_dev *edev = netdev_priv(dev);
485 
486 	if (!edev->ops)
487 		return -EINVAL;
488 
489 	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
490 }
491 
492 static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
493 				  int link_state)
494 {
495 	struct qede_dev *edev = netdev_priv(dev);
496 
497 	if (!edev->ops)
498 		return -EINVAL;
499 
500 	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
501 }
502 
503 static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
504 {
505 	struct qede_dev *edev = netdev_priv(dev);
506 
507 	if (!edev->ops)
508 		return -EINVAL;
509 
510 	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
511 }
512 #endif
513 
514 static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
515 {
516 	struct qede_dev *edev = netdev_priv(dev);
517 
518 	if (!netif_running(dev))
519 		return -EAGAIN;
520 
521 	switch (cmd) {
522 	case SIOCSHWTSTAMP:
523 		return qede_ptp_hw_ts(edev, ifr);
524 	default:
525 		DP_VERBOSE(edev, QED_MSG_DEBUG,
526 			   "default IOCTL cmd 0x%x\n", cmd);
527 		return -EOPNOTSUPP;
528 	}
529 
530 	return 0;
531 }
532 
533 static const struct net_device_ops qede_netdev_ops = {
534 	.ndo_open = qede_open,
535 	.ndo_stop = qede_close,
536 	.ndo_start_xmit = qede_start_xmit,
537 	.ndo_set_rx_mode = qede_set_rx_mode,
538 	.ndo_set_mac_address = qede_set_mac_addr,
539 	.ndo_validate_addr = eth_validate_addr,
540 	.ndo_change_mtu = qede_change_mtu,
541 	.ndo_do_ioctl = qede_ioctl,
542 #ifdef CONFIG_QED_SRIOV
543 	.ndo_set_vf_mac = qede_set_vf_mac,
544 	.ndo_set_vf_vlan = qede_set_vf_vlan,
545 	.ndo_set_vf_trust = qede_set_vf_trust,
546 #endif
547 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
548 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
549 	.ndo_set_features = qede_set_features,
550 	.ndo_get_stats64 = qede_get_stats64,
551 #ifdef CONFIG_QED_SRIOV
552 	.ndo_set_vf_link_state = qede_set_vf_link_state,
553 	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
554 	.ndo_get_vf_config = qede_get_vf_config,
555 	.ndo_set_vf_rate = qede_set_vf_rate,
556 #endif
557 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
558 	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
559 	.ndo_features_check = qede_features_check,
560 	.ndo_xdp = qede_xdp,
561 #ifdef CONFIG_RFS_ACCEL
562 	.ndo_rx_flow_steer = qede_rx_flow_steer,
563 #endif
564 };
565 
566 /* -------------------------------------------------------------------------
567  * START OF PROBE / REMOVE
568  * -------------------------------------------------------------------------
569  */
570 
571 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
572 					    struct pci_dev *pdev,
573 					    struct qed_dev_eth_info *info,
574 					    u32 dp_module, u8 dp_level)
575 {
576 	struct net_device *ndev;
577 	struct qede_dev *edev;
578 
579 	ndev = alloc_etherdev_mqs(sizeof(*edev),
580 				  info->num_queues, info->num_queues);
581 	if (!ndev) {
582 		pr_err("etherdev allocation failed\n");
583 		return NULL;
584 	}
585 
586 	edev = netdev_priv(ndev);
587 	edev->ndev = ndev;
588 	edev->cdev = cdev;
589 	edev->pdev = pdev;
590 	edev->dp_module = dp_module;
591 	edev->dp_level = dp_level;
592 	edev->ops = qed_ops;
593 	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
594 	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
595 
596 	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
597 		info->num_queues, info->num_queues);
598 
599 	SET_NETDEV_DEV(ndev, &pdev->dev);
600 
601 	memset(&edev->stats, 0, sizeof(edev->stats));
602 	memcpy(&edev->dev_info, info, sizeof(*info));
603 
604 	INIT_LIST_HEAD(&edev->vlan_list);
605 
606 	return edev;
607 }
608 
609 static void qede_init_ndev(struct qede_dev *edev)
610 {
611 	struct net_device *ndev = edev->ndev;
612 	struct pci_dev *pdev = edev->pdev;
613 	bool udp_tunnel_enable = false;
614 	netdev_features_t hw_features;
615 
616 	pci_set_drvdata(pdev, ndev);
617 
618 	ndev->mem_start = edev->dev_info.common.pci_mem_start;
619 	ndev->base_addr = ndev->mem_start;
620 	ndev->mem_end = edev->dev_info.common.pci_mem_end;
621 	ndev->irq = edev->dev_info.common.pci_irq;
622 
623 	ndev->watchdog_timeo = TX_TIMEOUT;
624 
625 	ndev->netdev_ops = &qede_netdev_ops;
626 
627 	qede_set_ethtool_ops(ndev);
628 
629 	ndev->priv_flags |= IFF_UNICAST_FLT;
630 
631 	/* user-changeable features */
632 	hw_features = NETIF_F_GRO | NETIF_F_SG |
633 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
634 		      NETIF_F_TSO | NETIF_F_TSO6;
635 
636 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
637 		hw_features |= NETIF_F_NTUPLE;
638 
639 	if (edev->dev_info.common.vxlan_enable ||
640 	    edev->dev_info.common.geneve_enable)
641 		udp_tunnel_enable = true;
642 
643 	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
644 		hw_features |= NETIF_F_TSO_ECN;
645 		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
646 					NETIF_F_SG | NETIF_F_TSO |
647 					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
648 					NETIF_F_RXCSUM;
649 	}
650 
651 	if (udp_tunnel_enable) {
652 		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
653 				NETIF_F_GSO_UDP_TUNNEL_CSUM);
654 		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
655 					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
656 	}
657 
658 	if (edev->dev_info.common.gre_enable) {
659 		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
660 		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
661 					  NETIF_F_GSO_GRE_CSUM);
662 	}
663 
664 	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
665 			      NETIF_F_HIGHDMA;
666 	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
667 			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
668 			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
669 
670 	ndev->hw_features = hw_features;
671 
672 	/* MTU range: 46 - 9600 */
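	/* (The 46-byte minimum is ETH_ZLEN (60) minus ETH_HLEN (14); the
	 * 9600-byte maximum is assumed to come from QEDE_MAX_JUMBO_PACKET_SIZE.)
	 */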
673 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
674 	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
675 
676 	/* Set network device HW mac */
677 	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
678 
679 	ndev->mtu = edev->dev_info.common.mtu;
680 }
681 
682 /* This function converts the 32-bit debug parameter into separate level and
683  * module parameters. Input 32-bit decoding:
684  * b31 - enable all NOTICE prints. NOTICE prints are for deviations from the
685  * 'happy' flow, e.g. memory allocation failure.
686  * b30 - enable all INFO prints. INFO prints are for major steps in the flow
687  * and provide important parameters.
688  * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
689  * module. VERBOSE prints are for tracking a specific flow at a low level.
690  *
691  * Note that the level should be that of the lowest required logs.
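 *
 * Example (illustrative values that follow from the bit layout above):
 * debug=0x40000000 selects the INFO level, while debug=0x00000002 selects
 * the VERBOSE level with only module bit 1 enabled.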
692  */
693 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
694 {
695 	*p_dp_level = QED_LEVEL_NOTICE;
696 	*p_dp_module = 0;
697 
698 	if (debug & QED_LOG_VERBOSE_MASK) {
699 		*p_dp_level = QED_LEVEL_VERBOSE;
700 		*p_dp_module = (debug & 0x3FFFFFFF);
701 	} else if (debug & QED_LOG_INFO_MASK) {
702 		*p_dp_level = QED_LEVEL_INFO;
703 	} else if (debug & QED_LOG_NOTICE_MASK) {
704 		*p_dp_level = QED_LEVEL_NOTICE;
705 	}
706 }
707 
708 static void qede_free_fp_array(struct qede_dev *edev)
709 {
710 	if (edev->fp_array) {
711 		struct qede_fastpath *fp;
712 		int i;
713 
714 		for_each_queue(i) {
715 			fp = &edev->fp_array[i];
716 
717 			kfree(fp->sb_info);
718 			kfree(fp->rxq);
719 			kfree(fp->xdp_tx);
720 			kfree(fp->txq);
721 		}
722 		kfree(edev->fp_array);
723 	}
724 
725 	edev->num_queues = 0;
726 	edev->fp_num_tx = 0;
727 	edev->fp_num_rx = 0;
728 }
729 
730 static int qede_alloc_fp_array(struct qede_dev *edev)
731 {
732 	u8 fp_combined, fp_rx = edev->fp_num_rx;
733 	struct qede_fastpath *fp;
734 	int i;
735 
736 	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
737 				 sizeof(*edev->fp_array), GFP_KERNEL);
738 	if (!edev->fp_array) {
739 		DP_NOTICE(edev, "fp array allocation failed\n");
740 		goto err;
741 	}
742 
743 	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
744 
745 	/* Allocate the FP elements for Rx queues followed by combined and then
746 	 * the Tx. This ordering should be maintained so that the respective
747 	 * queues (Rx or Tx) will be together in the fastpath array and the
748 	 * associated ids will be sequential.
749 	 */
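	/* For instance (an assumed layout that follows from the assignment
	 * order below): with two Rx-only, one combined and one Tx-only queue,
	 * the resulting fastpath types are [RX, RX, COMBINED, TX].
	 */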
750 	for_each_queue(i) {
751 		fp = &edev->fp_array[i];
752 
753 		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
754 		if (!fp->sb_info) {
755 			DP_NOTICE(edev, "sb info struct allocation failed\n");
756 			goto err;
757 		}
758 
759 		if (fp_rx) {
760 			fp->type = QEDE_FASTPATH_RX;
761 			fp_rx--;
762 		} else if (fp_combined) {
763 			fp->type = QEDE_FASTPATH_COMBINED;
764 			fp_combined--;
765 		} else {
766 			fp->type = QEDE_FASTPATH_TX;
767 		}
768 
769 		if (fp->type & QEDE_FASTPATH_TX) {
770 			fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
771 			if (!fp->txq)
772 				goto err;
773 		}
774 
775 		if (fp->type & QEDE_FASTPATH_RX) {
776 			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
777 			if (!fp->rxq)
778 				goto err;
779 
780 			if (edev->xdp_prog) {
781 				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
782 						     GFP_KERNEL);
783 				if (!fp->xdp_tx)
784 					goto err;
785 				fp->type |= QEDE_FASTPATH_XDP;
786 			}
787 		}
788 	}
789 
790 	return 0;
791 err:
792 	qede_free_fp_array(edev);
793 	return -ENOMEM;
794 }
795 
796 static void qede_sp_task(struct work_struct *work)
797 {
798 	struct qede_dev *edev = container_of(work, struct qede_dev,
799 					     sp_task.work);
800 
801 	__qede_lock(edev);
802 
803 	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
804 		if (edev->state == QEDE_STATE_OPEN)
805 			qede_config_rx_mode(edev->ndev);
806 
807 #ifdef CONFIG_RFS_ACCEL
808 	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
809 		if (edev->state == QEDE_STATE_OPEN)
810 			qede_process_arfs_filters(edev, false);
811 	}
812 #endif
813 	__qede_unlock(edev);
814 }
815 
816 static void qede_update_pf_params(struct qed_dev *cdev)
817 {
818 	struct qed_pf_params pf_params;
819 
820 	/* 64 rx + 64 tx + 64 XDP */
821 	memset(&pf_params, 0, sizeof(struct qed_pf_params));
822 	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
823 #ifdef CONFIG_RFS_ACCEL
824 	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
825 #endif
826 	qed_ops->common->update_pf_params(cdev, &pf_params);
827 }
828 
829 enum qede_probe_mode {
830 	QEDE_PROBE_NORMAL,
831 };
832 
833 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
834 			bool is_vf, enum qede_probe_mode mode)
835 {
836 	struct qed_probe_params probe_params;
837 	struct qed_slowpath_params sp_params;
838 	struct qed_dev_eth_info dev_info;
839 	struct qede_dev *edev;
840 	struct qed_dev *cdev;
841 	int rc;
842 
843 	if (unlikely(dp_level & QED_LEVEL_INFO))
844 		pr_notice("Starting qede probe\n");
845 
846 	memset(&probe_params, 0, sizeof(probe_params));
847 	probe_params.protocol = QED_PROTOCOL_ETH;
848 	probe_params.dp_module = dp_module;
849 	probe_params.dp_level = dp_level;
850 	probe_params.is_vf = is_vf;
851 	cdev = qed_ops->common->probe(pdev, &probe_params);
852 	if (!cdev) {
853 		rc = -ENODEV;
854 		goto err0;
855 	}
856 
857 	qede_update_pf_params(cdev);
858 
859 	/* Start the Slowpath-process */
860 	memset(&sp_params, 0, sizeof(sp_params));
861 	sp_params.int_mode = QED_INT_MODE_MSIX;
862 	sp_params.drv_major = QEDE_MAJOR_VERSION;
863 	sp_params.drv_minor = QEDE_MINOR_VERSION;
864 	sp_params.drv_rev = QEDE_REVISION_VERSION;
865 	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
866 	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
867 	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
868 	if (rc) {
869 		pr_notice("Cannot start slowpath\n");
870 		goto err1;
871 	}
872 
873 	/* Learn information crucial for qede to progress */
874 	rc = qed_ops->fill_dev_info(cdev, &dev_info);
875 	if (rc)
876 		goto err2;
877 
878 	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
879 				   dp_level);
880 	if (!edev) {
881 		rc = -ENOMEM;
882 		goto err2;
883 	}
884 
885 	if (is_vf)
886 		edev->flags |= QEDE_FLAG_IS_VF;
887 
888 	qede_init_ndev(edev);
889 
890 	rc = qede_roce_dev_add(edev);
891 	if (rc)
892 		goto err3;
893 
894 	/* Prepare the lock prior to the registration of the netdev,
895 	 * as once it's registered we might reach flows requiring it
896 	 * [it's even possible to reach a flow needing it directly
897 	 * from there, although it's unlikely].
898 	 */
899 	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
900 	mutex_init(&edev->qede_lock);
901 	rc = register_netdev(edev->ndev);
902 	if (rc) {
903 		DP_NOTICE(edev, "Cannot register net-device\n");
904 		goto err4;
905 	}
906 
907 	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
908 
909 	/* PTP not supported on VFs */
910 	if (!is_vf)
911 		qede_ptp_enable(edev, true);
912 
913 	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
914 
915 #ifdef CONFIG_DCB
916 	if (!IS_VF(edev))
917 		qede_set_dcbnl_ops(edev->ndev);
918 #endif
919 
920 	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
921 
922 	DP_INFO(edev, "Ending qede probe successfully\n");
923 
924 	return 0;
925 
926 err4:
927 	qede_roce_dev_remove(edev);
928 err3:
929 	free_netdev(edev->ndev);
930 err2:
931 	qed_ops->common->slowpath_stop(cdev);
932 err1:
933 	qed_ops->common->remove(cdev);
934 err0:
935 	return rc;
936 }
937 
938 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
939 {
940 	bool is_vf = false;
941 	u32 dp_module = 0;
942 	u8 dp_level = 0;
943 
944 	switch ((enum qede_pci_private)id->driver_data) {
945 	case QEDE_PRIVATE_VF:
946 		if (debug & QED_LOG_VERBOSE_MASK)
947 			dev_err(&pdev->dev, "Probing a VF\n");
948 		is_vf = true;
949 		break;
950 	default:
951 		if (debug & QED_LOG_VERBOSE_MASK)
952 			dev_err(&pdev->dev, "Probing a PF\n");
953 	}
954 
955 	qede_config_debug(debug, &dp_module, &dp_level);
956 
957 	return __qede_probe(pdev, dp_module, dp_level, is_vf,
958 			    QEDE_PROBE_NORMAL);
959 }
960 
961 enum qede_remove_mode {
962 	QEDE_REMOVE_NORMAL,
963 };
964 
965 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
966 {
967 	struct net_device *ndev = pci_get_drvdata(pdev);
968 	struct qede_dev *edev = netdev_priv(ndev);
969 	struct qed_dev *cdev = edev->cdev;
970 
971 	DP_INFO(edev, "Starting qede_remove\n");
972 
973 	unregister_netdev(ndev);
974 	cancel_delayed_work_sync(&edev->sp_task);
975 
976 	qede_ptp_disable(edev);
977 
978 	qede_roce_dev_remove(edev);
979 
980 	edev->ops->common->set_power_state(cdev, PCI_D0);
981 
982 	pci_set_drvdata(pdev, NULL);
983 
984 	/* Release edev's reference to the XDP bpf program, if one exists */
985 	if (edev->xdp_prog)
986 		bpf_prog_put(edev->xdp_prog);
987 
988 	/* Use global ops since we've freed edev */
989 	qed_ops->common->slowpath_stop(cdev);
990 	if (system_state == SYSTEM_POWER_OFF)
991 		return;
992 	qed_ops->common->remove(cdev);
993 
994 	/* Since this can happen out-of-sync with other flows,
995 	 * don't release the netdevice until after slowpath stop
996 	 * has been called to guarantee various other contexts
997 	 * [e.g., QED register callbacks] won't break anything when
998 	 * accessing the netdevice.
999 	 */
1000 	free_netdev(ndev);
1001 
1002 	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
1003 }
1004 
1005 static void qede_remove(struct pci_dev *pdev)
1006 {
1007 	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1008 }
1009 
1010 static void qede_shutdown(struct pci_dev *pdev)
1011 {
1012 	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1013 }
1014 
1015 /* -------------------------------------------------------------------------
1016  * START OF LOAD / UNLOAD
1017  * -------------------------------------------------------------------------
1018  */
1019 
1020 static int qede_set_num_queues(struct qede_dev *edev)
1021 {
1022 	int rc;
1023 	u16 rss_num;
1024 
1025 	/* Set up queues according to available resources */
1026 	if (edev->req_queues)
1027 		rss_num = edev->req_queues;
1028 	else
1029 		rss_num = netif_get_num_default_rss_queues() *
1030 			  edev->dev_info.common.num_hwfns;
1031 
1032 	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
1033 
1034 	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
1035 	if (rc > 0) {
1036 		/* Managed to request interrupts for our queues */
1037 		edev->num_queues = rc;
1038 		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
1039 			QEDE_QUEUE_CNT(edev), rss_num);
1040 		rc = 0;
1041 	}
1042 
1043 	edev->fp_num_tx = edev->req_num_tx;
1044 	edev->fp_num_rx = edev->req_num_rx;
1045 
1046 	return rc;
1047 }
1048 
1049 static void qede_free_mem_sb(struct qede_dev *edev,
1050 			     struct qed_sb_info *sb_info)
1051 {
1052 	if (sb_info->sb_virt)
1053 		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
1054 				  (void *)sb_info->sb_virt, sb_info->sb_phys);
1055 }
1056 
1057 /* This function allocates fast-path status block memory */
1058 static int qede_alloc_mem_sb(struct qede_dev *edev,
1059 			     struct qed_sb_info *sb_info, u16 sb_id)
1060 {
1061 	struct status_block *sb_virt;
1062 	dma_addr_t sb_phys;
1063 	int rc;
1064 
1065 	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
1066 				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
1067 	if (!sb_virt) {
1068 		DP_ERR(edev, "Status block allocation failed\n");
1069 		return -ENOMEM;
1070 	}
1071 
1072 	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
1073 					sb_virt, sb_phys, sb_id,
1074 					QED_SB_TYPE_L2_QUEUE);
1075 	if (rc) {
1076 		DP_ERR(edev, "Status block initialization failed\n");
1077 		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
1078 				  sb_virt, sb_phys);
1079 		return rc;
1080 	}
1081 
1082 	return 0;
1083 }
1084 
1085 static void qede_free_rx_buffers(struct qede_dev *edev,
1086 				 struct qede_rx_queue *rxq)
1087 {
1088 	u16 i;
1089 
1090 	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
1091 		struct sw_rx_data *rx_buf;
1092 		struct page *data;
1093 
1094 		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
1095 		data = rx_buf->data;
1096 
1097 		dma_unmap_page(&edev->pdev->dev,
1098 			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
1099 
1100 		rx_buf->data = NULL;
1101 		__free_page(data);
1102 	}
1103 }
1104 
1105 static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
1106 {
1107 	int i;
1108 
1109 	if (edev->gro_disable)
1110 		return;
1111 
1112 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1113 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1114 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
1115 
1116 		if (replace_buf->data) {
1117 			dma_unmap_page(&edev->pdev->dev,
1118 				       replace_buf->mapping,
1119 				       PAGE_SIZE, DMA_FROM_DEVICE);
1120 			__free_page(replace_buf->data);
1121 		}
1122 	}
1123 }
1124 
1125 static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1126 {
1127 	qede_free_sge_mem(edev, rxq);
1128 
1129 	/* Free rx buffers */
1130 	qede_free_rx_buffers(edev, rxq);
1131 
1132 	/* Free the parallel SW ring */
1133 	kfree(rxq->sw_rx_ring);
1134 
1135 	/* Free the real RQ ring used by FW */
1136 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
1137 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
1138 }
1139 
1140 static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
1141 {
1142 	dma_addr_t mapping;
1143 	int i;
1144 
1145 	/* Don't perform FW aggregations in case of XDP */
1146 	if (edev->xdp_prog)
1147 		edev->gro_disable = 1;
1148 
1149 	if (edev->gro_disable)
1150 		return 0;
1151 
1152 	if (edev->ndev->mtu > PAGE_SIZE) {
1153 		edev->gro_disable = 1;
1154 		return 0;
1155 	}
1156 
1157 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1158 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1159 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
1160 
1161 		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
1162 		if (unlikely(!replace_buf->data)) {
1163 			DP_NOTICE(edev,
1164 				  "Failed to allocate TPA skb pool [replacement buffer]\n");
1165 			goto err;
1166 		}
1167 
1168 		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
1169 				       PAGE_SIZE, DMA_FROM_DEVICE);
1170 		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
1171 			DP_NOTICE(edev,
1172 				  "Failed to map TPA replacement buffer\n");
1173 			goto err;
1174 		}
1175 
1176 		replace_buf->mapping = mapping;
1177 		tpa_info->buffer.page_offset = 0;
1178 		tpa_info->buffer_mapping = mapping;
1179 		tpa_info->state = QEDE_AGG_STATE_NONE;
1180 	}
1181 
1182 	return 0;
1183 err:
1184 	qede_free_sge_mem(edev, rxq);
1185 	edev->gro_disable = 1;
1186 	return -ENOMEM;
1187 }
1188 
1189 /* This function allocates all memory needed per Rx queue */
1190 static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1191 {
1192 	int i, rc, size;
1193 
1194 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
1195 
1196 	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
1197 	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
1198 
1199 	/* Make sure that the headroom and payload fit in a single page */
1200 	if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
1201 		rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
1202 
1203 	/* Segment size to split a page into multiple equal parts,
1204 	 * unless XDP is used, in which case we'd use the entire page.
1205 	 */
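	/* Illustrative example (assuming a 4K page and the usual NET_IP_ALIGN
	 * of 2): a 1500-byte MTU yields a buffer size of roughly 1.5K, which
	 * roundup_pow_of_two() turns into a 2K segment, so two Rx buffers
	 * share each page when no XDP program is attached.
	 */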
1206 	if (!edev->xdp_prog)
1207 		rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
1208 	else
1209 		rxq->rx_buf_seg_size = PAGE_SIZE;
1210 
1211 	/* Allocate the parallel driver ring for Rx buffers */
1212 	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
1213 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1214 	if (!rxq->sw_rx_ring) {
1215 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
1216 		rc = -ENOMEM;
1217 		goto err;
1218 	}
1219 
1220 	/* Allocate FW Rx ring */
1221 	rc = edev->ops->common->chain_alloc(edev->cdev,
1222 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1223 					    QED_CHAIN_MODE_NEXT_PTR,
1224 					    QED_CHAIN_CNT_TYPE_U16,
1225 					    RX_RING_SIZE,
1226 					    sizeof(struct eth_rx_bd),
1227 					    &rxq->rx_bd_ring);
1228 
1229 	if (rc)
1230 		goto err;
1231 
1232 	/* Allocate FW completion ring */
1233 	rc = edev->ops->common->chain_alloc(edev->cdev,
1234 					    QED_CHAIN_USE_TO_CONSUME,
1235 					    QED_CHAIN_MODE_PBL,
1236 					    QED_CHAIN_CNT_TYPE_U16,
1237 					    RX_RING_SIZE,
1238 					    sizeof(union eth_rx_cqe),
1239 					    &rxq->rx_comp_ring);
1240 	if (rc)
1241 		goto err;
1242 
1243 	/* Allocate buffers for the Rx ring */
1244 	rxq->filled_buffers = 0;
1245 	for (i = 0; i < rxq->num_rx_buffers; i++) {
1246 		rc = qede_alloc_rx_buffer(rxq, false);
1247 		if (rc) {
1248 			DP_ERR(edev,
1249 			       "Rx buffers allocation failed at index %d\n", i);
1250 			goto err;
1251 		}
1252 	}
1253 
1254 	rc = qede_alloc_sge_mem(edev, rxq);
1255 err:
1256 	return rc;
1257 }
1258 
1259 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1260 {
1261 	/* Free the parallel SW ring */
1262 	if (txq->is_xdp)
1263 		kfree(txq->sw_tx_ring.xdp);
1264 	else
1265 		kfree(txq->sw_tx_ring.skbs);
1266 
1267 	/* Free the real Tx ring used by FW */
1268 	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1269 }
1270 
1271 /* This function allocates all memory needed per Tx queue */
1272 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1273 {
1274 	union eth_tx_bd_types *p_virt;
1275 	int size, rc;
1276 
1277 	txq->num_tx_buffers = edev->q_num_tx_buffers;
1278 
1279 	/* Allocate the parallel driver ring for Tx buffers */
1280 	if (txq->is_xdp) {
1281 		size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
1282 		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
1283 		if (!txq->sw_tx_ring.xdp)
1284 			goto err;
1285 	} else {
1286 		size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
1287 		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1288 		if (!txq->sw_tx_ring.skbs)
1289 			goto err;
1290 	}
1291 
1292 	rc = edev->ops->common->chain_alloc(edev->cdev,
1293 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1294 					    QED_CHAIN_MODE_PBL,
1295 					    QED_CHAIN_CNT_TYPE_U16,
1296 					    TX_RING_SIZE,
1297 					    sizeof(*p_virt), &txq->tx_pbl);
1298 	if (rc)
1299 		goto err;
1300 
1301 	return 0;
1302 
1303 err:
1304 	qede_free_mem_txq(edev, txq);
1305 	return -ENOMEM;
1306 }
1307 
1308 /* This function frees all memory of a single fp */
1309 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1310 {
1311 	qede_free_mem_sb(edev, fp->sb_info);
1312 
1313 	if (fp->type & QEDE_FASTPATH_RX)
1314 		qede_free_mem_rxq(edev, fp->rxq);
1315 
1316 	if (fp->type & QEDE_FASTPATH_TX)
1317 		qede_free_mem_txq(edev, fp->txq);
1318 }
1319 
1320 /* This function allocates all memory needed for a single fp (i.e. an entity
1321  * which contains a status block, one rx queue and/or multiple per-TC tx queues).
1322  */
1323 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1324 {
1325 	int rc = 0;
1326 
1327 	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1328 	if (rc)
1329 		goto out;
1330 
1331 	if (fp->type & QEDE_FASTPATH_RX) {
1332 		rc = qede_alloc_mem_rxq(edev, fp->rxq);
1333 		if (rc)
1334 			goto out;
1335 	}
1336 
1337 	if (fp->type & QEDE_FASTPATH_XDP) {
1338 		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1339 		if (rc)
1340 			goto out;
1341 	}
1342 
1343 	if (fp->type & QEDE_FASTPATH_TX) {
1344 		rc = qede_alloc_mem_txq(edev, fp->txq);
1345 		if (rc)
1346 			goto out;
1347 	}
1348 
1349 out:
1350 	return rc;
1351 }
1352 
1353 static void qede_free_mem_load(struct qede_dev *edev)
1354 {
1355 	int i;
1356 
1357 	for_each_queue(i) {
1358 		struct qede_fastpath *fp = &edev->fp_array[i];
1359 
1360 		qede_free_mem_fp(edev, fp);
1361 	}
1362 }
1363 
1364 /* This function allocates all qede memory at NIC load. */
1365 static int qede_alloc_mem_load(struct qede_dev *edev)
1366 {
1367 	int rc = 0, queue_id;
1368 
1369 	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1370 		struct qede_fastpath *fp = &edev->fp_array[queue_id];
1371 
1372 		rc = qede_alloc_mem_fp(edev, fp);
1373 		if (rc) {
1374 			DP_ERR(edev,
1375 			       "Failed to allocate memory for fastpath - rss id = %d\n",
1376 			       queue_id);
1377 			qede_free_mem_load(edev);
1378 			return rc;
1379 		}
1380 	}
1381 
1382 	return 0;
1383 }
1384 
1385 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
1386 static void qede_init_fp(struct qede_dev *edev)
1387 {
1388 	int queue_id, rxq_index = 0, txq_index = 0;
1389 	struct qede_fastpath *fp;
1390 
1391 	for_each_queue(queue_id) {
1392 		fp = &edev->fp_array[queue_id];
1393 
1394 		fp->edev = edev;
1395 		fp->id = queue_id;
1396 
1397 		if (fp->type & QEDE_FASTPATH_XDP) {
1398 			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1399 								rxq_index);
1400 			fp->xdp_tx->is_xdp = 1;
1401 		}
1402 
1403 		if (fp->type & QEDE_FASTPATH_RX) {
1404 			fp->rxq->rxq_id = rxq_index++;
1405 
1406 			/* Determine how to map buffers for this queue */
1407 			if (fp->type & QEDE_FASTPATH_XDP)
1408 				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1409 			else
1410 				fp->rxq->data_direction = DMA_FROM_DEVICE;
1411 			fp->rxq->dev = &edev->pdev->dev;
1412 		}
1413 
1414 		if (fp->type & QEDE_FASTPATH_TX) {
1415 			fp->txq->index = txq_index++;
1416 			if (edev->dev_info.is_legacy)
1417 				fp->txq->is_legacy = 1;
1418 			fp->txq->dev = &edev->pdev->dev;
1419 		}
1420 
1421 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1422 			 edev->ndev->name, queue_id);
1423 	}
1424 
1425 	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
1426 }
1427 
1428 static int qede_set_real_num_queues(struct qede_dev *edev)
1429 {
1430 	int rc = 0;
1431 
1432 	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
1433 	if (rc) {
1434 		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1435 		return rc;
1436 	}
1437 
1438 	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1439 	if (rc) {
1440 		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1441 		return rc;
1442 	}
1443 
1444 	return 0;
1445 }
1446 
1447 static void qede_napi_disable_remove(struct qede_dev *edev)
1448 {
1449 	int i;
1450 
1451 	for_each_queue(i) {
1452 		napi_disable(&edev->fp_array[i].napi);
1453 
1454 		netif_napi_del(&edev->fp_array[i].napi);
1455 	}
1456 }
1457 
1458 static void qede_napi_add_enable(struct qede_dev *edev)
1459 {
1460 	int i;
1461 
1462 	/* Add NAPI objects */
1463 	for_each_queue(i) {
1464 		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
1465 			       qede_poll, NAPI_POLL_WEIGHT);
1466 		napi_enable(&edev->fp_array[i].napi);
1467 	}
1468 }
1469 
1470 static void qede_sync_free_irqs(struct qede_dev *edev)
1471 {
1472 	int i;
1473 
1474 	for (i = 0; i < edev->int_info.used_cnt; i++) {
1475 		if (edev->int_info.msix_cnt) {
1476 			synchronize_irq(edev->int_info.msix[i].vector);
1477 			free_irq(edev->int_info.msix[i].vector,
1478 				 &edev->fp_array[i]);
1479 		} else {
1480 			edev->ops->common->simd_handler_clean(edev->cdev, i);
1481 		}
1482 	}
1483 
1484 	edev->int_info.used_cnt = 0;
1485 }
1486 
1487 static int qede_req_msix_irqs(struct qede_dev *edev)
1488 {
1489 	int i, rc;
1490 
1491 	/* Ensure we have at least as many MSI-X vectors as prepared RSS queues */
1492 	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1493 		DP_ERR(edev,
1494 		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1495 		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1496 		return -EINVAL;
1497 	}
1498 
1499 	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1500 #ifdef CONFIG_RFS_ACCEL
1501 		struct qede_fastpath *fp = &edev->fp_array[i];
1502 
1503 		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1504 			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1505 					      edev->int_info.msix[i].vector);
1506 			if (rc) {
1507 				DP_ERR(edev, "Failed to add CPU rmap\n");
1508 				qede_free_arfs(edev);
1509 			}
1510 		}
1511 #endif
1512 		rc = request_irq(edev->int_info.msix[i].vector,
1513 				 qede_msix_fp_int, 0, edev->fp_array[i].name,
1514 				 &edev->fp_array[i]);
1515 		if (rc) {
1516 			DP_ERR(edev, "Request fp %d irq failed\n", i);
1517 			qede_sync_free_irqs(edev);
1518 			return rc;
1519 		}
1520 		DP_VERBOSE(edev, NETIF_MSG_INTR,
1521 			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1522 			   edev->fp_array[i].name, i,
1523 			   &edev->fp_array[i]);
1524 		edev->int_info.used_cnt++;
1525 	}
1526 
1527 	return 0;
1528 }
1529 
1530 static void qede_simd_fp_handler(void *cookie)
1531 {
1532 	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1533 
1534 	napi_schedule_irqoff(&fp->napi);
1535 }
1536 
1537 static int qede_setup_irqs(struct qede_dev *edev)
1538 {
1539 	int i, rc = 0;
1540 
1541 	/* Learn Interrupt configuration */
1542 	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1543 	if (rc)
1544 		return rc;
1545 
1546 	if (edev->int_info.msix_cnt) {
1547 		rc = qede_req_msix_irqs(edev);
1548 		if (rc)
1549 			return rc;
1550 		edev->ndev->irq = edev->int_info.msix[0].vector;
1551 	} else {
1552 		const struct qed_common_ops *ops;
1553 
1554 		/* qed should receive the RSS ids and callbacks */
1555 		ops = edev->ops->common;
1556 		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
1557 			ops->simd_handler_config(edev->cdev,
1558 						 &edev->fp_array[i], i,
1559 						 qede_simd_fp_handler);
1560 		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
1561 	}
1562 	return 0;
1563 }
1564 
1565 static int qede_drain_txq(struct qede_dev *edev,
1566 			  struct qede_tx_queue *txq, bool allow_drain)
1567 {
1568 	int rc, cnt = 1000;
1569 
1570 	while (txq->sw_tx_cons != txq->sw_tx_prod) {
1571 		if (!cnt) {
1572 			if (allow_drain) {
1573 				DP_NOTICE(edev,
1574 					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
1575 					  txq->index);
1576 				rc = edev->ops->common->drain(edev->cdev);
1577 				if (rc)
1578 					return rc;
1579 				return qede_drain_txq(edev, txq, false);
1580 			}
1581 			DP_NOTICE(edev,
1582 				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
1583 				  txq->index, txq->sw_tx_prod,
1584 				  txq->sw_tx_cons);
1585 			return -ENODEV;
1586 		}
1587 		cnt--;
1588 		usleep_range(1000, 2000);
1589 		barrier();
1590 	}
1591 
1592 	/* FW finished processing, wait for HW to transmit all tx packets */
1593 	usleep_range(1000, 2000);
1594 
1595 	return 0;
1596 }
1597 
1598 static int qede_stop_txq(struct qede_dev *edev,
1599 			 struct qede_tx_queue *txq, int rss_id)
1600 {
1601 	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
1602 }
1603 
1604 static int qede_stop_queues(struct qede_dev *edev)
1605 {
1606 	struct qed_update_vport_params *vport_update_params;
1607 	struct qed_dev *cdev = edev->cdev;
1608 	struct qede_fastpath *fp;
1609 	int rc, i;
1610 
1611 	/* Disable the vport */
1612 	vport_update_params = vzalloc(sizeof(*vport_update_params));
1613 	if (!vport_update_params)
1614 		return -ENOMEM;
1615 
1616 	vport_update_params->vport_id = 0;
1617 	vport_update_params->update_vport_active_flg = 1;
1618 	vport_update_params->vport_active_flg = 0;
1619 	vport_update_params->update_rss_flg = 0;
1620 
1621 	rc = edev->ops->vport_update(cdev, vport_update_params);
1622 	vfree(vport_update_params);
1623 
1624 	if (rc) {
1625 		DP_ERR(edev, "Failed to update vport\n");
1626 		return rc;
1627 	}
1628 
1629 	/* Flush Tx queues. If needed, request drain from MCP */
1630 	for_each_queue(i) {
1631 		fp = &edev->fp_array[i];
1632 
1633 		if (fp->type & QEDE_FASTPATH_TX) {
1634 			rc = qede_drain_txq(edev, fp->txq, true);
1635 			if (rc)
1636 				return rc;
1637 		}
1638 
1639 		if (fp->type & QEDE_FASTPATH_XDP) {
1640 			rc = qede_drain_txq(edev, fp->xdp_tx, true);
1641 			if (rc)
1642 				return rc;
1643 		}
1644 	}
1645 
1646 	/* Stop all Queues in reverse order */
1647 	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
1648 		fp = &edev->fp_array[i];
1649 
1650 		/* Stop the Tx Queue(s) */
1651 		if (fp->type & QEDE_FASTPATH_TX) {
1652 			rc = qede_stop_txq(edev, fp->txq, i);
1653 			if (rc)
1654 				return rc;
1655 		}
1656 
1657 		/* Stop the Rx Queue */
1658 		if (fp->type & QEDE_FASTPATH_RX) {
1659 			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
1660 			if (rc) {
1661 				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
1662 				return rc;
1663 			}
1664 		}
1665 
1666 		/* Stop the XDP forwarding queue */
1667 		if (fp->type & QEDE_FASTPATH_XDP) {
1668 			rc = qede_stop_txq(edev, fp->xdp_tx, i);
1669 			if (rc)
1670 				return rc;
1671 
1672 			bpf_prog_put(fp->rxq->xdp_prog);
1673 		}
1674 	}
1675 
1676 	/* Stop the vport */
1677 	rc = edev->ops->vport_stop(cdev, 0);
1678 	if (rc)
1679 		DP_ERR(edev, "Failed to stop VPORT\n");
1680 
1681 	return rc;
1682 }
1683 
1684 static int qede_start_txq(struct qede_dev *edev,
1685 			  struct qede_fastpath *fp,
1686 			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
1687 {
1688 	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
1689 	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
1690 	struct qed_queue_start_common_params params;
1691 	struct qed_txq_start_ret_params ret_params;
1692 	int rc;
1693 
1694 	memset(&params, 0, sizeof(params));
1695 	memset(&ret_params, 0, sizeof(ret_params));
1696 
1697 	/* Let the XDP queue share the queue-zone with one of the regular txq.
1698 	 * We don't really care about its coalescing.
1699 	 */
1700 	if (txq->is_xdp)
1701 		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
1702 	else
1703 		params.queue_id = txq->index;
1704 
1705 	params.sb = fp->sb_info->igu_sb_id;
1706 	params.sb_idx = sb_idx;
1707 
1708 	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
1709 				   page_cnt, &ret_params);
1710 	if (rc) {
1711 		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
1712 		return rc;
1713 	}
1714 
1715 	txq->doorbell_addr = ret_params.p_doorbell;
1716 	txq->handle = ret_params.p_handle;
1717 
1718 	/* Determine the associated FW consumer address */
1719 	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
1720 
1721 	/* Prepare the doorbell parameters */
1722 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
1723 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1724 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
1725 		  DQ_XCM_ETH_TX_BD_PROD_CMD);
1726 	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1727 
1728 	return rc;
1729 }
1730 
1731 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
1732 {
1733 	int vlan_removal_en = 1;
1734 	struct qed_dev *cdev = edev->cdev;
1735 	struct qed_dev_info *qed_info = &edev->dev_info.common;
1736 	struct qed_update_vport_params *vport_update_params;
1737 	struct qed_queue_start_common_params q_params;
1738 	struct qed_start_vport_params start = {0};
1739 	int rc, i;
1740 
1741 	if (!edev->num_queues) {
1742 		DP_ERR(edev,
1743 		       "Cannot set V-PORT active as there are no Rx queues\n");
1744 		return -EINVAL;
1745 	}
1746 
1747 	vport_update_params = vzalloc(sizeof(*vport_update_params));
1748 	if (!vport_update_params)
1749 		return -ENOMEM;
1750 
1751 	start.handle_ptp_pkts = !!(edev->ptp);
1752 	start.gro_enable = !edev->gro_disable;
1753 	start.mtu = edev->ndev->mtu;
1754 	start.vport_id = 0;
1755 	start.drop_ttl0 = true;
1756 	start.remove_inner_vlan = vlan_removal_en;
1757 	start.clear_stats = clear_stats;
1758 
1759 	rc = edev->ops->vport_start(cdev, &start);
1760 
1761 	if (rc) {
1762 		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
1763 		goto out;
1764 	}
1765 
1766 	DP_VERBOSE(edev, NETIF_MSG_IFUP,
1767 		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
1768 		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
1769 
1770 	for_each_queue(i) {
1771 		struct qede_fastpath *fp = &edev->fp_array[i];
1772 		dma_addr_t p_phys_table;
1773 		u32 page_cnt;
1774 
1775 		if (fp->type & QEDE_FASTPATH_RX) {
1776 			struct qed_rxq_start_ret_params ret_params;
1777 			struct qede_rx_queue *rxq = fp->rxq;
1778 			__le16 *val;
1779 
1780 			memset(&ret_params, 0, sizeof(ret_params));
1781 			memset(&q_params, 0, sizeof(q_params));
1782 			q_params.queue_id = rxq->rxq_id;
1783 			q_params.vport_id = 0;
1784 			q_params.sb = fp->sb_info->igu_sb_id;
1785 			q_params.sb_idx = RX_PI;
1786 
1787 			p_phys_table =
1788 			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
1789 			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
1790 
1791 			rc = edev->ops->q_rx_start(cdev, i, &q_params,
1792 						   rxq->rx_buf_size,
1793 						   rxq->rx_bd_ring.p_phys_addr,
1794 						   p_phys_table,
1795 						   page_cnt, &ret_params);
1796 			if (rc) {
1797 				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
1798 				       rc);
1799 				goto out;
1800 			}
1801 
1802 			/* Use the return parameters */
1803 			rxq->hw_rxq_prod_addr = ret_params.p_prod;
1804 			rxq->handle = ret_params.p_handle;
1805 
1806 			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
1807 			rxq->hw_cons_ptr = val;
1808 
1809 			qede_update_rx_prod(edev, rxq);
1810 		}
1811 
1812 		if (fp->type & QEDE_FASTPATH_XDP) {
1813 			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
1814 			if (rc)
1815 				goto out;
1816 
1817 			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
1818 			if (IS_ERR(fp->rxq->xdp_prog)) {
1819 				rc = PTR_ERR(fp->rxq->xdp_prog);
1820 				fp->rxq->xdp_prog = NULL;
1821 				goto out;
1822 			}
1823 		}
1824 
1825 		if (fp->type & QEDE_FASTPATH_TX) {
1826 			rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
1827 			if (rc)
1828 				goto out;
1829 		}
1830 	}
1831 
1832 	/* Prepare and send the vport enable */
1833 	vport_update_params->vport_id = start.vport_id;
1834 	vport_update_params->update_vport_active_flg = 1;
1835 	vport_update_params->vport_active_flg = 1;
1836 
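	/* Request Tx switching in NPAR multi-function mode or when SR-IOV
	 * VFs are active, provided the device supports it.
	 */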
1837 	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
1838 	    qed_info->tx_switching) {
1839 		vport_update_params->update_tx_switching_flg = 1;
1840 		vport_update_params->tx_switching_flg = 1;
1841 	}
1842 
1843 	qede_fill_rss_params(edev, &vport_update_params->rss_params,
1844 			     &vport_update_params->update_rss_flg);
1845 
1846 	rc = edev->ops->vport_update(cdev, vport_update_params);
1847 	if (rc)
1848 		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
1849 
1850 out:
1851 	vfree(vport_update_params);
1852 	return rc;
1853 }
1854 
1855 enum qede_unload_mode {
1856 	QEDE_UNLOAD_NORMAL,
1857 };
1858 
1859 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
1860 			bool is_locked)
1861 {
1862 	struct qed_link_params link_params;
1863 	int rc;
1864 
1865 	DP_INFO(edev, "Starting qede unload\n");
1866 
1867 	if (!is_locked)
1868 		__qede_lock(edev);
1869 
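	/* Notify the RoCE driver before the L2 datapath is torn down */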
1870 	qede_roce_dev_event_close(edev);
1871 	edev->state = QEDE_STATE_CLOSED;
1872 
1873 	/* Close OS Tx */
1874 	netif_tx_disable(edev->ndev);
1875 	netif_carrier_off(edev->ndev);
1876 
1877 	/* Reset the link */
1878 	memset(&link_params, 0, sizeof(link_params));
1879 	link_params.link_up = false;
1880 	edev->ops->common->set_link(edev->cdev, &link_params);
1881 	rc = qede_stop_queues(edev);
1882 	if (rc) {
1883 		qede_sync_free_irqs(edev);
1884 		goto out;
1885 	}
1886 
1887 	DP_INFO(edev, "Stopped Queues\n");
1888 
1889 	qede_vlan_mark_nonconfigured(edev);
1890 	edev->ops->fastpath_stop(edev->cdev);
1891 #ifdef CONFIG_RFS_ACCEL
1892 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1893 		qede_poll_for_freeing_arfs_filters(edev);
1894 		qede_free_arfs(edev);
1895 	}
1896 #endif
1897 	/* Release the interrupts */
1898 	qede_sync_free_irqs(edev);
1899 	edev->ops->common->set_fp_int(edev->cdev, 0);
1900 
1901 	qede_napi_disable_remove(edev);
1902 
1903 	qede_free_mem_load(edev);
1904 	qede_free_fp_array(edev);
1905 
1906 out:
1907 	if (!is_locked)
1908 		__qede_unlock(edev);
1909 	DP_INFO(edev, "Ending qede unload\n");
1910 }
1911 
1912 enum qede_load_mode {
1913 	QEDE_LOAD_NORMAL,
1914 	QEDE_LOAD_RELOAD,
1915 };
1916 
1917 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
1918 		     bool is_locked)
1919 {
1920 	struct qed_link_params link_params;
1921 	int rc;
1922 
1923 	DP_INFO(edev, "Starting qede load\n");
1924 
1925 	if (!is_locked)
1926 		__qede_lock(edev);
1927 
1928 	rc = qede_set_num_queues(edev);
1929 	if (rc)
1930 		goto out;
1931 
1932 	rc = qede_alloc_fp_array(edev);
1933 	if (rc)
1934 		goto out;
1935 
1936 	qede_init_fp(edev);
1937 
1938 	rc = qede_alloc_mem_load(edev);
1939 	if (rc)
1940 		goto err1;
1941 	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
1942 		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
1943 
1944 	rc = qede_set_real_num_queues(edev);
1945 	if (rc)
1946 		goto err2;
1947 
1948 #ifdef CONFIG_RFS_ACCEL
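	/* aRFS is supported only on PFs with a single HW function;
	 * an allocation failure here is not fatal.
	 */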
1949 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1950 		rc = qede_alloc_arfs(edev);
1951 		if (rc)
1952 			DP_NOTICE(edev, "aRFS memory allocation failed\n");
1953 	}
1954 #endif
1955 	qede_napi_add_enable(edev);
1956 	DP_INFO(edev, "Napi added and enabled\n");
1957 
1958 	rc = qede_setup_irqs(edev);
1959 	if (rc)
1960 		goto err3;
1961 	DP_INFO(edev, "Setup IRQs succeeded\n");
1962 
1963 	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
1964 	if (rc)
1965 		goto err4;
1966 	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
1967 
	/* Add the primary MAC and set Rx filters */
1969 	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
1970 
1971 	/* Program un-configured VLANs */
1972 	qede_configure_vlan_filters(edev);
1973 
1974 	/* Ask for link-up using current configuration */
1975 	memset(&link_params, 0, sizeof(link_params));
1976 	link_params.link_up = true;
1977 	edev->ops->common->set_link(edev->cdev, &link_params);
1978 
1979 	qede_roce_dev_event_open(edev);
1980 
1981 	edev->state = QEDE_STATE_OPEN;
1982 
1983 	DP_INFO(edev, "Ending successfully qede load\n");
1984 
1985 	goto out;
1986 err4:
1987 	qede_sync_free_irqs(edev);
1988 	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
1989 err3:
1990 	qede_napi_disable_remove(edev);
1991 err2:
1992 	qede_free_mem_load(edev);
1993 err1:
1994 	edev->ops->common->set_fp_int(edev->cdev, 0);
1995 	qede_free_fp_array(edev);
1996 	edev->num_queues = 0;
1997 	edev->fp_num_tx = 0;
1998 	edev->fp_num_rx = 0;
1999 out:
2000 	if (!is_locked)
2001 		__qede_unlock(edev);
2002 
2003 	return rc;
2004 }
2005 
/* 'func' must be able to run between the unload and the reload when the
 * interface is currently running, or on its own when it is DOWN.
 */
2009 void qede_reload(struct qede_dev *edev,
2010 		 struct qede_reload_args *args, bool is_locked)
2011 {
2012 	if (!is_locked)
2013 		__qede_lock(edev);
2014 
	/* Since qede_lock is held, the internal state can't change even if
	 * the netdev state starts transitioning. Check whether the current
	 * internal configuration indicates the device is up, and if so
	 * reload it.
	 */
2019 	if (edev->state == QEDE_STATE_OPEN) {
2020 		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2021 		if (args)
2022 			args->func(edev, args);
2023 		qede_load(edev, QEDE_LOAD_RELOAD, true);
2024 
		/* Rx mode isn't restored by the reload, so re-configure it here */
2026 		qede_config_rx_mode(edev->ndev);
2027 	} else if (args) {
2028 		args->func(edev, args);
2029 	}
2030 
2031 	if (!is_locked)
2032 		__qede_unlock(edev);
2033 }
2034 
2035 /* called with rtnl_lock */
2036 static int qede_open(struct net_device *ndev)
2037 {
2038 	struct qede_dev *edev = netdev_priv(ndev);
2039 	int rc;
2040 
2041 	netif_carrier_off(ndev);
2042 
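	/* Bring the device to full power (D0) before loading */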
2043 	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2044 
2045 	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2046 	if (rc)
2047 		return rc;
2048 
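	/* Have the stack replay known UDP tunnel ports so the tunnel
	 * offloads can be re-programmed.
	 */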
2049 	udp_tunnel_get_rx_info(ndev);
2050 
2051 	edev->ops->common->update_drv_state(edev->cdev, true);
2052 
2053 	return 0;
2054 }
2055 
2056 static int qede_close(struct net_device *ndev)
2057 {
2058 	struct qede_dev *edev = netdev_priv(ndev);
2059 
2060 	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2061 
2062 	edev->ops->common->update_drv_state(edev->cdev, false);
2063 
2064 	return 0;
2065 }
2066 
2067 static void qede_link_update(void *dev, struct qed_link_output *link)
2068 {
2069 	struct qede_dev *edev = dev;
2070 
2071 	if (!netif_running(edev->ndev)) {
2072 		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
2073 		return;
2074 	}
2075 
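	/* Mirror the reported link state into the stack; the carrier checks
	 * avoid redundant transitions.
	 */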
2076 	if (link->link_up) {
2077 		if (!netif_carrier_ok(edev->ndev)) {
2078 			DP_NOTICE(edev, "Link is up\n");
2079 			netif_tx_start_all_queues(edev->ndev);
2080 			netif_carrier_on(edev->ndev);
2081 		}
2082 	} else {
2083 		if (netif_carrier_ok(edev->ndev)) {
2084 			DP_NOTICE(edev, "Link is down\n");
2085 			netif_tx_disable(edev->ndev);
2086 			netif_carrier_off(edev->ndev);
2087 		}
2088 	}
2089 }
2090