xref: /openbmc/linux/drivers/net/ethernet/qlogic/qede/qede_main.c (revision e4781421e883340b796da5a724bda7226817990b)
1 /* QLogic qede NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/pci.h>
34 #include <linux/version.h>
35 #include <linux/device.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/errno.h>
40 #include <linux/list.h>
41 #include <linux/string.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/interrupt.h>
44 #include <asm/byteorder.h>
45 #include <asm/param.h>
46 #include <linux/io.h>
47 #include <linux/netdev_features.h>
48 #include <linux/udp.h>
49 #include <linux/tcp.h>
50 #include <net/udp_tunnel.h>
51 #include <linux/ip.h>
52 #include <net/ipv6.h>
53 #include <net/tcp.h>
54 #include <linux/if_ether.h>
55 #include <linux/if_vlan.h>
56 #include <linux/pkt_sched.h>
57 #include <linux/ethtool.h>
58 #include <linux/in.h>
59 #include <linux/random.h>
60 #include <net/ip6_checksum.h>
61 #include <linux/bitops.h>
62 #include <linux/vmalloc.h>
63 #include <linux/qed/qede_roce.h>
64 #include "qede.h"
65 
66 static char version[] =
67 	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
68 
69 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72 
73 static uint debug;
74 module_param(debug, uint, 0);
75 MODULE_PARM_DESC(debug, " Default debug msglevel");
76 
77 static const struct qed_eth_ops *qed_ops;
78 
79 #define CHIP_NUM_57980S_40		0x1634
80 #define CHIP_NUM_57980S_10		0x1666
81 #define CHIP_NUM_57980S_MF		0x1636
82 #define CHIP_NUM_57980S_100		0x1644
83 #define CHIP_NUM_57980S_50		0x1654
84 #define CHIP_NUM_57980S_25		0x1656
85 #define CHIP_NUM_57980S_IOV		0x1664
86 
87 #ifndef PCI_DEVICE_ID_NX2_57980E
88 #define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
89 #define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
90 #define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
91 #define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
92 #define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
93 #define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
94 #define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
95 #endif
96 
97 enum qede_pci_private {
98 	QEDE_PRIVATE_PF,
99 	QEDE_PRIVATE_VF
100 };
101 
102 static const struct pci_device_id qede_pci_tbl[] = {
103 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
104 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
105 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
106 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
107 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
108 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
109 #ifdef CONFIG_QED_SRIOV
110 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
111 #endif
112 	{ 0 }
113 };
114 
115 MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
116 
117 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
118 
119 #define TX_TIMEOUT		(5 * HZ)
120 
121 /* Utilize last protocol index for XDP */
122 #define XDP_PI	11
123 
124 static void qede_remove(struct pci_dev *pdev);
125 static void qede_shutdown(struct pci_dev *pdev);
126 static void qede_link_update(void *dev, struct qed_link_output *link);
127 
128 /* The qede lock is used to protect driver state change and driver flows that
129  * are not reentrant.
130  */
131 void __qede_lock(struct qede_dev *edev)
132 {
133 	mutex_lock(&edev->qede_lock);
134 }
135 
136 void __qede_unlock(struct qede_dev *edev)
137 {
138 	mutex_unlock(&edev->qede_lock);
139 }
140 
141 #ifdef CONFIG_QED_SRIOV
142 static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
143 			    __be16 vlan_proto)
144 {
145 	struct qede_dev *edev = netdev_priv(ndev);
146 
147 	if (vlan > 4095) {
148 		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
149 		return -EINVAL;
150 	}
151 
152 	if (vlan_proto != htons(ETH_P_8021Q))
153 		return -EPROTONOSUPPORT;
154 
155 	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
156 		   vlan, vf);
157 
158 	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
159 }
160 
161 static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
162 {
163 	struct qede_dev *edev = netdev_priv(ndev);
164 
165 	DP_VERBOSE(edev, QED_MSG_IOV,
166 		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
167 		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
168 
169 	if (!is_valid_ether_addr(mac)) {
170 		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
171 		return -EINVAL;
172 	}
173 
174 	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
175 }
176 
177 static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
178 {
179 	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
180 	struct qed_dev_info *qed_info = &edev->dev_info.common;
181 	struct qed_update_vport_params *vport_params;
182 	int rc;
183 
184 	vport_params = vzalloc(sizeof(*vport_params));
185 	if (!vport_params)
186 		return -ENOMEM;
187 	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
188 
189 	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
190 
191 	/* Enable/Disable Tx switching for PF */
192 	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
193 	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
194 		vport_params->vport_id = 0;
195 		vport_params->update_tx_switching_flg = 1;
196 		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
197 		edev->ops->vport_update(edev->cdev, vport_params);
198 	}
199 
200 	vfree(vport_params);
201 	return rc;
202 }
203 #endif
204 
205 static struct pci_driver qede_pci_driver = {
206 	.name = "qede",
207 	.id_table = qede_pci_tbl,
208 	.probe = qede_probe,
209 	.remove = qede_remove,
210 	.shutdown = qede_shutdown,
211 #ifdef CONFIG_QED_SRIOV
212 	.sriov_configure = qede_sriov_configure,
213 #endif
214 };
215 
216 static struct qed_eth_cb_ops qede_ll_ops = {
217 	{
218 		.link_update = qede_link_update,
219 	},
220 	.force_mac = qede_force_mac,
221 };
222 
223 static int qede_netdev_event(struct notifier_block *this, unsigned long event,
224 			     void *ptr)
225 {
226 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
227 	struct ethtool_drvinfo drvinfo;
228 	struct qede_dev *edev;
229 
230 	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
231 		goto done;
232 
233 	/* Check whether this is a qede device */
234 	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
235 		goto done;
236 
237 	memset(&drvinfo, 0, sizeof(drvinfo));
238 	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
239 	if (strcmp(drvinfo.driver, "qede"))
240 		goto done;
241 	edev = netdev_priv(ndev);
242 
243 	switch (event) {
244 	case NETDEV_CHANGENAME:
245 		/* Notify qed of the name change */
246 		if (!edev->ops || !edev->ops->common)
247 			goto done;
248 		edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
249 		break;
250 	case NETDEV_CHANGEADDR:
251 		edev = netdev_priv(ndev);
252 		qede_roce_event_changeaddr(edev);
253 		break;
254 	}
255 
256 done:
257 	return NOTIFY_DONE;
258 }
259 
260 static struct notifier_block qede_netdev_notifier = {
261 	.notifier_call = qede_netdev_event,
262 };
263 
264 static
265 int __init qede_init(void)
266 {
267 	int ret;
268 
269 	pr_info("qede_init: %s\n", version);
270 
271 	qed_ops = qed_get_eth_ops();
272 	if (!qed_ops) {
273 		pr_notice("Failed to get qed ethtool operations\n");
274 		return -EINVAL;
275 	}
276 
277 	/* Must register notifier before pci ops, since we might miss
278 	 * interface rename after pci probe and netdev registration.
279 	 */
280 	ret = register_netdevice_notifier(&qede_netdev_notifier);
281 	if (ret) {
282 		pr_notice("Failed to register netdevice_notifier\n");
283 		qed_put_eth_ops();
284 		return -EINVAL;
285 	}
286 
287 	ret = pci_register_driver(&qede_pci_driver);
288 	if (ret) {
289 		pr_notice("Failed to register driver\n");
290 		unregister_netdevice_notifier(&qede_netdev_notifier);
291 		qed_put_eth_ops();
292 		return -EINVAL;
293 	}
294 
295 	return 0;
296 }
297 
298 static void __exit qede_cleanup(void)
299 {
300 	if (debug & QED_LOG_INFO_MASK)
301 		pr_info("qede_cleanup called\n");
302 
303 	unregister_netdevice_notifier(&qede_netdev_notifier);
304 	pci_unregister_driver(&qede_pci_driver);
305 	qed_put_eth_ops();
306 }
307 
308 module_init(qede_init);
309 module_exit(qede_cleanup);
310 
311 static int qede_open(struct net_device *ndev);
312 static int qede_close(struct net_device *ndev);
313 
314 void qede_fill_by_demand_stats(struct qede_dev *edev)
315 {
316 	struct qed_eth_stats stats;
317 
318 	edev->ops->get_vport_stats(edev->cdev, &stats);
319 	edev->stats.no_buff_discards = stats.no_buff_discards;
320 	edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
321 	edev->stats.ttl0_discard = stats.ttl0_discard;
322 	edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
323 	edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
324 	edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
325 	edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
326 	edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
327 	edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
328 	edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
329 	edev->stats.mac_filter_discards = stats.mac_filter_discards;
330 
331 	edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
332 	edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
333 	edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
334 	edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
335 	edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
336 	edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
337 	edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
338 	edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
339 	edev->stats.coalesced_events = stats.tpa_coalesced_events;
340 	edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
341 	edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
342 	edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
343 
344 	edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
345 	edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
346 	edev->stats.rx_128_to_255_byte_packets =
347 				stats.rx_128_to_255_byte_packets;
348 	edev->stats.rx_256_to_511_byte_packets =
349 				stats.rx_256_to_511_byte_packets;
350 	edev->stats.rx_512_to_1023_byte_packets =
351 				stats.rx_512_to_1023_byte_packets;
352 	edev->stats.rx_1024_to_1518_byte_packets =
353 				stats.rx_1024_to_1518_byte_packets;
354 	edev->stats.rx_1519_to_1522_byte_packets =
355 				stats.rx_1519_to_1522_byte_packets;
356 	edev->stats.rx_1519_to_2047_byte_packets =
357 				stats.rx_1519_to_2047_byte_packets;
358 	edev->stats.rx_2048_to_4095_byte_packets =
359 				stats.rx_2048_to_4095_byte_packets;
360 	edev->stats.rx_4096_to_9216_byte_packets =
361 				stats.rx_4096_to_9216_byte_packets;
362 	edev->stats.rx_9217_to_16383_byte_packets =
363 				stats.rx_9217_to_16383_byte_packets;
364 	edev->stats.rx_crc_errors = stats.rx_crc_errors;
365 	edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
366 	edev->stats.rx_pause_frames = stats.rx_pause_frames;
367 	edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
368 	edev->stats.rx_align_errors = stats.rx_align_errors;
369 	edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
370 	edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
371 	edev->stats.rx_jabbers = stats.rx_jabbers;
372 	edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
373 	edev->stats.rx_fragments = stats.rx_fragments;
374 	edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
375 	edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
376 	edev->stats.tx_128_to_255_byte_packets =
377 				stats.tx_128_to_255_byte_packets;
378 	edev->stats.tx_256_to_511_byte_packets =
379 				stats.tx_256_to_511_byte_packets;
380 	edev->stats.tx_512_to_1023_byte_packets =
381 				stats.tx_512_to_1023_byte_packets;
382 	edev->stats.tx_1024_to_1518_byte_packets =
383 				stats.tx_1024_to_1518_byte_packets;
384 	edev->stats.tx_1519_to_2047_byte_packets =
385 				stats.tx_1519_to_2047_byte_packets;
386 	edev->stats.tx_2048_to_4095_byte_packets =
387 				stats.tx_2048_to_4095_byte_packets;
388 	edev->stats.tx_4096_to_9216_byte_packets =
389 				stats.tx_4096_to_9216_byte_packets;
390 	edev->stats.tx_9217_to_16383_byte_packets =
391 				stats.tx_9217_to_16383_byte_packets;
392 	edev->stats.tx_pause_frames = stats.tx_pause_frames;
393 	edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
394 	edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
395 	edev->stats.tx_total_collisions = stats.tx_total_collisions;
396 	edev->stats.brb_truncates = stats.brb_truncates;
397 	edev->stats.brb_discards = stats.brb_discards;
398 	edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
399 }
400 
401 static
402 struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
403 					   struct rtnl_link_stats64 *stats)
404 {
405 	struct qede_dev *edev = netdev_priv(dev);
406 
407 	qede_fill_by_demand_stats(edev);
408 
409 	stats->rx_packets = edev->stats.rx_ucast_pkts +
410 			    edev->stats.rx_mcast_pkts +
411 			    edev->stats.rx_bcast_pkts;
412 	stats->tx_packets = edev->stats.tx_ucast_pkts +
413 			    edev->stats.tx_mcast_pkts +
414 			    edev->stats.tx_bcast_pkts;
415 
416 	stats->rx_bytes = edev->stats.rx_ucast_bytes +
417 			  edev->stats.rx_mcast_bytes +
418 			  edev->stats.rx_bcast_bytes;
419 
420 	stats->tx_bytes = edev->stats.tx_ucast_bytes +
421 			  edev->stats.tx_mcast_bytes +
422 			  edev->stats.tx_bcast_bytes;
423 
424 	stats->tx_errors = edev->stats.tx_err_drop_pkts;
425 	stats->multicast = edev->stats.rx_mcast_pkts +
426 			   edev->stats.rx_bcast_pkts;
427 
428 	stats->rx_fifo_errors = edev->stats.no_buff_discards;
429 
430 	stats->collisions = edev->stats.tx_total_collisions;
431 	stats->rx_crc_errors = edev->stats.rx_crc_errors;
432 	stats->rx_frame_errors = edev->stats.rx_align_errors;
433 
434 	return stats;
435 }
436 
437 #ifdef CONFIG_QED_SRIOV
438 static int qede_get_vf_config(struct net_device *dev, int vfidx,
439 			      struct ifla_vf_info *ivi)
440 {
441 	struct qede_dev *edev = netdev_priv(dev);
442 
443 	if (!edev->ops)
444 		return -EINVAL;
445 
446 	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
447 }
448 
449 static int qede_set_vf_rate(struct net_device *dev, int vfidx,
450 			    int min_tx_rate, int max_tx_rate)
451 {
452 	struct qede_dev *edev = netdev_priv(dev);
453 
454 	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
455 					max_tx_rate);
456 }
457 
458 static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
459 {
460 	struct qede_dev *edev = netdev_priv(dev);
461 
462 	if (!edev->ops)
463 		return -EINVAL;
464 
465 	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
466 }
467 
468 static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
469 				  int link_state)
470 {
471 	struct qede_dev *edev = netdev_priv(dev);
472 
473 	if (!edev->ops)
474 		return -EINVAL;
475 
476 	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
477 }
478 
479 static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
480 {
481 	struct qede_dev *edev = netdev_priv(dev);
482 
483 	if (!edev->ops)
484 		return -EINVAL;
485 
486 	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
487 }
488 #endif
489 
490 static const struct net_device_ops qede_netdev_ops = {
491 	.ndo_open = qede_open,
492 	.ndo_stop = qede_close,
493 	.ndo_start_xmit = qede_start_xmit,
494 	.ndo_set_rx_mode = qede_set_rx_mode,
495 	.ndo_set_mac_address = qede_set_mac_addr,
496 	.ndo_validate_addr = eth_validate_addr,
497 	.ndo_change_mtu = qede_change_mtu,
498 #ifdef CONFIG_QED_SRIOV
499 	.ndo_set_vf_mac = qede_set_vf_mac,
500 	.ndo_set_vf_vlan = qede_set_vf_vlan,
501 	.ndo_set_vf_trust = qede_set_vf_trust,
502 #endif
503 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
504 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
505 	.ndo_set_features = qede_set_features,
506 	.ndo_get_stats64 = qede_get_stats64,
507 #ifdef CONFIG_QED_SRIOV
508 	.ndo_set_vf_link_state = qede_set_vf_link_state,
509 	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
510 	.ndo_get_vf_config = qede_get_vf_config,
511 	.ndo_set_vf_rate = qede_set_vf_rate,
512 #endif
513 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
514 	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
515 	.ndo_features_check = qede_features_check,
516 	.ndo_xdp = qede_xdp,
517 };
518 
519 /* -------------------------------------------------------------------------
520  * START OF PROBE / REMOVE
521  * -------------------------------------------------------------------------
522  */
523 
524 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
525 					    struct pci_dev *pdev,
526 					    struct qed_dev_eth_info *info,
527 					    u32 dp_module, u8 dp_level)
528 {
529 	struct net_device *ndev;
530 	struct qede_dev *edev;
531 
532 	ndev = alloc_etherdev_mqs(sizeof(*edev),
533 				  info->num_queues, info->num_queues);
534 	if (!ndev) {
535 		pr_err("etherdev allocation failed\n");
536 		return NULL;
537 	}
538 
539 	edev = netdev_priv(ndev);
540 	edev->ndev = ndev;
541 	edev->cdev = cdev;
542 	edev->pdev = pdev;
543 	edev->dp_module = dp_module;
544 	edev->dp_level = dp_level;
545 	edev->ops = qed_ops;
546 	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
547 	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
548 
549 	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
550 		info->num_queues, info->num_queues);
551 
552 	SET_NETDEV_DEV(ndev, &pdev->dev);
553 
554 	memset(&edev->stats, 0, sizeof(edev->stats));
555 	memcpy(&edev->dev_info, info, sizeof(*info));
556 
557 	INIT_LIST_HEAD(&edev->vlan_list);
558 
559 	return edev;
560 }
561 
562 static void qede_init_ndev(struct qede_dev *edev)
563 {
564 	struct net_device *ndev = edev->ndev;
565 	struct pci_dev *pdev = edev->pdev;
566 	u32 hw_features;
567 
568 	pci_set_drvdata(pdev, ndev);
569 
570 	ndev->mem_start = edev->dev_info.common.pci_mem_start;
571 	ndev->base_addr = ndev->mem_start;
572 	ndev->mem_end = edev->dev_info.common.pci_mem_end;
573 	ndev->irq = edev->dev_info.common.pci_irq;
574 
575 	ndev->watchdog_timeo = TX_TIMEOUT;
576 
577 	ndev->netdev_ops = &qede_netdev_ops;
578 
579 	qede_set_ethtool_ops(ndev);
580 
581 	ndev->priv_flags |= IFF_UNICAST_FLT;
582 
583 	/* user-changeable features */
584 	hw_features = NETIF_F_GRO | NETIF_F_SG |
585 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
586 		      NETIF_F_TSO | NETIF_F_TSO6;
587 
588 	/* Encap features */
589 	hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
590 		       NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
591 		       NETIF_F_GSO_GRE_CSUM;
592 	ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
593 				NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
594 				NETIF_F_TSO6 | NETIF_F_GSO_GRE |
595 				NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
596 				NETIF_F_GSO_UDP_TUNNEL_CSUM |
597 				NETIF_F_GSO_GRE_CSUM;
598 
599 	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
600 			      NETIF_F_HIGHDMA;
601 	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
602 			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
603 			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
604 
605 	ndev->hw_features = hw_features;
606 
607 	/* MTU range: 46 - 9600 */
608 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
609 	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
610 
611 	/* Set network device HW mac */
612 	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
613 
614 	ndev->mtu = edev->dev_info.common.mtu;
615 }
616 
617 /* This function converts the 32b debug param into two params: level and module.
618  * Input 32b decoding:
619  * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
620  * 'happy' flow, e.g. memory allocation failed.
621  * b30 - enable all INFO prints. INFO prints are for major steps in the flow
622  * and provide important parameters.
623  * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
624  * module. VERBOSE prints are for tracking the specific flow in low level.
625  *
626  * Notice that the level should be that of the lowest required logs.
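 *
 * Per the decoding above, e.g., debug=0x40000000 selects the INFO level, while
 * debug=0x3 selects the VERBOSE level with VERBOSE prints enabled for
 * modules 0 and 1.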
627  */
628 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
629 {
630 	*p_dp_level = QED_LEVEL_NOTICE;
631 	*p_dp_module = 0;
632 
633 	if (debug & QED_LOG_VERBOSE_MASK) {
634 		*p_dp_level = QED_LEVEL_VERBOSE;
635 		*p_dp_module = (debug & 0x3FFFFFFF);
636 	} else if (debug & QED_LOG_INFO_MASK) {
637 		*p_dp_level = QED_LEVEL_INFO;
638 	} else if (debug & QED_LOG_NOTICE_MASK) {
639 		*p_dp_level = QED_LEVEL_NOTICE;
640 	}
641 }
642 
643 static void qede_free_fp_array(struct qede_dev *edev)
644 {
645 	if (edev->fp_array) {
646 		struct qede_fastpath *fp;
647 		int i;
648 
649 		for_each_queue(i) {
650 			fp = &edev->fp_array[i];
651 
652 			kfree(fp->sb_info);
653 			kfree(fp->rxq);
654 			kfree(fp->xdp_tx);
655 			kfree(fp->txq);
656 		}
657 		kfree(edev->fp_array);
658 	}
659 
660 	edev->num_queues = 0;
661 	edev->fp_num_tx = 0;
662 	edev->fp_num_rx = 0;
663 }
664 
665 static int qede_alloc_fp_array(struct qede_dev *edev)
666 {
667 	u8 fp_combined, fp_rx = edev->fp_num_rx;
668 	struct qede_fastpath *fp;
669 	int i;
670 
671 	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
672 				 sizeof(*edev->fp_array), GFP_KERNEL);
673 	if (!edev->fp_array) {
674 		DP_NOTICE(edev, "fp array allocation failed\n");
675 		goto err;
676 	}
677 
678 	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
679 
680 	/* Allocate the FP elements for Rx queues followed by combined and then
681 	 * the Tx. This ordering should be maintained so that the respective
682 	 * queues (Rx or Tx) will be together in the fastpath array and the
683 	 * associated ids will be sequential.
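	 *
	 * E.g., with 6 queues total, fp_num_rx = 2 and fp_num_tx = 2, indices 0-1
	 * become Rx-only fastpaths, 2-3 combined and 4-5 Tx-only.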
684 	 */
685 	for_each_queue(i) {
686 		fp = &edev->fp_array[i];
687 
688 		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
689 		if (!fp->sb_info) {
690 			DP_NOTICE(edev, "sb info struct allocation failed\n");
691 			goto err;
692 		}
693 
694 		if (fp_rx) {
695 			fp->type = QEDE_FASTPATH_RX;
696 			fp_rx--;
697 		} else if (fp_combined) {
698 			fp->type = QEDE_FASTPATH_COMBINED;
699 			fp_combined--;
700 		} else {
701 			fp->type = QEDE_FASTPATH_TX;
702 		}
703 
704 		if (fp->type & QEDE_FASTPATH_TX) {
705 			fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
706 			if (!fp->txq)
707 				goto err;
708 		}
709 
710 		if (fp->type & QEDE_FASTPATH_RX) {
711 			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
712 			if (!fp->rxq)
713 				goto err;
714 
715 			if (edev->xdp_prog) {
716 				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
717 						     GFP_KERNEL);
718 				if (!fp->xdp_tx)
719 					goto err;
720 				fp->type |= QEDE_FASTPATH_XDP;
721 			}
722 		}
723 	}
724 
725 	return 0;
726 err:
727 	qede_free_fp_array(edev);
728 	return -ENOMEM;
729 }
730 
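/* Delayed-work handler for deferred slow-path configuration: Rx mode updates
 * and VXLAN/GENEVE tunnel port changes requested via edev->sp_flags.
 */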
731 static void qede_sp_task(struct work_struct *work)
732 {
733 	struct qede_dev *edev = container_of(work, struct qede_dev,
734 					     sp_task.work);
735 	struct qed_dev *cdev = edev->cdev;
736 
737 	__qede_lock(edev);
738 
739 	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
740 		if (edev->state == QEDE_STATE_OPEN)
741 			qede_config_rx_mode(edev->ndev);
742 
743 	if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
744 		struct qed_tunn_params tunn_params;
745 
746 		memset(&tunn_params, 0, sizeof(tunn_params));
747 		tunn_params.update_vxlan_port = 1;
748 		tunn_params.vxlan_port = edev->vxlan_dst_port;
749 		qed_ops->tunn_config(cdev, &tunn_params);
750 	}
751 
752 	if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
753 		struct qed_tunn_params tunn_params;
754 
755 		memset(&tunn_params, 0, sizeof(tunn_params));
756 		tunn_params.update_geneve_port = 1;
757 		tunn_params.geneve_port = edev->geneve_dst_port;
758 		qed_ops->tunn_config(cdev, &tunn_params);
759 	}
760 
761 	__qede_unlock(edev);
762 }
763 
764 static void qede_update_pf_params(struct qed_dev *cdev)
765 {
766 	struct qed_pf_params pf_params;
767 
768 	/* 64 rx + 64 tx + 64 XDP */
769 	memset(&pf_params, 0, sizeof(struct qed_pf_params));
770 	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
771 	qed_ops->common->update_pf_params(cdev, &pf_params);
772 }
773 
774 enum qede_probe_mode {
775 	QEDE_PROBE_NORMAL,
776 };
777 
778 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
779 			bool is_vf, enum qede_probe_mode mode)
780 {
781 	struct qed_probe_params probe_params;
782 	struct qed_slowpath_params sp_params;
783 	struct qed_dev_eth_info dev_info;
784 	struct qede_dev *edev;
785 	struct qed_dev *cdev;
786 	int rc;
787 
788 	if (unlikely(dp_level & QED_LEVEL_INFO))
789 		pr_notice("Starting qede probe\n");
790 
791 	memset(&probe_params, 0, sizeof(probe_params));
792 	probe_params.protocol = QED_PROTOCOL_ETH;
793 	probe_params.dp_module = dp_module;
794 	probe_params.dp_level = dp_level;
795 	probe_params.is_vf = is_vf;
796 	cdev = qed_ops->common->probe(pdev, &probe_params);
797 	if (!cdev) {
798 		rc = -ENODEV;
799 		goto err0;
800 	}
801 
802 	qede_update_pf_params(cdev);
803 
804 	/* Start the Slowpath-process */
805 	memset(&sp_params, 0, sizeof(sp_params));
806 	sp_params.int_mode = QED_INT_MODE_MSIX;
807 	sp_params.drv_major = QEDE_MAJOR_VERSION;
808 	sp_params.drv_minor = QEDE_MINOR_VERSION;
809 	sp_params.drv_rev = QEDE_REVISION_VERSION;
810 	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
811 	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
812 	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
813 	if (rc) {
814 		pr_notice("Cannot start slowpath\n");
815 		goto err1;
816 	}
817 
818 	/* Learn information crucial for qede to progress */
819 	rc = qed_ops->fill_dev_info(cdev, &dev_info);
820 	if (rc)
821 		goto err2;
822 
823 	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
824 				   dp_level);
825 	if (!edev) {
826 		rc = -ENOMEM;
827 		goto err2;
828 	}
829 
830 	if (is_vf)
831 		edev->flags |= QEDE_FLAG_IS_VF;
832 
833 	qede_init_ndev(edev);
834 
835 	rc = qede_roce_dev_add(edev);
836 	if (rc)
837 		goto err3;
838 
839 	rc = register_netdev(edev->ndev);
840 	if (rc) {
841 		DP_NOTICE(edev, "Cannot register net-device\n");
842 		goto err4;
843 	}
844 
845 	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
846 
847 	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
848 
849 #ifdef CONFIG_DCB
850 	if (!IS_VF(edev))
851 		qede_set_dcbnl_ops(edev->ndev);
852 #endif
853 
854 	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
855 	mutex_init(&edev->qede_lock);
856 	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
857 
858 	DP_INFO(edev, "Ending successfully qede probe\n");
859 
860 	return 0;
861 
862 err4:
863 	qede_roce_dev_remove(edev);
864 err3:
865 	free_netdev(edev->ndev);
866 err2:
867 	qed_ops->common->slowpath_stop(cdev);
868 err1:
869 	qed_ops->common->remove(cdev);
870 err0:
871 	return rc;
872 }
873 
874 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
875 {
876 	bool is_vf = false;
877 	u32 dp_module = 0;
878 	u8 dp_level = 0;
879 
880 	switch ((enum qede_pci_private)id->driver_data) {
881 	case QEDE_PRIVATE_VF:
882 		if (debug & QED_LOG_VERBOSE_MASK)
883 			dev_err(&pdev->dev, "Probing a VF\n");
884 		is_vf = true;
885 		break;
886 	default:
887 		if (debug & QED_LOG_VERBOSE_MASK)
888 			dev_err(&pdev->dev, "Probing a PF\n");
889 	}
890 
891 	qede_config_debug(debug, &dp_module, &dp_level);
892 
893 	return __qede_probe(pdev, dp_module, dp_level, is_vf,
894 			    QEDE_PROBE_NORMAL);
895 }
896 
897 enum qede_remove_mode {
898 	QEDE_REMOVE_NORMAL,
899 };
900 
901 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
902 {
903 	struct net_device *ndev = pci_get_drvdata(pdev);
904 	struct qede_dev *edev = netdev_priv(ndev);
905 	struct qed_dev *cdev = edev->cdev;
906 
907 	DP_INFO(edev, "Starting qede_remove\n");
908 
909 	cancel_delayed_work_sync(&edev->sp_task);
910 
911 	unregister_netdev(ndev);
912 
913 	qede_roce_dev_remove(edev);
914 
915 	edev->ops->common->set_power_state(cdev, PCI_D0);
916 
917 	pci_set_drvdata(pdev, NULL);
918 
919 	/* Release edev's reference to the XDP bpf program, if one exists */
920 	if (edev->xdp_prog)
921 		bpf_prog_put(edev->xdp_prog);
922 
923 	free_netdev(ndev);
924 
925 	/* Use global ops since we've freed edev */
926 	qed_ops->common->slowpath_stop(cdev);
927 	if (system_state == SYSTEM_POWER_OFF)
928 		return;
929 	qed_ops->common->remove(cdev);
930 
931 	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
932 }
933 
934 static void qede_remove(struct pci_dev *pdev)
935 {
936 	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
937 }
938 
939 static void qede_shutdown(struct pci_dev *pdev)
940 {
941 	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
942 }
943 
944 /* -------------------------------------------------------------------------
945  * START OF LOAD / UNLOAD
946  * -------------------------------------------------------------------------
947  */
948 
949 static int qede_set_num_queues(struct qede_dev *edev)
950 {
951 	int rc;
952 	u16 rss_num;
953 
954 	/* Set up queues according to available resources */
955 	if (edev->req_queues)
956 		rss_num = edev->req_queues;
957 	else
958 		rss_num = netif_get_num_default_rss_queues() *
959 			  edev->dev_info.common.num_hwfns;
960 
961 	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
962 
963 	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
964 	if (rc > 0) {
965 		/* Managed to request interrupts for our queues */
966 		edev->num_queues = rc;
967 		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
968 			QEDE_QUEUE_CNT(edev), rss_num);
969 		rc = 0;
970 	}
971 
972 	edev->fp_num_tx = edev->req_num_tx;
973 	edev->fp_num_rx = edev->req_num_rx;
974 
975 	return rc;
976 }
977 
978 static void qede_free_mem_sb(struct qede_dev *edev,
979 			     struct qed_sb_info *sb_info)
980 {
981 	if (sb_info->sb_virt)
982 		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
983 				  (void *)sb_info->sb_virt, sb_info->sb_phys);
984 }
985 
986 /* This function allocates fast-path status block memory */
987 static int qede_alloc_mem_sb(struct qede_dev *edev,
988 			     struct qed_sb_info *sb_info, u16 sb_id)
989 {
990 	struct status_block *sb_virt;
991 	dma_addr_t sb_phys;
992 	int rc;
993 
994 	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
995 				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
996 	if (!sb_virt) {
997 		DP_ERR(edev, "Status block allocation failed\n");
998 		return -ENOMEM;
999 	}
1000 
1001 	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
1002 					sb_virt, sb_phys, sb_id,
1003 					QED_SB_TYPE_L2_QUEUE);
1004 	if (rc) {
1005 		DP_ERR(edev, "Status block initialization failed\n");
1006 		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
1007 				  sb_virt, sb_phys);
1008 		return rc;
1009 	}
1010 
1011 	return 0;
1012 }
1013 
1014 static void qede_free_rx_buffers(struct qede_dev *edev,
1015 				 struct qede_rx_queue *rxq)
1016 {
1017 	u16 i;
1018 
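	/* Walk from the consumer to the producer index; the mask below wraps
	 * the running index onto the sw_rx_ring.
	 */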
1019 	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
1020 		struct sw_rx_data *rx_buf;
1021 		struct page *data;
1022 
1023 		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
1024 		data = rx_buf->data;
1025 
1026 		dma_unmap_page(&edev->pdev->dev,
1027 			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
1028 
1029 		rx_buf->data = NULL;
1030 		__free_page(data);
1031 	}
1032 }
1033 
1034 static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
1035 {
1036 	int i;
1037 
1038 	if (edev->gro_disable)
1039 		return;
1040 
1041 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1042 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1043 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
1044 
1045 		if (replace_buf->data) {
1046 			dma_unmap_page(&edev->pdev->dev,
1047 				       replace_buf->mapping,
1048 				       PAGE_SIZE, DMA_FROM_DEVICE);
1049 			__free_page(replace_buf->data);
1050 		}
1051 	}
1052 }
1053 
1054 static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1055 {
1056 	qede_free_sge_mem(edev, rxq);
1057 
1058 	/* Free rx buffers */
1059 	qede_free_rx_buffers(edev, rxq);
1060 
1061 	/* Free the parallel SW ring */
1062 	kfree(rxq->sw_rx_ring);
1063 
1064 	/* Free the real RQ ring used by FW */
1065 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
1066 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
1067 }
1068 
1069 static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
1070 {
1071 	dma_addr_t mapping;
1072 	int i;
1073 
1074 	/* Don't perform FW aggregations in case of XDP */
1075 	if (edev->xdp_prog)
1076 		edev->gro_disable = 1;
1077 
1078 	if (edev->gro_disable)
1079 		return 0;
1080 
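	/* The TPA replacement buffers below are single pages, so aggregation is
	 * also disabled when the MTU doesn't fit within one page.
	 */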
1081 	if (edev->ndev->mtu > PAGE_SIZE) {
1082 		edev->gro_disable = 1;
1083 		return 0;
1084 	}
1085 
1086 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1087 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1088 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
1089 
1090 		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
1091 		if (unlikely(!replace_buf->data)) {
1092 			DP_NOTICE(edev,
1093 				  "Failed to allocate TPA skb pool [replacement buffer]\n");
1094 			goto err;
1095 		}
1096 
1097 		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
1098 				       PAGE_SIZE, DMA_FROM_DEVICE);
1099 		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
1100 			DP_NOTICE(edev,
1101 				  "Failed to map TPA replacement buffer\n");
1102 			goto err;
1103 		}
1104 
1105 		replace_buf->mapping = mapping;
1106 		tpa_info->buffer.page_offset = 0;
1107 		tpa_info->buffer_mapping = mapping;
1108 		tpa_info->state = QEDE_AGG_STATE_NONE;
1109 	}
1110 
1111 	return 0;
1112 err:
1113 	qede_free_sge_mem(edev, rxq);
1114 	edev->gro_disable = 1;
1115 	return -ENOMEM;
1116 }
1117 
1118 /* This function allocates all memory needed per Rx queue */
1119 static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1120 {
1121 	int i, rc, size;
1122 
1123 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
1124 
1125 	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
1126 
1127 	if (rxq->rx_buf_size > PAGE_SIZE)
1128 		rxq->rx_buf_size = PAGE_SIZE;
1129 
1130 	/* Segment size to split a page into multiple equal parts,
1131 	 * unless XDP is used in which case we'd use the entire page.
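	 *
	 * E.g., a standard 1500-byte MTU yields a buffer of roughly 1.5K, which
	 * is rounded up to a 2K segment, so two buffers share each 4K page.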
1132 	 */
1133 	if (!edev->xdp_prog)
1134 		rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
1135 	else
1136 		rxq->rx_buf_seg_size = PAGE_SIZE;
1137 
1138 	/* Allocate the parallel driver ring for Rx buffers */
1139 	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
1140 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1141 	if (!rxq->sw_rx_ring) {
1142 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
1143 		rc = -ENOMEM;
1144 		goto err;
1145 	}
1146 
1147 	/* Allocate FW Rx ring */
1148 	rc = edev->ops->common->chain_alloc(edev->cdev,
1149 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1150 					    QED_CHAIN_MODE_NEXT_PTR,
1151 					    QED_CHAIN_CNT_TYPE_U16,
1152 					    RX_RING_SIZE,
1153 					    sizeof(struct eth_rx_bd),
1154 					    &rxq->rx_bd_ring);
1155 
1156 	if (rc)
1157 		goto err;
1158 
1159 	/* Allocate FW completion ring */
1160 	rc = edev->ops->common->chain_alloc(edev->cdev,
1161 					    QED_CHAIN_USE_TO_CONSUME,
1162 					    QED_CHAIN_MODE_PBL,
1163 					    QED_CHAIN_CNT_TYPE_U16,
1164 					    RX_RING_SIZE,
1165 					    sizeof(union eth_rx_cqe),
1166 					    &rxq->rx_comp_ring);
1167 	if (rc)
1168 		goto err;
1169 
1170 	/* Allocate buffers for the Rx ring */
1171 	rxq->filled_buffers = 0;
1172 	for (i = 0; i < rxq->num_rx_buffers; i++) {
1173 		rc = qede_alloc_rx_buffer(rxq, false);
1174 		if (rc) {
1175 			DP_ERR(edev,
1176 			       "Rx buffers allocation failed at index %d\n", i);
1177 			goto err;
1178 		}
1179 	}
1180 
1181 	rc = qede_alloc_sge_mem(edev, rxq);
1182 err:
1183 	return rc;
1184 }
1185 
1186 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1187 {
1188 	/* Free the parallel SW ring */
1189 	if (txq->is_xdp)
1190 		kfree(txq->sw_tx_ring.pages);
1191 	else
1192 		kfree(txq->sw_tx_ring.skbs);
1193 
1194 	/* Free the real Tx PBL ring used by FW */
1195 	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1196 }
1197 
1198 /* This function allocates all memory needed per Tx queue */
1199 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1200 {
1201 	union eth_tx_bd_types *p_virt;
1202 	int size, rc;
1203 
1204 	txq->num_tx_buffers = edev->q_num_tx_buffers;
1205 
1206 	/* Allocate the parallel driver ring for Tx buffers */
1207 	if (txq->is_xdp) {
1208 		size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
1209 		txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
1210 		if (!txq->sw_tx_ring.pages)
1211 			goto err;
1212 	} else {
1213 		size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
1214 		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1215 		if (!txq->sw_tx_ring.skbs)
1216 			goto err;
1217 	}
1218 
1219 	rc = edev->ops->common->chain_alloc(edev->cdev,
1220 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1221 					    QED_CHAIN_MODE_PBL,
1222 					    QED_CHAIN_CNT_TYPE_U16,
1223 					    TX_RING_SIZE,
1224 					    sizeof(*p_virt), &txq->tx_pbl);
1225 	if (rc)
1226 		goto err;
1227 
1228 	return 0;
1229 
1230 err:
1231 	qede_free_mem_txq(edev, txq);
1232 	return -ENOMEM;
1233 }
1234 
1235 /* This function frees all memory of a single fp */
1236 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1237 {
1238 	qede_free_mem_sb(edev, fp->sb_info);
1239 
1240 	if (fp->type & QEDE_FASTPATH_RX)
1241 		qede_free_mem_rxq(edev, fp->rxq);
1242 
1243 	if (fp->type & QEDE_FASTPATH_TX)
1244 		qede_free_mem_txq(edev, fp->txq);
1245 }
1246 
1247 /* This function allocates all memory needed for a single fp (i.e. an entity
1248  * which contains a status block, one rx queue and/or multiple per-TC tx queues).
1249  */
1250 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1251 {
1252 	int rc = 0;
1253 
1254 	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1255 	if (rc)
1256 		goto out;
1257 
1258 	if (fp->type & QEDE_FASTPATH_RX) {
1259 		rc = qede_alloc_mem_rxq(edev, fp->rxq);
1260 		if (rc)
1261 			goto out;
1262 	}
1263 
1264 	if (fp->type & QEDE_FASTPATH_XDP) {
1265 		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1266 		if (rc)
1267 			goto out;
1268 	}
1269 
1270 	if (fp->type & QEDE_FASTPATH_TX) {
1271 		rc = qede_alloc_mem_txq(edev, fp->txq);
1272 		if (rc)
1273 			goto out;
1274 	}
1275 
1276 out:
1277 	return rc;
1278 }
1279 
1280 static void qede_free_mem_load(struct qede_dev *edev)
1281 {
1282 	int i;
1283 
1284 	for_each_queue(i) {
1285 		struct qede_fastpath *fp = &edev->fp_array[i];
1286 
1287 		qede_free_mem_fp(edev, fp);
1288 	}
1289 }
1290 
1291 /* This function allocates all qede memory at NIC load. */
1292 static int qede_alloc_mem_load(struct qede_dev *edev)
1293 {
1294 	int rc = 0, queue_id;
1295 
1296 	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1297 		struct qede_fastpath *fp = &edev->fp_array[queue_id];
1298 
1299 		rc = qede_alloc_mem_fp(edev, fp);
1300 		if (rc) {
1301 			DP_ERR(edev,
1302 			       "Failed to allocate memory for fastpath - rss id = %d\n",
1303 			       queue_id);
1304 			qede_free_mem_load(edev);
1305 			return rc;
1306 		}
1307 	}
1308 
1309 	return 0;
1310 }
1311 
1312 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
1313 static void qede_init_fp(struct qede_dev *edev)
1314 {
1315 	int queue_id, rxq_index = 0, txq_index = 0;
1316 	struct qede_fastpath *fp;
1317 
1318 	for_each_queue(queue_id) {
1319 		fp = &edev->fp_array[queue_id];
1320 
1321 		fp->edev = edev;
1322 		fp->id = queue_id;
1323 
1324 		if (fp->type & QEDE_FASTPATH_XDP) {
1325 			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1326 								rxq_index);
1327 			fp->xdp_tx->is_xdp = 1;
1328 		}
1329 
1330 		if (fp->type & QEDE_FASTPATH_RX) {
1331 			fp->rxq->rxq_id = rxq_index++;
1332 
1333 			/* Determine how to map buffers for this queue */
1334 			if (fp->type & QEDE_FASTPATH_XDP)
1335 				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1336 			else
1337 				fp->rxq->data_direction = DMA_FROM_DEVICE;
1338 			fp->rxq->dev = &edev->pdev->dev;
1339 		}
1340 
1341 		if (fp->type & QEDE_FASTPATH_TX) {
1342 			fp->txq->index = txq_index++;
1343 			if (edev->dev_info.is_legacy)
1344 				fp->txq->is_legacy = 1;
1345 			fp->txq->dev = &edev->pdev->dev;
1346 		}
1347 
1348 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1349 			 edev->ndev->name, queue_id);
1350 	}
1351 
1352 	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
1353 }
1354 
1355 static int qede_set_real_num_queues(struct qede_dev *edev)
1356 {
1357 	int rc = 0;
1358 
1359 	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
1360 	if (rc) {
1361 		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1362 		return rc;
1363 	}
1364 
1365 	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1366 	if (rc) {
1367 		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1368 		return rc;
1369 	}
1370 
1371 	return 0;
1372 }
1373 
1374 static void qede_napi_disable_remove(struct qede_dev *edev)
1375 {
1376 	int i;
1377 
1378 	for_each_queue(i) {
1379 		napi_disable(&edev->fp_array[i].napi);
1380 
1381 		netif_napi_del(&edev->fp_array[i].napi);
1382 	}
1383 }
1384 
1385 static void qede_napi_add_enable(struct qede_dev *edev)
1386 {
1387 	int i;
1388 
1389 	/* Add NAPI objects */
1390 	for_each_queue(i) {
1391 		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
1392 			       qede_poll, NAPI_POLL_WEIGHT);
1393 		napi_enable(&edev->fp_array[i].napi);
1394 	}
1395 }
1396 
1397 static void qede_sync_free_irqs(struct qede_dev *edev)
1398 {
1399 	int i;
1400 
1401 	for (i = 0; i < edev->int_info.used_cnt; i++) {
1402 		if (edev->int_info.msix_cnt) {
1403 			synchronize_irq(edev->int_info.msix[i].vector);
1404 			free_irq(edev->int_info.msix[i].vector,
1405 				 &edev->fp_array[i]);
1406 		} else {
1407 			edev->ops->common->simd_handler_clean(edev->cdev, i);
1408 		}
1409 	}
1410 
1411 	edev->int_info.used_cnt = 0;
1412 }
1413 
1414 static int qede_req_msix_irqs(struct qede_dev *edev)
1415 {
1416 	int i, rc;
1417 
1418 	/* Sanity check: the MSI-X vector count must cover all prepared RSS queues */
1419 	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1420 		DP_ERR(edev,
1421 		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1422 		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1423 		return -EINVAL;
1424 	}
1425 
1426 	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1427 		rc = request_irq(edev->int_info.msix[i].vector,
1428 				 qede_msix_fp_int, 0, edev->fp_array[i].name,
1429 				 &edev->fp_array[i]);
1430 		if (rc) {
1431 			DP_ERR(edev, "Request fp %d irq failed\n", i);
1432 			qede_sync_free_irqs(edev);
1433 			return rc;
1434 		}
1435 		DP_VERBOSE(edev, NETIF_MSG_INTR,
1436 			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1437 			   edev->fp_array[i].name, i,
1438 			   &edev->fp_array[i]);
1439 		edev->int_info.used_cnt++;
1440 	}
1441 
1442 	return 0;
1443 }
1444 
1445 static void qede_simd_fp_handler(void *cookie)
1446 {
1447 	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1448 
1449 	napi_schedule_irqoff(&fp->napi);
1450 }
1451 
1452 static int qede_setup_irqs(struct qede_dev *edev)
1453 {
1454 	int i, rc = 0;
1455 
1456 	/* Learn Interrupt configuration */
1457 	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1458 	if (rc)
1459 		return rc;
1460 
1461 	if (edev->int_info.msix_cnt) {
1462 		rc = qede_req_msix_irqs(edev);
1463 		if (rc)
1464 			return rc;
1465 		edev->ndev->irq = edev->int_info.msix[0].vector;
1466 	} else {
1467 		const struct qed_common_ops *ops;
1468 
1469 		/* qed should learn the RSS ids and callbacks */
1470 		ops = edev->ops->common;
1471 		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
1472 			ops->simd_handler_config(edev->cdev,
1473 						 &edev->fp_array[i], i,
1474 						 qede_simd_fp_handler);
1475 		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
1476 	}
1477 	return 0;
1478 }
1479 
1480 static int qede_drain_txq(struct qede_dev *edev,
1481 			  struct qede_tx_queue *txq, bool allow_drain)
1482 {
1483 	int rc, cnt = 1000;
1484 
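	/* With usleep_range(1000, 2000) per iteration, cnt = 1000 gives the
	 * queue roughly one to two seconds to drain before giving up.
	 */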
1485 	while (txq->sw_tx_cons != txq->sw_tx_prod) {
1486 		if (!cnt) {
1487 			if (allow_drain) {
1488 				DP_NOTICE(edev,
1489 					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
1490 					  txq->index);
1491 				rc = edev->ops->common->drain(edev->cdev);
1492 				if (rc)
1493 					return rc;
1494 				return qede_drain_txq(edev, txq, false);
1495 			}
1496 			DP_NOTICE(edev,
1497 				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
1498 				  txq->index, txq->sw_tx_prod,
1499 				  txq->sw_tx_cons);
1500 			return -ENODEV;
1501 		}
1502 		cnt--;
1503 		usleep_range(1000, 2000);
1504 		barrier();
1505 	}
1506 
1507 	/* FW finished processing, wait for HW to transmit all tx packets */
1508 	usleep_range(1000, 2000);
1509 
1510 	return 0;
1511 }
1512 
1513 static int qede_stop_txq(struct qede_dev *edev,
1514 			 struct qede_tx_queue *txq, int rss_id)
1515 {
1516 	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
1517 }
1518 
1519 static int qede_stop_queues(struct qede_dev *edev)
1520 {
1521 	struct qed_update_vport_params *vport_update_params;
1522 	struct qed_dev *cdev = edev->cdev;
1523 	struct qede_fastpath *fp;
1524 	int rc, i;
1525 
1526 	/* Disable the vport */
1527 	vport_update_params = vzalloc(sizeof(*vport_update_params));
1528 	if (!vport_update_params)
1529 		return -ENOMEM;
1530 
1531 	vport_update_params->vport_id = 0;
1532 	vport_update_params->update_vport_active_flg = 1;
1533 	vport_update_params->vport_active_flg = 0;
1534 	vport_update_params->update_rss_flg = 0;
1535 
1536 	rc = edev->ops->vport_update(cdev, vport_update_params);
1537 	vfree(vport_update_params);
1538 
1539 	if (rc) {
1540 		DP_ERR(edev, "Failed to update vport\n");
1541 		return rc;
1542 	}
1543 
1544 	/* Flush Tx queues. If needed, request drain from MCP */
1545 	for_each_queue(i) {
1546 		fp = &edev->fp_array[i];
1547 
1548 		if (fp->type & QEDE_FASTPATH_TX) {
1549 			rc = qede_drain_txq(edev, fp->txq, true);
1550 			if (rc)
1551 				return rc;
1552 		}
1553 
1554 		if (fp->type & QEDE_FASTPATH_XDP) {
1555 			rc = qede_drain_txq(edev, fp->xdp_tx, true);
1556 			if (rc)
1557 				return rc;
1558 		}
1559 	}
1560 
1561 	/* Stop all Queues in reverse order */
1562 	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
1563 		fp = &edev->fp_array[i];
1564 
1565 		/* Stop the Tx Queue(s) */
1566 		if (fp->type & QEDE_FASTPATH_TX) {
1567 			rc = qede_stop_txq(edev, fp->txq, i);
1568 			if (rc)
1569 				return rc;
1570 		}
1571 
1572 		/* Stop the Rx Queue */
1573 		if (fp->type & QEDE_FASTPATH_RX) {
1574 			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
1575 			if (rc) {
1576 				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
1577 				return rc;
1578 			}
1579 		}
1580 
1581 		/* Stop the XDP forwarding queue */
1582 		if (fp->type & QEDE_FASTPATH_XDP) {
1583 			rc = qede_stop_txq(edev, fp->xdp_tx, i);
1584 			if (rc)
1585 				return rc;
1586 
1587 			bpf_prog_put(fp->rxq->xdp_prog);
1588 		}
1589 	}
1590 
1591 	/* Stop the vport */
1592 	rc = edev->ops->vport_stop(cdev, 0);
1593 	if (rc)
1594 		DP_ERR(edev, "Failed to stop VPORT\n");
1595 
1596 	return rc;
1597 }
1598 
1599 static int qede_start_txq(struct qede_dev *edev,
1600 			  struct qede_fastpath *fp,
1601 			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
1602 {
1603 	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
1604 	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
1605 	struct qed_queue_start_common_params params;
1606 	struct qed_txq_start_ret_params ret_params;
1607 	int rc;
1608 
1609 	memset(&params, 0, sizeof(params));
1610 	memset(&ret_params, 0, sizeof(ret_params));
1611 
1612 	/* Let the XDP queue share the queue-zone with one of the regular txqs.
1613 	 * We don't really care about its coalescing.
1614 	 */
1615 	if (txq->is_xdp)
1616 		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
1617 	else
1618 		params.queue_id = txq->index;
1619 
1620 	params.sb = fp->sb_info->igu_sb_id;
1621 	params.sb_idx = sb_idx;
1622 
1623 	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
1624 				   page_cnt, &ret_params);
1625 	if (rc) {
1626 		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
1627 		return rc;
1628 	}
1629 
1630 	txq->doorbell_addr = ret_params.p_doorbell;
1631 	txq->handle = ret_params.p_handle;
1632 
1633 	/* Determine the associated FW consumer address */
1634 	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
1635 
1636 	/* Prepare the doorbell parameters */
1637 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
1638 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1639 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
1640 		  DQ_XCM_ETH_TX_BD_PROD_CMD);
1641 	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1642 
1643 	return rc;
1644 }
1645 
1646 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
1647 {
1648 	int vlan_removal_en = 1;
1649 	struct qed_dev *cdev = edev->cdev;
1650 	struct qed_dev_info *qed_info = &edev->dev_info.common;
1651 	struct qed_update_vport_params *vport_update_params;
1652 	struct qed_queue_start_common_params q_params;
1653 	struct qed_start_vport_params start = {0};
1654 	int rc, i;
1655 
1656 	if (!edev->num_queues) {
1657 		DP_ERR(edev,
1658 		       "Cannot update V-VPORT as active as there are no Rx queues\n");
1659 		return -EINVAL;
1660 	}
1661 
1662 	vport_update_params = vzalloc(sizeof(*vport_update_params));
1663 	if (!vport_update_params)
1664 		return -ENOMEM;
1665 
1666 	start.gro_enable = !edev->gro_disable;
1667 	start.mtu = edev->ndev->mtu;
1668 	start.vport_id = 0;
1669 	start.drop_ttl0 = true;
1670 	start.remove_inner_vlan = vlan_removal_en;
1671 	start.clear_stats = clear_stats;
1672 
1673 	rc = edev->ops->vport_start(cdev, &start);
1674 
1675 	if (rc) {
1676 		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
1677 		goto out;
1678 	}
1679 
1680 	DP_VERBOSE(edev, NETIF_MSG_IFUP,
1681 		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
1682 		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
1683 
1684 	for_each_queue(i) {
1685 		struct qede_fastpath *fp = &edev->fp_array[i];
1686 		dma_addr_t p_phys_table;
1687 		u32 page_cnt;
1688 
1689 		if (fp->type & QEDE_FASTPATH_RX) {
1690 			struct qed_rxq_start_ret_params ret_params;
1691 			struct qede_rx_queue *rxq = fp->rxq;
1692 			__le16 *val;
1693 
1694 			memset(&ret_params, 0, sizeof(ret_params));
1695 			memset(&q_params, 0, sizeof(q_params));
1696 			q_params.queue_id = rxq->rxq_id;
1697 			q_params.vport_id = 0;
1698 			q_params.sb = fp->sb_info->igu_sb_id;
1699 			q_params.sb_idx = RX_PI;
1700 
1701 			p_phys_table =
1702 			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
1703 			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
1704 
1705 			rc = edev->ops->q_rx_start(cdev, i, &q_params,
1706 						   rxq->rx_buf_size,
1707 						   rxq->rx_bd_ring.p_phys_addr,
1708 						   p_phys_table,
1709 						   page_cnt, &ret_params);
1710 			if (rc) {
1711 				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
1712 				       rc);
1713 				goto out;
1714 			}
1715 
1716 			/* Use the return parameters */
1717 			rxq->hw_rxq_prod_addr = ret_params.p_prod;
1718 			rxq->handle = ret_params.p_handle;
1719 
1720 			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
1721 			rxq->hw_cons_ptr = val;
1722 
1723 			qede_update_rx_prod(edev, rxq);
1724 		}
1725 
1726 		if (fp->type & QEDE_FASTPATH_XDP) {
1727 			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
1728 			if (rc)
1729 				goto out;
1730 
1731 			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
1732 			if (IS_ERR(fp->rxq->xdp_prog)) {
1733 				rc = PTR_ERR(fp->rxq->xdp_prog);
1734 				fp->rxq->xdp_prog = NULL;
1735 				goto out;
1736 			}
1737 		}
1738 
1739 		if (fp->type & QEDE_FASTPATH_TX) {
1740 			rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
1741 			if (rc)
1742 				goto out;
1743 		}
1744 	}
1745 
1746 	/* Prepare and send the vport enable */
1747 	vport_update_params->vport_id = start.vport_id;
1748 	vport_update_params->update_vport_active_flg = 1;
1749 	vport_update_params->vport_active_flg = 1;
1750 
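	/* Request Tx switching when running in NPAR mode or when VFs are
	 * present, provided the device supports it.
	 */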
1751 	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
1752 	    qed_info->tx_switching) {
1753 		vport_update_params->update_tx_switching_flg = 1;
1754 		vport_update_params->tx_switching_flg = 1;
1755 	}
1756 
1757 	qede_fill_rss_params(edev, &vport_update_params->rss_params,
1758 			     &vport_update_params->update_rss_flg);
1759 
1760 	rc = edev->ops->vport_update(cdev, vport_update_params);
1761 	if (rc)
1762 		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
1763 
1764 out:
1765 	vfree(vport_update_params);
1766 	return rc;
1767 }
1768 
1769 enum qede_unload_mode {
1770 	QEDE_UNLOAD_NORMAL,
1771 };
1772 
1773 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
1774 			bool is_locked)
1775 {
1776 	struct qed_link_params link_params;
1777 	int rc;
1778 
1779 	DP_INFO(edev, "Starting qede unload\n");
1780 
1781 	if (!is_locked)
1782 		__qede_lock(edev);
1783 
1784 	qede_roce_dev_event_close(edev);
1785 	edev->state = QEDE_STATE_CLOSED;
1786 
1787 	/* Close OS Tx */
1788 	netif_tx_disable(edev->ndev);
1789 	netif_carrier_off(edev->ndev);
1790 
1791 	/* Reset the link */
1792 	memset(&link_params, 0, sizeof(link_params));
1793 	link_params.link_up = false;
1794 	edev->ops->common->set_link(edev->cdev, &link_params);
1795 	rc = qede_stop_queues(edev);
1796 	if (rc) {
1797 		qede_sync_free_irqs(edev);
1798 		goto out;
1799 	}
1800 
1801 	DP_INFO(edev, "Stopped Queues\n");
1802 
1803 	qede_vlan_mark_nonconfigured(edev);
1804 	edev->ops->fastpath_stop(edev->cdev);
1805 
1806 	/* Release the interrupts */
1807 	qede_sync_free_irqs(edev);
1808 	edev->ops->common->set_fp_int(edev->cdev, 0);
1809 
1810 	qede_napi_disable_remove(edev);
1811 
1812 	qede_free_mem_load(edev);
1813 	qede_free_fp_array(edev);
1814 
1815 out:
1816 	if (!is_locked)
1817 		__qede_unlock(edev);
1818 	DP_INFO(edev, "Ending qede unload\n");
1819 }
1820 
1821 enum qede_load_mode {
1822 	QEDE_LOAD_NORMAL,
1823 	QEDE_LOAD_RELOAD,
1824 };
1825 
1826 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
1827 		     bool is_locked)
1828 {
1829 	struct qed_link_params link_params;
1830 	struct qed_link_output link_output;
1831 	int rc;
1832 
1833 	DP_INFO(edev, "Starting qede load\n");
1834 
1835 	if (!is_locked)
1836 		__qede_lock(edev);
1837 
1838 	rc = qede_set_num_queues(edev);
1839 	if (rc)
1840 		goto out;
1841 
1842 	rc = qede_alloc_fp_array(edev);
1843 	if (rc)
1844 		goto out;
1845 
1846 	qede_init_fp(edev);
1847 
1848 	rc = qede_alloc_mem_load(edev);
1849 	if (rc)
1850 		goto err1;
1851 	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
1852 		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
1853 
1854 	rc = qede_set_real_num_queues(edev);
1855 	if (rc)
1856 		goto err2;
1857 
1858 	qede_napi_add_enable(edev);
1859 	DP_INFO(edev, "Napi added and enabled\n");
1860 
1861 	rc = qede_setup_irqs(edev);
1862 	if (rc)
1863 		goto err3;
1864 	DP_INFO(edev, "Setup IRQs succeeded\n");
1865 
1866 	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
1867 	if (rc)
1868 		goto err4;
1869 	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
1870 
1871 	/* Add primary mac and set Rx filters */
1872 	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
1873 
1874 	/* Program un-configured VLANs */
1875 	qede_configure_vlan_filters(edev);
1876 
1877 	/* Ask for link-up using current configuration */
1878 	memset(&link_params, 0, sizeof(link_params));
1879 	link_params.link_up = true;
1880 	edev->ops->common->set_link(edev->cdev, &link_params);
1881 
1882 	/* Query whether link is already-up */
1883 	memset(&link_output, 0, sizeof(link_output));
1884 	edev->ops->common->get_link(edev->cdev, &link_output);
1885 	qede_roce_dev_event_open(edev);
1886 	qede_link_update(edev, &link_output);
1887 
1888 	edev->state = QEDE_STATE_OPEN;
1889 
1890 	DP_INFO(edev, "Ending successfully qede load\n");
1891 
1892 
1893 	goto out;
1894 err4:
1895 	qede_sync_free_irqs(edev);
1896 	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
1897 err3:
1898 	qede_napi_disable_remove(edev);
1899 err2:
1900 	qede_free_mem_load(edev);
1901 err1:
1902 	edev->ops->common->set_fp_int(edev->cdev, 0);
1903 	qede_free_fp_array(edev);
1904 	edev->num_queues = 0;
1905 	edev->fp_num_tx = 0;
1906 	edev->fp_num_rx = 0;
1907 out:
1908 	if (!is_locked)
1909 		__qede_unlock(edev);
1910 
1911 	return rc;
1912 }
1913 
1914 /* 'func' should be able to run between unload and reload, assuming the interface
1915  * is actually running, or afterwards in case it's currently DOWN.
1916  */
1917 void qede_reload(struct qede_dev *edev,
1918 		 struct qede_reload_args *args, bool is_locked)
1919 {
1920 	if (!is_locked)
1921 		__qede_lock(edev);
1922 
1923 	/* Since qede_lock is held, the internal state can't change even
1924 	 * if the netdev state starts transitioning. Check whether the current
1925 	 * internal configuration indicates the device is up, then reload.
1926 	 */
1927 	if (edev->state == QEDE_STATE_OPEN) {
1928 		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
1929 		if (args)
1930 			args->func(edev, args);
1931 		qede_load(edev, QEDE_LOAD_RELOAD, true);
1932 
1933 		/* Since no one is going to do it for us, re-configure */
1934 		qede_config_rx_mode(edev->ndev);
1935 	} else if (args) {
1936 		args->func(edev, args);
1937 	}
1938 
1939 	if (!is_locked)
1940 		__qede_unlock(edev);
1941 }
1942 
1943 /* called with rtnl_lock */
1944 static int qede_open(struct net_device *ndev)
1945 {
1946 	struct qede_dev *edev = netdev_priv(ndev);
1947 	int rc;
1948 
1949 	netif_carrier_off(ndev);
1950 
1951 	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
1952 
1953 	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
1954 	if (rc)
1955 		return rc;
1956 
1957 	udp_tunnel_get_rx_info(ndev);
1958 
1959 	edev->ops->common->update_drv_state(edev->cdev, true);
1960 
1961 	return 0;
1962 }
1963 
1964 static int qede_close(struct net_device *ndev)
1965 {
1966 	struct qede_dev *edev = netdev_priv(ndev);
1967 
1968 	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
1969 
1970 	edev->ops->common->update_drv_state(edev->cdev, false);
1971 
1972 	return 0;
1973 }
1974 
1975 static void qede_link_update(void *dev, struct qed_link_output *link)
1976 {
1977 	struct qede_dev *edev = dev;
1978 
1979 	if (!netif_running(edev->ndev)) {
1980 		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
1981 		return;
1982 	}
1983 
1984 	if (link->link_up) {
1985 		if (!netif_carrier_ok(edev->ndev)) {
1986 			DP_NOTICE(edev, "Link is up\n");
1987 			netif_tx_start_all_queues(edev->ndev);
1988 			netif_carrier_on(edev->ndev);
1989 		}
1990 	} else {
1991 		if (netif_carrier_ok(edev->ndev)) {
1992 			DP_NOTICE(edev, "Link is down\n");
1993 			netif_tx_disable(edev->ndev);
1994 			netif_carrier_off(edev->ndev);
1995 		}
1996 	}
1997 }
1998