1 /* QLogic qede NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/pci.h>
34 #include <linux/version.h>
35 #include <linux/device.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/errno.h>
40 #include <linux/list.h>
41 #include <linux/string.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/interrupt.h>
44 #include <asm/byteorder.h>
45 #include <asm/param.h>
46 #include <linux/io.h>
47 #include <linux/netdev_features.h>
48 #include <linux/udp.h>
49 #include <linux/tcp.h>
50 #include <net/udp_tunnel.h>
51 #include <linux/ip.h>
52 #include <net/ipv6.h>
53 #include <net/tcp.h>
54 #include <linux/if_ether.h>
55 #include <linux/if_vlan.h>
56 #include <linux/pkt_sched.h>
57 #include <linux/ethtool.h>
58 #include <linux/in.h>
59 #include <linux/random.h>
60 #include <net/ip6_checksum.h>
61 #include <linux/bitops.h>
62 #include <linux/vmalloc.h>
63 #include "qede.h"
64 #include "qede_ptp.h"
65 
66 static char version[] =
67 	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
68 
69 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72 
73 static uint debug;
74 module_param(debug, uint, 0);
75 MODULE_PARM_DESC(debug, " Default debug msglevel");
76 
77 static const struct qed_eth_ops *qed_ops;
78 
79 #define CHIP_NUM_57980S_40		0x1634
80 #define CHIP_NUM_57980S_10		0x1666
81 #define CHIP_NUM_57980S_MF		0x1636
82 #define CHIP_NUM_57980S_100		0x1644
83 #define CHIP_NUM_57980S_50		0x1654
84 #define CHIP_NUM_57980S_25		0x1656
85 #define CHIP_NUM_57980S_IOV		0x1664
86 #define CHIP_NUM_AH			0x8070
87 #define CHIP_NUM_AH_IOV			0x8090
88 
89 #ifndef PCI_DEVICE_ID_NX2_57980E
90 #define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
91 #define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
92 #define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
93 #define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
94 #define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
95 #define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
96 #define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
97 #define PCI_DEVICE_ID_AH		CHIP_NUM_AH
98 #define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
99 
100 #endif
101 
102 enum qede_pci_private {
103 	QEDE_PRIVATE_PF,
104 	QEDE_PRIVATE_VF
105 };
106 
107 static const struct pci_device_id qede_pci_tbl[] = {
108 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
109 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
110 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
111 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
112 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
113 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
114 #ifdef CONFIG_QED_SRIOV
115 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
116 #endif
117 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
118 #ifdef CONFIG_QED_SRIOV
119 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
120 #endif
121 	{ 0 }
122 };
123 
124 MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
125 
126 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
127 
128 #define TX_TIMEOUT		(5 * HZ)
129 
130 /* Utilize last protocol index for XDP */
131 #define XDP_PI	11
132 
133 static void qede_remove(struct pci_dev *pdev);
134 static void qede_shutdown(struct pci_dev *pdev);
135 static void qede_link_update(void *dev, struct qed_link_output *link);
136 
137 /* The qede lock is used to protect driver state change and driver flows that
138  * are not reentrant.
139  */
140 void __qede_lock(struct qede_dev *edev)
141 {
142 	mutex_lock(&edev->qede_lock);
143 }
144 
145 void __qede_unlock(struct qede_dev *edev)
146 {
147 	mutex_unlock(&edev->qede_lock);
148 }
149 
150 #ifdef CONFIG_QED_SRIOV
151 static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
152 			    __be16 vlan_proto)
153 {
154 	struct qede_dev *edev = netdev_priv(ndev);
155 
156 	if (vlan > 4095) {
157 		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
158 		return -EINVAL;
159 	}
160 
161 	if (vlan_proto != htons(ETH_P_8021Q))
162 		return -EPROTONOSUPPORT;
163 
164 	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
165 		   vlan, vf);
166 
167 	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
168 }
169 
170 static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
171 {
172 	struct qede_dev *edev = netdev_priv(ndev);
173 
174 	DP_VERBOSE(edev, QED_MSG_IOV,
175 		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
176 		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
177 
178 	if (!is_valid_ether_addr(mac)) {
179 		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
180 		return -EINVAL;
181 	}
182 
183 	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
184 }
185 
186 static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
187 {
188 	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
189 	struct qed_dev_info *qed_info = &edev->dev_info.common;
190 	struct qed_update_vport_params *vport_params;
191 	int rc;
192 
193 	vport_params = vzalloc(sizeof(*vport_params));
194 	if (!vport_params)
195 		return -ENOMEM;
196 	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
197 
198 	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
199 
200 	/* Enable/Disable Tx switching for PF */
201 	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
202 	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
203 		vport_params->vport_id = 0;
204 		vport_params->update_tx_switching_flg = 1;
205 		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
206 		edev->ops->vport_update(edev->cdev, vport_params);
207 	}
208 
209 	vfree(vport_params);
210 	return rc;
211 }
212 #endif
213 
214 static struct pci_driver qede_pci_driver = {
215 	.name = "qede",
216 	.id_table = qede_pci_tbl,
217 	.probe = qede_probe,
218 	.remove = qede_remove,
219 	.shutdown = qede_shutdown,
220 #ifdef CONFIG_QED_SRIOV
221 	.sriov_configure = qede_sriov_configure,
222 #endif
223 };
224 
225 static struct qed_eth_cb_ops qede_ll_ops = {
226 	{
227 #ifdef CONFIG_RFS_ACCEL
228 		.arfs_filter_op = qede_arfs_filter_op,
229 #endif
230 		.link_update = qede_link_update,
231 	},
232 	.force_mac = qede_force_mac,
233 	.ports_update = qede_udp_ports_update,
234 };
235 
236 static int qede_netdev_event(struct notifier_block *this, unsigned long event,
237 			     void *ptr)
238 {
239 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
240 	struct ethtool_drvinfo drvinfo;
241 	struct qede_dev *edev;
242 
243 	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
244 		goto done;
245 
246 	/* Check whether this is a qede device */
247 	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
248 		goto done;
249 
250 	memset(&drvinfo, 0, sizeof(drvinfo));
251 	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
252 	if (strcmp(drvinfo.driver, "qede"))
253 		goto done;
254 	edev = netdev_priv(ndev);
255 
256 	switch (event) {
257 	case NETDEV_CHANGENAME:
258 		/* Notify qed of the name change */
259 		if (!edev->ops || !edev->ops->common)
260 			goto done;
261 		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
262 		break;
263 	case NETDEV_CHANGEADDR:
264 		edev = netdev_priv(ndev);
265 		qede_rdma_event_changeaddr(edev);
266 		break;
267 	}
268 
269 done:
270 	return NOTIFY_DONE;
271 }
272 
273 static struct notifier_block qede_netdev_notifier = {
274 	.notifier_call = qede_netdev_event,
275 };
276 
277 static
278 int __init qede_init(void)
279 {
280 	int ret;
281 
282 	pr_info("qede_init: %s\n", version);
283 
284 	qed_ops = qed_get_eth_ops();
285 	if (!qed_ops) {
286 		pr_notice("Failed to get qed ethtool operations\n");
287 		return -EINVAL;
288 	}
289 
290 	/* Must register notifier before pci ops, since we might miss
291 	 * interface rename after pci probe and netdev registeration.
292 	 */
293 	ret = register_netdevice_notifier(&qede_netdev_notifier);
294 	if (ret) {
295 		pr_notice("Failed to register netdevice_notifier\n");
296 		qed_put_eth_ops();
297 		return -EINVAL;
298 	}
299 
300 	ret = pci_register_driver(&qede_pci_driver);
301 	if (ret) {
302 		pr_notice("Failed to register driver\n");
303 		unregister_netdevice_notifier(&qede_netdev_notifier);
304 		qed_put_eth_ops();
305 		return -EINVAL;
306 	}
307 
308 	return 0;
309 }
310 
311 static void __exit qede_cleanup(void)
312 {
313 	if (debug & QED_LOG_INFO_MASK)
314 		pr_info("qede_cleanup called\n");
315 
316 	unregister_netdevice_notifier(&qede_netdev_notifier);
317 	pci_unregister_driver(&qede_pci_driver);
318 	qed_put_eth_ops();
319 }
320 
321 module_init(qede_init);
322 module_exit(qede_cleanup);
323 
324 static int qede_open(struct net_device *ndev);
325 static int qede_close(struct net_device *ndev);
326 
327 void qede_fill_by_demand_stats(struct qede_dev *edev)
328 {
329 	struct qede_stats_common *p_common = &edev->stats.common;
330 	struct qed_eth_stats stats;
331 
332 	edev->ops->get_vport_stats(edev->cdev, &stats);
333 
334 	p_common->no_buff_discards = stats.common.no_buff_discards;
335 	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
336 	p_common->ttl0_discard = stats.common.ttl0_discard;
337 	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
338 	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
339 	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
340 	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
341 	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
342 	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
343 	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
344 	p_common->mac_filter_discards = stats.common.mac_filter_discards;
345 
346 	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
347 	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
348 	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
349 	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
350 	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
351 	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
352 	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
353 	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
354 	p_common->coalesced_events = stats.common.tpa_coalesced_events;
355 	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
356 	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
357 	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
358 
359 	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
360 	p_common->rx_65_to_127_byte_packets =
361 	    stats.common.rx_65_to_127_byte_packets;
362 	p_common->rx_128_to_255_byte_packets =
363 	    stats.common.rx_128_to_255_byte_packets;
364 	p_common->rx_256_to_511_byte_packets =
365 	    stats.common.rx_256_to_511_byte_packets;
366 	p_common->rx_512_to_1023_byte_packets =
367 	    stats.common.rx_512_to_1023_byte_packets;
368 	p_common->rx_1024_to_1518_byte_packets =
369 	    stats.common.rx_1024_to_1518_byte_packets;
370 	p_common->rx_crc_errors = stats.common.rx_crc_errors;
371 	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
372 	p_common->rx_pause_frames = stats.common.rx_pause_frames;
373 	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
374 	p_common->rx_align_errors = stats.common.rx_align_errors;
375 	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
376 	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
377 	p_common->rx_jabbers = stats.common.rx_jabbers;
378 	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
379 	p_common->rx_fragments = stats.common.rx_fragments;
380 	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
381 	p_common->tx_65_to_127_byte_packets =
382 	    stats.common.tx_65_to_127_byte_packets;
383 	p_common->tx_128_to_255_byte_packets =
384 	    stats.common.tx_128_to_255_byte_packets;
385 	p_common->tx_256_to_511_byte_packets =
386 	    stats.common.tx_256_to_511_byte_packets;
387 	p_common->tx_512_to_1023_byte_packets =
388 	    stats.common.tx_512_to_1023_byte_packets;
389 	p_common->tx_1024_to_1518_byte_packets =
390 	    stats.common.tx_1024_to_1518_byte_packets;
391 	p_common->tx_pause_frames = stats.common.tx_pause_frames;
392 	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
393 	p_common->brb_truncates = stats.common.brb_truncates;
394 	p_common->brb_discards = stats.common.brb_discards;
395 	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
396 
397 	if (QEDE_IS_BB(edev)) {
398 		struct qede_stats_bb *p_bb = &edev->stats.bb;
399 
400 		p_bb->rx_1519_to_1522_byte_packets =
401 		    stats.bb.rx_1519_to_1522_byte_packets;
402 		p_bb->rx_1519_to_2047_byte_packets =
403 		    stats.bb.rx_1519_to_2047_byte_packets;
404 		p_bb->rx_2048_to_4095_byte_packets =
405 		    stats.bb.rx_2048_to_4095_byte_packets;
406 		p_bb->rx_4096_to_9216_byte_packets =
407 		    stats.bb.rx_4096_to_9216_byte_packets;
408 		p_bb->rx_9217_to_16383_byte_packets =
409 		    stats.bb.rx_9217_to_16383_byte_packets;
410 		p_bb->tx_1519_to_2047_byte_packets =
411 		    stats.bb.tx_1519_to_2047_byte_packets;
412 		p_bb->tx_2048_to_4095_byte_packets =
413 		    stats.bb.tx_2048_to_4095_byte_packets;
414 		p_bb->tx_4096_to_9216_byte_packets =
415 		    stats.bb.tx_4096_to_9216_byte_packets;
416 		p_bb->tx_9217_to_16383_byte_packets =
417 		    stats.bb.tx_9217_to_16383_byte_packets;
418 		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
419 		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
420 	} else {
421 		struct qede_stats_ah *p_ah = &edev->stats.ah;
422 
423 		p_ah->rx_1519_to_max_byte_packets =
424 		    stats.ah.rx_1519_to_max_byte_packets;
425 		p_ah->tx_1519_to_max_byte_packets =
426 		    stats.ah.tx_1519_to_max_byte_packets;
427 	}
428 }
429 
430 static void qede_get_stats64(struct net_device *dev,
431 			     struct rtnl_link_stats64 *stats)
432 {
433 	struct qede_dev *edev = netdev_priv(dev);
434 	struct qede_stats_common *p_common;
435 
436 	qede_fill_by_demand_stats(edev);
437 	p_common = &edev->stats.common;
438 
439 	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
440 			    p_common->rx_bcast_pkts;
441 	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
442 			    p_common->tx_bcast_pkts;
443 
444 	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
445 			  p_common->rx_bcast_bytes;
446 	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
447 			  p_common->tx_bcast_bytes;
448 
449 	stats->tx_errors = p_common->tx_err_drop_pkts;
450 	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
451 
452 	stats->rx_fifo_errors = p_common->no_buff_discards;
453 
454 	if (QEDE_IS_BB(edev))
455 		stats->collisions = edev->stats.bb.tx_total_collisions;
456 	stats->rx_crc_errors = p_common->rx_crc_errors;
457 	stats->rx_frame_errors = p_common->rx_align_errors;
458 }
459 
460 #ifdef CONFIG_QED_SRIOV
461 static int qede_get_vf_config(struct net_device *dev, int vfidx,
462 			      struct ifla_vf_info *ivi)
463 {
464 	struct qede_dev *edev = netdev_priv(dev);
465 
466 	if (!edev->ops)
467 		return -EINVAL;
468 
469 	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
470 }
471 
472 static int qede_set_vf_rate(struct net_device *dev, int vfidx,
473 			    int min_tx_rate, int max_tx_rate)
474 {
475 	struct qede_dev *edev = netdev_priv(dev);
476 
477 	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
478 					max_tx_rate);
479 }
480 
481 static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
482 {
483 	struct qede_dev *edev = netdev_priv(dev);
484 
485 	if (!edev->ops)
486 		return -EINVAL;
487 
488 	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
489 }
490 
491 static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
492 				  int link_state)
493 {
494 	struct qede_dev *edev = netdev_priv(dev);
495 
496 	if (!edev->ops)
497 		return -EINVAL;
498 
499 	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
500 }
501 
502 static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
503 {
504 	struct qede_dev *edev = netdev_priv(dev);
505 
506 	if (!edev->ops)
507 		return -EINVAL;
508 
509 	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
510 }
511 #endif
512 
513 static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
514 {
515 	struct qede_dev *edev = netdev_priv(dev);
516 
517 	if (!netif_running(dev))
518 		return -EAGAIN;
519 
520 	switch (cmd) {
521 	case SIOCSHWTSTAMP:
522 		return qede_ptp_hw_ts(edev, ifr);
523 	default:
524 		DP_VERBOSE(edev, QED_MSG_DEBUG,
525 			   "default IOCTL cmd 0x%x\n", cmd);
526 		return -EOPNOTSUPP;
527 	}
528 
529 	return 0;
530 }
531 
532 static const struct net_device_ops qede_netdev_ops = {
533 	.ndo_open = qede_open,
534 	.ndo_stop = qede_close,
535 	.ndo_start_xmit = qede_start_xmit,
536 	.ndo_set_rx_mode = qede_set_rx_mode,
537 	.ndo_set_mac_address = qede_set_mac_addr,
538 	.ndo_validate_addr = eth_validate_addr,
539 	.ndo_change_mtu = qede_change_mtu,
540 	.ndo_do_ioctl = qede_ioctl,
541 #ifdef CONFIG_QED_SRIOV
542 	.ndo_set_vf_mac = qede_set_vf_mac,
543 	.ndo_set_vf_vlan = qede_set_vf_vlan,
544 	.ndo_set_vf_trust = qede_set_vf_trust,
545 #endif
546 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
547 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
548 	.ndo_set_features = qede_set_features,
549 	.ndo_get_stats64 = qede_get_stats64,
550 #ifdef CONFIG_QED_SRIOV
551 	.ndo_set_vf_link_state = qede_set_vf_link_state,
552 	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
553 	.ndo_get_vf_config = qede_get_vf_config,
554 	.ndo_set_vf_rate = qede_set_vf_rate,
555 #endif
556 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
557 	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
558 	.ndo_features_check = qede_features_check,
559 	.ndo_xdp = qede_xdp,
560 #ifdef CONFIG_RFS_ACCEL
561 	.ndo_rx_flow_steer = qede_rx_flow_steer,
562 #endif
563 };
564 
565 static const struct net_device_ops qede_netdev_vf_ops = {
566 	.ndo_open = qede_open,
567 	.ndo_stop = qede_close,
568 	.ndo_start_xmit = qede_start_xmit,
569 	.ndo_set_rx_mode = qede_set_rx_mode,
570 	.ndo_set_mac_address = qede_set_mac_addr,
571 	.ndo_validate_addr = eth_validate_addr,
572 	.ndo_change_mtu = qede_change_mtu,
573 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
574 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
575 	.ndo_set_features = qede_set_features,
576 	.ndo_get_stats64 = qede_get_stats64,
577 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
578 	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
579 	.ndo_features_check = qede_features_check,
580 };
581 
582 static const struct net_device_ops qede_netdev_vf_xdp_ops = {
583 	.ndo_open = qede_open,
584 	.ndo_stop = qede_close,
585 	.ndo_start_xmit = qede_start_xmit,
586 	.ndo_set_rx_mode = qede_set_rx_mode,
587 	.ndo_set_mac_address = qede_set_mac_addr,
588 	.ndo_validate_addr = eth_validate_addr,
589 	.ndo_change_mtu = qede_change_mtu,
590 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
591 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
592 	.ndo_set_features = qede_set_features,
593 	.ndo_get_stats64 = qede_get_stats64,
594 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
595 	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
596 	.ndo_features_check = qede_features_check,
597 	.ndo_xdp = qede_xdp,
598 };
599 
600 /* -------------------------------------------------------------------------
601  * START OF PROBE / REMOVE
602  * -------------------------------------------------------------------------
603  */
604 
605 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
606 					    struct pci_dev *pdev,
607 					    struct qed_dev_eth_info *info,
608 					    u32 dp_module, u8 dp_level)
609 {
610 	struct net_device *ndev;
611 	struct qede_dev *edev;
612 
613 	ndev = alloc_etherdev_mqs(sizeof(*edev),
614 				  info->num_queues, info->num_queues);
615 	if (!ndev) {
616 		pr_err("etherdev allocation failed\n");
617 		return NULL;
618 	}
619 
620 	edev = netdev_priv(ndev);
621 	edev->ndev = ndev;
622 	edev->cdev = cdev;
623 	edev->pdev = pdev;
624 	edev->dp_module = dp_module;
625 	edev->dp_level = dp_level;
626 	edev->ops = qed_ops;
627 	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
628 	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
629 
630 	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
631 		info->num_queues, info->num_queues);
632 
633 	SET_NETDEV_DEV(ndev, &pdev->dev);
634 
635 	memset(&edev->stats, 0, sizeof(edev->stats));
636 	memcpy(&edev->dev_info, info, sizeof(*info));
637 
638 	/* As ethtool doesn't have the ability to show WoL behavior as
639 	 * 'default', if device supports it declare it's enabled.
640 	 */
641 	if (edev->dev_info.common.wol_support)
642 		edev->wol_enabled = true;
643 
644 	INIT_LIST_HEAD(&edev->vlan_list);
645 
646 	return edev;
647 }
648 
649 static void qede_init_ndev(struct qede_dev *edev)
650 {
651 	struct net_device *ndev = edev->ndev;
652 	struct pci_dev *pdev = edev->pdev;
653 	bool udp_tunnel_enable = false;
654 	netdev_features_t hw_features;
655 
656 	pci_set_drvdata(pdev, ndev);
657 
658 	ndev->mem_start = edev->dev_info.common.pci_mem_start;
659 	ndev->base_addr = ndev->mem_start;
660 	ndev->mem_end = edev->dev_info.common.pci_mem_end;
661 	ndev->irq = edev->dev_info.common.pci_irq;
662 
663 	ndev->watchdog_timeo = TX_TIMEOUT;
664 
665 	if (IS_VF(edev)) {
666 		if (edev->dev_info.xdp_supported)
667 			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
668 		else
669 			ndev->netdev_ops = &qede_netdev_vf_ops;
670 	} else {
671 		ndev->netdev_ops = &qede_netdev_ops;
672 	}
673 
674 	qede_set_ethtool_ops(ndev);
675 
676 	ndev->priv_flags |= IFF_UNICAST_FLT;
677 
678 	/* user-changeble features */
679 	hw_features = NETIF_F_GRO | NETIF_F_SG |
680 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
681 		      NETIF_F_TSO | NETIF_F_TSO6;
682 
683 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
684 		hw_features |= NETIF_F_NTUPLE;
685 
686 	if (edev->dev_info.common.vxlan_enable ||
687 	    edev->dev_info.common.geneve_enable)
688 		udp_tunnel_enable = true;
689 
690 	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
691 		hw_features |= NETIF_F_TSO_ECN;
692 		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
693 					NETIF_F_SG | NETIF_F_TSO |
694 					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
695 					NETIF_F_RXCSUM;
696 	}
697 
698 	if (udp_tunnel_enable) {
699 		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
700 				NETIF_F_GSO_UDP_TUNNEL_CSUM);
701 		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
702 					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
703 	}
704 
705 	if (edev->dev_info.common.gre_enable) {
706 		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
707 		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
708 					  NETIF_F_GSO_GRE_CSUM);
709 	}
710 
711 	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
712 			      NETIF_F_HIGHDMA;
713 	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
714 			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
715 			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
716 
717 	ndev->hw_features = hw_features;
718 
719 	/* MTU range: 46 - 9600 */
720 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
721 	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
722 
723 	/* Set network device HW mac */
724 	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
725 
726 	ndev->mtu = edev->dev_info.common.mtu;
727 }
728 
729 /* This function converts from 32b param to two params of level and module
730  * Input 32b decoding:
731  * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
732  * 'happy' flow, e.g. memory allocation failed.
733  * b30 - enable all INFO prints. INFO prints are for major steps in the flow
734  * and provide important parameters.
735  * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
736  * module. VERBOSE prints are for tracking the specific flow in low level.
737  *
738  * Notice that the level should be that of the lowest required logs.
739  */
740 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
741 {
742 	*p_dp_level = QED_LEVEL_NOTICE;
743 	*p_dp_module = 0;
744 
745 	if (debug & QED_LOG_VERBOSE_MASK) {
746 		*p_dp_level = QED_LEVEL_VERBOSE;
747 		*p_dp_module = (debug & 0x3FFFFFFF);
748 	} else if (debug & QED_LOG_INFO_MASK) {
749 		*p_dp_level = QED_LEVEL_INFO;
750 	} else if (debug & QED_LOG_NOTICE_MASK) {
751 		*p_dp_level = QED_LEVEL_NOTICE;
752 	}
753 }
754 
755 static void qede_free_fp_array(struct qede_dev *edev)
756 {
757 	if (edev->fp_array) {
758 		struct qede_fastpath *fp;
759 		int i;
760 
761 		for_each_queue(i) {
762 			fp = &edev->fp_array[i];
763 
764 			kfree(fp->sb_info);
765 			kfree(fp->rxq);
766 			kfree(fp->xdp_tx);
767 			kfree(fp->txq);
768 		}
769 		kfree(edev->fp_array);
770 	}
771 
772 	edev->num_queues = 0;
773 	edev->fp_num_tx = 0;
774 	edev->fp_num_rx = 0;
775 }
776 
777 static int qede_alloc_fp_array(struct qede_dev *edev)
778 {
779 	u8 fp_combined, fp_rx = edev->fp_num_rx;
780 	struct qede_fastpath *fp;
781 	int i;
782 
783 	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
784 				 sizeof(*edev->fp_array), GFP_KERNEL);
785 	if (!edev->fp_array) {
786 		DP_NOTICE(edev, "fp array allocation failed\n");
787 		goto err;
788 	}
789 
790 	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
791 
792 	/* Allocate the FP elements for Rx queues followed by combined and then
793 	 * the Tx. This ordering should be maintained so that the respective
794 	 * queues (Rx or Tx) will be together in the fastpath array and the
795 	 * associated ids will be sequential.
796 	 */
797 	for_each_queue(i) {
798 		fp = &edev->fp_array[i];
799 
800 		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
801 		if (!fp->sb_info) {
802 			DP_NOTICE(edev, "sb info struct allocation failed\n");
803 			goto err;
804 		}
805 
806 		if (fp_rx) {
807 			fp->type = QEDE_FASTPATH_RX;
808 			fp_rx--;
809 		} else if (fp_combined) {
810 			fp->type = QEDE_FASTPATH_COMBINED;
811 			fp_combined--;
812 		} else {
813 			fp->type = QEDE_FASTPATH_TX;
814 		}
815 
816 		if (fp->type & QEDE_FASTPATH_TX) {
817 			fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
818 			if (!fp->txq)
819 				goto err;
820 		}
821 
822 		if (fp->type & QEDE_FASTPATH_RX) {
823 			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
824 			if (!fp->rxq)
825 				goto err;
826 
827 			if (edev->xdp_prog) {
828 				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
829 						     GFP_KERNEL);
830 				if (!fp->xdp_tx)
831 					goto err;
832 				fp->type |= QEDE_FASTPATH_XDP;
833 			}
834 		}
835 	}
836 
837 	return 0;
838 err:
839 	qede_free_fp_array(edev);
840 	return -ENOMEM;
841 }
842 
843 static void qede_sp_task(struct work_struct *work)
844 {
845 	struct qede_dev *edev = container_of(work, struct qede_dev,
846 					     sp_task.work);
847 
848 	__qede_lock(edev);
849 
850 	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
851 		if (edev->state == QEDE_STATE_OPEN)
852 			qede_config_rx_mode(edev->ndev);
853 
854 #ifdef CONFIG_RFS_ACCEL
855 	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
856 		if (edev->state == QEDE_STATE_OPEN)
857 			qede_process_arfs_filters(edev, false);
858 	}
859 #endif
860 	__qede_unlock(edev);
861 }
862 
863 static void qede_update_pf_params(struct qed_dev *cdev)
864 {
865 	struct qed_pf_params pf_params;
866 
867 	/* 64 rx + 64 tx + 64 XDP */
868 	memset(&pf_params, 0, sizeof(struct qed_pf_params));
869 	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
870 
871 	/* Same for VFs - make sure they'll have sufficient connections
872 	 * to support XDP Tx queues.
873 	 */
874 	pf_params.eth_pf_params.num_vf_cons = 48;
875 
876 	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
877 	qed_ops->common->update_pf_params(cdev, &pf_params);
878 }
879 
880 #define QEDE_FW_VER_STR_SIZE	80
881 
882 static void qede_log_probe(struct qede_dev *edev)
883 {
884 	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
885 	u8 buf[QEDE_FW_VER_STR_SIZE];
886 	size_t left_size;
887 
888 	snprintf(buf, QEDE_FW_VER_STR_SIZE,
889 		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
890 		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
891 		 p_dev_info->fw_eng,
892 		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
893 		 QED_MFW_VERSION_3_OFFSET,
894 		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
895 		 QED_MFW_VERSION_2_OFFSET,
896 		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
897 		 QED_MFW_VERSION_1_OFFSET,
898 		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
899 		 QED_MFW_VERSION_0_OFFSET);
900 
901 	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
902 	if (p_dev_info->mbi_version && left_size)
903 		snprintf(buf + strlen(buf), left_size,
904 			 " [MBI %d.%d.%d]",
905 			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
906 			 QED_MBI_VERSION_2_OFFSET,
907 			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
908 			 QED_MBI_VERSION_1_OFFSET,
909 			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
910 			 QED_MBI_VERSION_0_OFFSET);
911 
912 	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
913 		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
914 		buf, edev->ndev->name);
915 }
916 
917 enum qede_probe_mode {
918 	QEDE_PROBE_NORMAL,
919 };
920 
921 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
922 			bool is_vf, enum qede_probe_mode mode)
923 {
924 	struct qed_probe_params probe_params;
925 	struct qed_slowpath_params sp_params;
926 	struct qed_dev_eth_info dev_info;
927 	struct qede_dev *edev;
928 	struct qed_dev *cdev;
929 	int rc;
930 
931 	if (unlikely(dp_level & QED_LEVEL_INFO))
932 		pr_notice("Starting qede probe\n");
933 
934 	memset(&probe_params, 0, sizeof(probe_params));
935 	probe_params.protocol = QED_PROTOCOL_ETH;
936 	probe_params.dp_module = dp_module;
937 	probe_params.dp_level = dp_level;
938 	probe_params.is_vf = is_vf;
939 	cdev = qed_ops->common->probe(pdev, &probe_params);
940 	if (!cdev) {
941 		rc = -ENODEV;
942 		goto err0;
943 	}
944 
945 	qede_update_pf_params(cdev);
946 
947 	/* Start the Slowpath-process */
948 	memset(&sp_params, 0, sizeof(sp_params));
949 	sp_params.int_mode = QED_INT_MODE_MSIX;
950 	sp_params.drv_major = QEDE_MAJOR_VERSION;
951 	sp_params.drv_minor = QEDE_MINOR_VERSION;
952 	sp_params.drv_rev = QEDE_REVISION_VERSION;
953 	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
954 	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
955 	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
956 	if (rc) {
957 		pr_notice("Cannot start slowpath\n");
958 		goto err1;
959 	}
960 
961 	/* Learn information crucial for qede to progress */
962 	rc = qed_ops->fill_dev_info(cdev, &dev_info);
963 	if (rc)
964 		goto err2;
965 
966 	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
967 				   dp_level);
968 	if (!edev) {
969 		rc = -ENOMEM;
970 		goto err2;
971 	}
972 
973 	if (is_vf)
974 		edev->flags |= QEDE_FLAG_IS_VF;
975 
976 	qede_init_ndev(edev);
977 
978 	rc = qede_rdma_dev_add(edev);
979 	if (rc)
980 		goto err3;
981 
982 	/* Prepare the lock prior to the registeration of the netdev,
983 	 * as once it's registered we might reach flows requiring it
984 	 * [it's even possible to reach a flow needing it directly
985 	 * from there, although it's unlikely].
986 	 */
987 	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
988 	mutex_init(&edev->qede_lock);
989 	rc = register_netdev(edev->ndev);
990 	if (rc) {
991 		DP_NOTICE(edev, "Cannot register net-device\n");
992 		goto err4;
993 	}
994 
995 	edev->ops->common->set_name(cdev, edev->ndev->name);
996 
997 	/* PTP not supported on VFs */
998 	if (!is_vf)
999 		qede_ptp_enable(edev, true);
1000 
1001 	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
1002 
1003 #ifdef CONFIG_DCB
1004 	if (!IS_VF(edev))
1005 		qede_set_dcbnl_ops(edev->ndev);
1006 #endif
1007 
1008 	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
1009 
1010 	qede_log_probe(edev);
1011 	return 0;
1012 
1013 err4:
1014 	qede_rdma_dev_remove(edev);
1015 err3:
1016 	free_netdev(edev->ndev);
1017 err2:
1018 	qed_ops->common->slowpath_stop(cdev);
1019 err1:
1020 	qed_ops->common->remove(cdev);
1021 err0:
1022 	return rc;
1023 }
1024 
1025 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1026 {
1027 	bool is_vf = false;
1028 	u32 dp_module = 0;
1029 	u8 dp_level = 0;
1030 
1031 	switch ((enum qede_pci_private)id->driver_data) {
1032 	case QEDE_PRIVATE_VF:
1033 		if (debug & QED_LOG_VERBOSE_MASK)
1034 			dev_err(&pdev->dev, "Probing a VF\n");
1035 		is_vf = true;
1036 		break;
1037 	default:
1038 		if (debug & QED_LOG_VERBOSE_MASK)
1039 			dev_err(&pdev->dev, "Probing a PF\n");
1040 	}
1041 
1042 	qede_config_debug(debug, &dp_module, &dp_level);
1043 
1044 	return __qede_probe(pdev, dp_module, dp_level, is_vf,
1045 			    QEDE_PROBE_NORMAL);
1046 }
1047 
1048 enum qede_remove_mode {
1049 	QEDE_REMOVE_NORMAL,
1050 };
1051 
1052 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1053 {
1054 	struct net_device *ndev = pci_get_drvdata(pdev);
1055 	struct qede_dev *edev = netdev_priv(ndev);
1056 	struct qed_dev *cdev = edev->cdev;
1057 
1058 	DP_INFO(edev, "Starting qede_remove\n");
1059 
1060 	unregister_netdev(ndev);
1061 	cancel_delayed_work_sync(&edev->sp_task);
1062 
1063 	qede_ptp_disable(edev);
1064 
1065 	qede_rdma_dev_remove(edev);
1066 
1067 	edev->ops->common->set_power_state(cdev, PCI_D0);
1068 
1069 	pci_set_drvdata(pdev, NULL);
1070 
1071 	/* Release edev's reference to XDP's bpf if such exist */
1072 	if (edev->xdp_prog)
1073 		bpf_prog_put(edev->xdp_prog);
1074 
1075 	/* Use global ops since we've freed edev */
1076 	qed_ops->common->slowpath_stop(cdev);
1077 	if (system_state == SYSTEM_POWER_OFF)
1078 		return;
1079 	qed_ops->common->remove(cdev);
1080 
1081 	/* Since this can happen out-of-sync with other flows,
1082 	 * don't release the netdevice until after slowpath stop
1083 	 * has been called to guarantee various other contexts
1084 	 * [e.g., QED register callbacks] won't break anything when
1085 	 * accessing the netdevice.
1086 	 */
1087 	 free_netdev(ndev);
1088 
1089 	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
1090 }
1091 
1092 static void qede_remove(struct pci_dev *pdev)
1093 {
1094 	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1095 }
1096 
1097 static void qede_shutdown(struct pci_dev *pdev)
1098 {
1099 	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1100 }
1101 
1102 /* -------------------------------------------------------------------------
1103  * START OF LOAD / UNLOAD
1104  * -------------------------------------------------------------------------
1105  */
1106 
1107 static int qede_set_num_queues(struct qede_dev *edev)
1108 {
1109 	int rc;
1110 	u16 rss_num;
1111 
1112 	/* Setup queues according to possible resources*/
1113 	if (edev->req_queues)
1114 		rss_num = edev->req_queues;
1115 	else
1116 		rss_num = netif_get_num_default_rss_queues() *
1117 			  edev->dev_info.common.num_hwfns;
1118 
1119 	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
1120 
1121 	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
1122 	if (rc > 0) {
1123 		/* Managed to request interrupts for our queues */
1124 		edev->num_queues = rc;
1125 		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
1126 			QEDE_QUEUE_CNT(edev), rss_num);
1127 		rc = 0;
1128 	}
1129 
1130 	edev->fp_num_tx = edev->req_num_tx;
1131 	edev->fp_num_rx = edev->req_num_rx;
1132 
1133 	return rc;
1134 }
1135 
1136 static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
1137 			     u16 sb_id)
1138 {
1139 	if (sb_info->sb_virt) {
1140 		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
1141 		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
1142 				  (void *)sb_info->sb_virt, sb_info->sb_phys);
1143 		memset(sb_info, 0, sizeof(*sb_info));
1144 	}
1145 }
1146 
1147 /* This function allocates fast-path status block memory */
1148 static int qede_alloc_mem_sb(struct qede_dev *edev,
1149 			     struct qed_sb_info *sb_info, u16 sb_id)
1150 {
1151 	struct status_block *sb_virt;
1152 	dma_addr_t sb_phys;
1153 	int rc;
1154 
1155 	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
1156 				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
1157 	if (!sb_virt) {
1158 		DP_ERR(edev, "Status block allocation failed\n");
1159 		return -ENOMEM;
1160 	}
1161 
1162 	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
1163 					sb_virt, sb_phys, sb_id,
1164 					QED_SB_TYPE_L2_QUEUE);
1165 	if (rc) {
1166 		DP_ERR(edev, "Status block initialization failed\n");
1167 		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
1168 				  sb_virt, sb_phys);
1169 		return rc;
1170 	}
1171 
1172 	return 0;
1173 }
1174 
1175 static void qede_free_rx_buffers(struct qede_dev *edev,
1176 				 struct qede_rx_queue *rxq)
1177 {
1178 	u16 i;
1179 
1180 	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
1181 		struct sw_rx_data *rx_buf;
1182 		struct page *data;
1183 
1184 		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
1185 		data = rx_buf->data;
1186 
1187 		dma_unmap_page(&edev->pdev->dev,
1188 			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
1189 
1190 		rx_buf->data = NULL;
1191 		__free_page(data);
1192 	}
1193 }
1194 
1195 static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
1196 {
1197 	int i;
1198 
1199 	if (edev->gro_disable)
1200 		return;
1201 
1202 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1203 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1204 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
1205 
1206 		if (replace_buf->data) {
1207 			dma_unmap_page(&edev->pdev->dev,
1208 				       replace_buf->mapping,
1209 				       PAGE_SIZE, DMA_FROM_DEVICE);
1210 			__free_page(replace_buf->data);
1211 		}
1212 	}
1213 }
1214 
1215 static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1216 {
1217 	qede_free_sge_mem(edev, rxq);
1218 
1219 	/* Free rx buffers */
1220 	qede_free_rx_buffers(edev, rxq);
1221 
1222 	/* Free the parallel SW ring */
1223 	kfree(rxq->sw_rx_ring);
1224 
1225 	/* Free the real RQ ring used by FW */
1226 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
1227 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
1228 }
1229 
1230 static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
1231 {
1232 	dma_addr_t mapping;
1233 	int i;
1234 
1235 	/* Don't perform FW aggregations in case of XDP */
1236 	if (edev->xdp_prog)
1237 		edev->gro_disable = 1;
1238 
1239 	if (edev->gro_disable)
1240 		return 0;
1241 
1242 	if (edev->ndev->mtu > PAGE_SIZE) {
1243 		edev->gro_disable = 1;
1244 		return 0;
1245 	}
1246 
1247 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1248 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1249 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
1250 
1251 		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
1252 		if (unlikely(!replace_buf->data)) {
1253 			DP_NOTICE(edev,
1254 				  "Failed to allocate TPA skb pool [replacement buffer]\n");
1255 			goto err;
1256 		}
1257 
1258 		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
1259 				       PAGE_SIZE, DMA_FROM_DEVICE);
1260 		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
1261 			DP_NOTICE(edev,
1262 				  "Failed to map TPA replacement buffer\n");
1263 			goto err;
1264 		}
1265 
1266 		replace_buf->mapping = mapping;
1267 		tpa_info->buffer.page_offset = 0;
1268 		tpa_info->buffer_mapping = mapping;
1269 		tpa_info->state = QEDE_AGG_STATE_NONE;
1270 	}
1271 
1272 	return 0;
1273 err:
1274 	qede_free_sge_mem(edev, rxq);
1275 	edev->gro_disable = 1;
1276 	return -ENOMEM;
1277 }
1278 
1279 /* This function allocates all memory needed per Rx queue */
1280 static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1281 {
1282 	int i, rc, size;
1283 
1284 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
1285 
1286 	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
1287 	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
1288 
1289 	/* Make sure that the headroom and  payload fit in a single page */
1290 	if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
1291 		rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
1292 
1293 	/* Segment size to spilt a page in multiple equal parts,
1294 	 * unless XDP is used in which case we'd use the entire page.
1295 	 */
1296 	if (!edev->xdp_prog)
1297 		rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
1298 	else
1299 		rxq->rx_buf_seg_size = PAGE_SIZE;
1300 
1301 	/* Allocate the parallel driver ring for Rx buffers */
1302 	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
1303 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1304 	if (!rxq->sw_rx_ring) {
1305 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
1306 		rc = -ENOMEM;
1307 		goto err;
1308 	}
1309 
1310 	/* Allocate FW Rx ring  */
1311 	rc = edev->ops->common->chain_alloc(edev->cdev,
1312 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1313 					    QED_CHAIN_MODE_NEXT_PTR,
1314 					    QED_CHAIN_CNT_TYPE_U16,
1315 					    RX_RING_SIZE,
1316 					    sizeof(struct eth_rx_bd),
1317 					    &rxq->rx_bd_ring, NULL);
1318 	if (rc)
1319 		goto err;
1320 
1321 	/* Allocate FW completion ring */
1322 	rc = edev->ops->common->chain_alloc(edev->cdev,
1323 					    QED_CHAIN_USE_TO_CONSUME,
1324 					    QED_CHAIN_MODE_PBL,
1325 					    QED_CHAIN_CNT_TYPE_U16,
1326 					    RX_RING_SIZE,
1327 					    sizeof(union eth_rx_cqe),
1328 					    &rxq->rx_comp_ring, NULL);
1329 	if (rc)
1330 		goto err;
1331 
1332 	/* Allocate buffers for the Rx ring */
1333 	rxq->filled_buffers = 0;
1334 	for (i = 0; i < rxq->num_rx_buffers; i++) {
1335 		rc = qede_alloc_rx_buffer(rxq, false);
1336 		if (rc) {
1337 			DP_ERR(edev,
1338 			       "Rx buffers allocation failed at index %d\n", i);
1339 			goto err;
1340 		}
1341 	}
1342 
1343 	rc = qede_alloc_sge_mem(edev, rxq);
1344 err:
1345 	return rc;
1346 }
1347 
1348 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1349 {
1350 	/* Free the parallel SW ring */
1351 	if (txq->is_xdp)
1352 		kfree(txq->sw_tx_ring.xdp);
1353 	else
1354 		kfree(txq->sw_tx_ring.skbs);
1355 
1356 	/* Free the real RQ ring used by FW */
1357 	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1358 }
1359 
1360 /* This function allocates all memory needed per Tx queue */
1361 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1362 {
1363 	union eth_tx_bd_types *p_virt;
1364 	int size, rc;
1365 
1366 	txq->num_tx_buffers = edev->q_num_tx_buffers;
1367 
1368 	/* Allocate the parallel driver ring for Tx buffers */
1369 	if (txq->is_xdp) {
1370 		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
1371 		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
1372 		if (!txq->sw_tx_ring.xdp)
1373 			goto err;
1374 	} else {
1375 		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
1376 		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1377 		if (!txq->sw_tx_ring.skbs)
1378 			goto err;
1379 	}
1380 
1381 	rc = edev->ops->common->chain_alloc(edev->cdev,
1382 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1383 					    QED_CHAIN_MODE_PBL,
1384 					    QED_CHAIN_CNT_TYPE_U16,
1385 					    txq->num_tx_buffers,
1386 					    sizeof(*p_virt),
1387 					    &txq->tx_pbl, NULL);
1388 	if (rc)
1389 		goto err;
1390 
1391 	return 0;
1392 
1393 err:
1394 	qede_free_mem_txq(edev, txq);
1395 	return -ENOMEM;
1396 }
1397 
1398 /* This function frees all memory of a single fp */
1399 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1400 {
1401 	qede_free_mem_sb(edev, fp->sb_info, fp->id);
1402 
1403 	if (fp->type & QEDE_FASTPATH_RX)
1404 		qede_free_mem_rxq(edev, fp->rxq);
1405 
1406 	if (fp->type & QEDE_FASTPATH_XDP)
1407 		qede_free_mem_txq(edev, fp->xdp_tx);
1408 
1409 	if (fp->type & QEDE_FASTPATH_TX)
1410 		qede_free_mem_txq(edev, fp->txq);
1411 }
1412 
1413 /* This function allocates all memory needed for a single fp (i.e. an entity
1414  * which contains status block, one rx queue and/or multiple per-TC tx queues.
1415  */
1416 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1417 {
1418 	int rc = 0;
1419 
1420 	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1421 	if (rc)
1422 		goto out;
1423 
1424 	if (fp->type & QEDE_FASTPATH_RX) {
1425 		rc = qede_alloc_mem_rxq(edev, fp->rxq);
1426 		if (rc)
1427 			goto out;
1428 	}
1429 
1430 	if (fp->type & QEDE_FASTPATH_XDP) {
1431 		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1432 		if (rc)
1433 			goto out;
1434 	}
1435 
1436 	if (fp->type & QEDE_FASTPATH_TX) {
1437 		rc = qede_alloc_mem_txq(edev, fp->txq);
1438 		if (rc)
1439 			goto out;
1440 	}
1441 
1442 out:
1443 	return rc;
1444 }
1445 
1446 static void qede_free_mem_load(struct qede_dev *edev)
1447 {
1448 	int i;
1449 
1450 	for_each_queue(i) {
1451 		struct qede_fastpath *fp = &edev->fp_array[i];
1452 
1453 		qede_free_mem_fp(edev, fp);
1454 	}
1455 }
1456 
1457 /* This function allocates all qede memory at NIC load. */
1458 static int qede_alloc_mem_load(struct qede_dev *edev)
1459 {
1460 	int rc = 0, queue_id;
1461 
1462 	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1463 		struct qede_fastpath *fp = &edev->fp_array[queue_id];
1464 
1465 		rc = qede_alloc_mem_fp(edev, fp);
1466 		if (rc) {
1467 			DP_ERR(edev,
1468 			       "Failed to allocate memory for fastpath - rss id = %d\n",
1469 			       queue_id);
1470 			qede_free_mem_load(edev);
1471 			return rc;
1472 		}
1473 	}
1474 
1475 	return 0;
1476 }
1477 
1478 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
1479 static void qede_init_fp(struct qede_dev *edev)
1480 {
1481 	int queue_id, rxq_index = 0, txq_index = 0;
1482 	struct qede_fastpath *fp;
1483 
1484 	for_each_queue(queue_id) {
1485 		fp = &edev->fp_array[queue_id];
1486 
1487 		fp->edev = edev;
1488 		fp->id = queue_id;
1489 
1490 		if (fp->type & QEDE_FASTPATH_XDP) {
1491 			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1492 								rxq_index);
1493 			fp->xdp_tx->is_xdp = 1;
1494 		}
1495 
1496 		if (fp->type & QEDE_FASTPATH_RX) {
1497 			fp->rxq->rxq_id = rxq_index++;
1498 
1499 			/* Determine how to map buffers for this queue */
1500 			if (fp->type & QEDE_FASTPATH_XDP)
1501 				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1502 			else
1503 				fp->rxq->data_direction = DMA_FROM_DEVICE;
1504 			fp->rxq->dev = &edev->pdev->dev;
1505 		}
1506 
1507 		if (fp->type & QEDE_FASTPATH_TX) {
1508 			fp->txq->index = txq_index++;
1509 			if (edev->dev_info.is_legacy)
1510 				fp->txq->is_legacy = 1;
1511 			fp->txq->dev = &edev->pdev->dev;
1512 		}
1513 
1514 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1515 			 edev->ndev->name, queue_id);
1516 	}
1517 
1518 	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
1519 }
1520 
1521 static int qede_set_real_num_queues(struct qede_dev *edev)
1522 {
1523 	int rc = 0;
1524 
1525 	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
1526 	if (rc) {
1527 		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1528 		return rc;
1529 	}
1530 
1531 	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1532 	if (rc) {
1533 		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1534 		return rc;
1535 	}
1536 
1537 	return 0;
1538 }
1539 
1540 static void qede_napi_disable_remove(struct qede_dev *edev)
1541 {
1542 	int i;
1543 
1544 	for_each_queue(i) {
1545 		napi_disable(&edev->fp_array[i].napi);
1546 
1547 		netif_napi_del(&edev->fp_array[i].napi);
1548 	}
1549 }
1550 
1551 static void qede_napi_add_enable(struct qede_dev *edev)
1552 {
1553 	int i;
1554 
1555 	/* Add NAPI objects */
1556 	for_each_queue(i) {
1557 		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
1558 			       qede_poll, NAPI_POLL_WEIGHT);
1559 		napi_enable(&edev->fp_array[i].napi);
1560 	}
1561 }
1562 
1563 static void qede_sync_free_irqs(struct qede_dev *edev)
1564 {
1565 	int i;
1566 
1567 	for (i = 0; i < edev->int_info.used_cnt; i++) {
1568 		if (edev->int_info.msix_cnt) {
1569 			synchronize_irq(edev->int_info.msix[i].vector);
1570 			free_irq(edev->int_info.msix[i].vector,
1571 				 &edev->fp_array[i]);
1572 		} else {
1573 			edev->ops->common->simd_handler_clean(edev->cdev, i);
1574 		}
1575 	}
1576 
1577 	edev->int_info.used_cnt = 0;
1578 }
1579 
1580 static int qede_req_msix_irqs(struct qede_dev *edev)
1581 {
1582 	int i, rc;
1583 
1584 	/* Sanitize number of interrupts == number of prepared RSS queues */
1585 	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1586 		DP_ERR(edev,
1587 		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1588 		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1589 		return -EINVAL;
1590 	}
1591 
1592 	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1593 #ifdef CONFIG_RFS_ACCEL
1594 		struct qede_fastpath *fp = &edev->fp_array[i];
1595 
1596 		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1597 			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1598 					      edev->int_info.msix[i].vector);
1599 			if (rc) {
1600 				DP_ERR(edev, "Failed to add CPU rmap\n");
1601 				qede_free_arfs(edev);
1602 			}
1603 		}
1604 #endif
1605 		rc = request_irq(edev->int_info.msix[i].vector,
1606 				 qede_msix_fp_int, 0, edev->fp_array[i].name,
1607 				 &edev->fp_array[i]);
1608 		if (rc) {
1609 			DP_ERR(edev, "Request fp %d irq failed\n", i);
1610 			qede_sync_free_irqs(edev);
1611 			return rc;
1612 		}
1613 		DP_VERBOSE(edev, NETIF_MSG_INTR,
1614 			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1615 			   edev->fp_array[i].name, i,
1616 			   &edev->fp_array[i]);
1617 		edev->int_info.used_cnt++;
1618 	}
1619 
1620 	return 0;
1621 }
1622 
1623 static void qede_simd_fp_handler(void *cookie)
1624 {
1625 	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1626 
1627 	napi_schedule_irqoff(&fp->napi);
1628 }
1629 
1630 static int qede_setup_irqs(struct qede_dev *edev)
1631 {
1632 	int i, rc = 0;
1633 
1634 	/* Learn Interrupt configuration */
1635 	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1636 	if (rc)
1637 		return rc;
1638 
1639 	if (edev->int_info.msix_cnt) {
1640 		rc = qede_req_msix_irqs(edev);
1641 		if (rc)
1642 			return rc;
1643 		edev->ndev->irq = edev->int_info.msix[0].vector;
1644 	} else {
1645 		const struct qed_common_ops *ops;
1646 
1647 		/* qed should learn receive the RSS ids and callbacks */
1648 		ops = edev->ops->common;
1649 		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
1650 			ops->simd_handler_config(edev->cdev,
1651 						 &edev->fp_array[i], i,
1652 						 qede_simd_fp_handler);
1653 		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
1654 	}
1655 	return 0;
1656 }
1657 
1658 static int qede_drain_txq(struct qede_dev *edev,
1659 			  struct qede_tx_queue *txq, bool allow_drain)
1660 {
1661 	int rc, cnt = 1000;
1662 
1663 	while (txq->sw_tx_cons != txq->sw_tx_prod) {
1664 		if (!cnt) {
1665 			if (allow_drain) {
1666 				DP_NOTICE(edev,
1667 					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
1668 					  txq->index);
1669 				rc = edev->ops->common->drain(edev->cdev);
1670 				if (rc)
1671 					return rc;
1672 				return qede_drain_txq(edev, txq, false);
1673 			}
1674 			DP_NOTICE(edev,
1675 				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
1676 				  txq->index, txq->sw_tx_prod,
1677 				  txq->sw_tx_cons);
1678 			return -ENODEV;
1679 		}
1680 		cnt--;
1681 		usleep_range(1000, 2000);
1682 		barrier();
1683 	}
1684 
1685 	/* FW finished processing, wait for HW to transmit all tx packets */
1686 	usleep_range(1000, 2000);
1687 
1688 	return 0;
1689 }
1690 
1691 static int qede_stop_txq(struct qede_dev *edev,
1692 			 struct qede_tx_queue *txq, int rss_id)
1693 {
1694 	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
1695 }
1696 
1697 static int qede_stop_queues(struct qede_dev *edev)
1698 {
1699 	struct qed_update_vport_params *vport_update_params;
1700 	struct qed_dev *cdev = edev->cdev;
1701 	struct qede_fastpath *fp;
1702 	int rc, i;
1703 
1704 	/* Disable the vport */
1705 	vport_update_params = vzalloc(sizeof(*vport_update_params));
1706 	if (!vport_update_params)
1707 		return -ENOMEM;
1708 
1709 	vport_update_params->vport_id = 0;
1710 	vport_update_params->update_vport_active_flg = 1;
1711 	vport_update_params->vport_active_flg = 0;
1712 	vport_update_params->update_rss_flg = 0;
1713 
1714 	rc = edev->ops->vport_update(cdev, vport_update_params);
1715 	vfree(vport_update_params);
1716 
1717 	if (rc) {
1718 		DP_ERR(edev, "Failed to update vport\n");
1719 		return rc;
1720 	}
1721 
1722 	/* Flush Tx queues. If needed, request drain from MCP */
1723 	for_each_queue(i) {
1724 		fp = &edev->fp_array[i];
1725 
1726 		if (fp->type & QEDE_FASTPATH_TX) {
1727 			rc = qede_drain_txq(edev, fp->txq, true);
1728 			if (rc)
1729 				return rc;
1730 		}
1731 
1732 		if (fp->type & QEDE_FASTPATH_XDP) {
1733 			rc = qede_drain_txq(edev, fp->xdp_tx, true);
1734 			if (rc)
1735 				return rc;
1736 		}
1737 	}
1738 
1739 	/* Stop all Queues in reverse order */
1740 	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
1741 		fp = &edev->fp_array[i];
1742 
1743 		/* Stop the Tx Queue(s) */
1744 		if (fp->type & QEDE_FASTPATH_TX) {
1745 			rc = qede_stop_txq(edev, fp->txq, i);
1746 			if (rc)
1747 				return rc;
1748 		}
1749 
1750 		/* Stop the Rx Queue */
1751 		if (fp->type & QEDE_FASTPATH_RX) {
1752 			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
1753 			if (rc) {
1754 				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
1755 				return rc;
1756 			}
1757 		}
1758 
1759 		/* Stop the XDP forwarding queue */
1760 		if (fp->type & QEDE_FASTPATH_XDP) {
1761 			rc = qede_stop_txq(edev, fp->xdp_tx, i);
1762 			if (rc)
1763 				return rc;
1764 
1765 			bpf_prog_put(fp->rxq->xdp_prog);
1766 		}
1767 	}
1768 
1769 	/* Stop the vport */
1770 	rc = edev->ops->vport_stop(cdev, 0);
1771 	if (rc)
1772 		DP_ERR(edev, "Failed to stop VPORT\n");
1773 
1774 	return rc;
1775 }
1776 
1777 static int qede_start_txq(struct qede_dev *edev,
1778 			  struct qede_fastpath *fp,
1779 			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
1780 {
1781 	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
1782 	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
1783 	struct qed_queue_start_common_params params;
1784 	struct qed_txq_start_ret_params ret_params;
1785 	int rc;
1786 
1787 	memset(&params, 0, sizeof(params));
1788 	memset(&ret_params, 0, sizeof(ret_params));
1789 
1790 	/* Let the XDP queue share the queue-zone with one of the regular txq.
1791 	 * We don't really care about its coalescing.
1792 	 */
1793 	if (txq->is_xdp)
1794 		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
1795 	else
1796 		params.queue_id = txq->index;
1797 
1798 	params.p_sb = fp->sb_info;
1799 	params.sb_idx = sb_idx;
1800 
1801 	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
1802 				   page_cnt, &ret_params);
1803 	if (rc) {
1804 		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
1805 		return rc;
1806 	}
1807 
1808 	txq->doorbell_addr = ret_params.p_doorbell;
1809 	txq->handle = ret_params.p_handle;
1810 
1811 	/* Determine the FW consumer address associated */
1812 	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
1813 
1814 	/* Prepare the doorbell parameters */
1815 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
1816 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1817 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
1818 		  DQ_XCM_ETH_TX_BD_PROD_CMD);
1819 	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1820 
1821 	return rc;
1822 }
1823 
1824 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
1825 {
1826 	int vlan_removal_en = 1;
1827 	struct qed_dev *cdev = edev->cdev;
1828 	struct qed_dev_info *qed_info = &edev->dev_info.common;
1829 	struct qed_update_vport_params *vport_update_params;
1830 	struct qed_queue_start_common_params q_params;
1831 	struct qed_start_vport_params start = {0};
1832 	int rc, i;
1833 
1834 	if (!edev->num_queues) {
1835 		DP_ERR(edev,
1836 		       "Cannot update V-VPORT as active as there are no Rx queues\n");
1837 		return -EINVAL;
1838 	}
1839 
1840 	vport_update_params = vzalloc(sizeof(*vport_update_params));
1841 	if (!vport_update_params)
1842 		return -ENOMEM;
1843 
1844 	start.handle_ptp_pkts = !!(edev->ptp);
1845 	start.gro_enable = !edev->gro_disable;
1846 	start.mtu = edev->ndev->mtu;
1847 	start.vport_id = 0;
1848 	start.drop_ttl0 = true;
1849 	start.remove_inner_vlan = vlan_removal_en;
1850 	start.clear_stats = clear_stats;
1851 
1852 	rc = edev->ops->vport_start(cdev, &start);
1853 
1854 	if (rc) {
1855 		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
1856 		goto out;
1857 	}
1858 
1859 	DP_VERBOSE(edev, NETIF_MSG_IFUP,
1860 		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
1861 		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
1862 
1863 	for_each_queue(i) {
1864 		struct qede_fastpath *fp = &edev->fp_array[i];
1865 		dma_addr_t p_phys_table;
1866 		u32 page_cnt;
1867 
1868 		if (fp->type & QEDE_FASTPATH_RX) {
1869 			struct qed_rxq_start_ret_params ret_params;
1870 			struct qede_rx_queue *rxq = fp->rxq;
1871 			__le16 *val;
1872 
1873 			memset(&ret_params, 0, sizeof(ret_params));
1874 			memset(&q_params, 0, sizeof(q_params));
1875 			q_params.queue_id = rxq->rxq_id;
1876 			q_params.vport_id = 0;
1877 			q_params.p_sb = fp->sb_info;
1878 			q_params.sb_idx = RX_PI;
1879 
1880 			p_phys_table =
1881 			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
1882 			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
1883 
1884 			rc = edev->ops->q_rx_start(cdev, i, &q_params,
1885 						   rxq->rx_buf_size,
1886 						   rxq->rx_bd_ring.p_phys_addr,
1887 						   p_phys_table,
1888 						   page_cnt, &ret_params);
1889 			if (rc) {
1890 				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
1891 				       rc);
1892 				goto out;
1893 			}
1894 
1895 			/* Use the return parameters */
1896 			rxq->hw_rxq_prod_addr = ret_params.p_prod;
1897 			rxq->handle = ret_params.p_handle;
1898 
1899 			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
1900 			rxq->hw_cons_ptr = val;
1901 
1902 			qede_update_rx_prod(edev, rxq);
1903 		}
1904 
1905 		if (fp->type & QEDE_FASTPATH_XDP) {
1906 			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
1907 			if (rc)
1908 				goto out;
1909 
1910 			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
1911 			if (IS_ERR(fp->rxq->xdp_prog)) {
1912 				rc = PTR_ERR(fp->rxq->xdp_prog);
1913 				fp->rxq->xdp_prog = NULL;
1914 				goto out;
1915 			}
1916 		}
1917 
1918 		if (fp->type & QEDE_FASTPATH_TX) {
1919 			rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
1920 			if (rc)
1921 				goto out;
1922 		}
1923 	}
1924 
1925 	/* Prepare and send the vport enable */
1926 	vport_update_params->vport_id = start.vport_id;
1927 	vport_update_params->update_vport_active_flg = 1;
1928 	vport_update_params->vport_active_flg = 1;
1929 
1930 	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
1931 	    qed_info->tx_switching) {
1932 		vport_update_params->update_tx_switching_flg = 1;
1933 		vport_update_params->tx_switching_flg = 1;
1934 	}
1935 
1936 	qede_fill_rss_params(edev, &vport_update_params->rss_params,
1937 			     &vport_update_params->update_rss_flg);
1938 
1939 	rc = edev->ops->vport_update(cdev, vport_update_params);
1940 	if (rc)
1941 		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
1942 
1943 out:
1944 	vfree(vport_update_params);
1945 	return rc;
1946 }
1947 
1948 enum qede_unload_mode {
1949 	QEDE_UNLOAD_NORMAL,
1950 };
1951 
1952 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
1953 			bool is_locked)
1954 {
1955 	struct qed_link_params link_params;
1956 	int rc;
1957 
1958 	DP_INFO(edev, "Starting qede unload\n");
1959 
1960 	if (!is_locked)
1961 		__qede_lock(edev);
1962 
1963 	edev->state = QEDE_STATE_CLOSED;
1964 
1965 	qede_rdma_dev_event_close(edev);
1966 
1967 	/* Close OS Tx */
1968 	netif_tx_disable(edev->ndev);
1969 	netif_carrier_off(edev->ndev);
1970 
1971 	/* Reset the link */
1972 	memset(&link_params, 0, sizeof(link_params));
1973 	link_params.link_up = false;
1974 	edev->ops->common->set_link(edev->cdev, &link_params);
1975 	rc = qede_stop_queues(edev);
1976 	if (rc) {
1977 		qede_sync_free_irqs(edev);
1978 		goto out;
1979 	}
1980 
1981 	DP_INFO(edev, "Stopped Queues\n");
1982 
1983 	qede_vlan_mark_nonconfigured(edev);
1984 	edev->ops->fastpath_stop(edev->cdev);
1985 
1986 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1987 		qede_poll_for_freeing_arfs_filters(edev);
1988 		qede_free_arfs(edev);
1989 	}
1990 
1991 	/* Release the interrupts */
1992 	qede_sync_free_irqs(edev);
1993 	edev->ops->common->set_fp_int(edev->cdev, 0);
1994 
1995 	qede_napi_disable_remove(edev);
1996 
1997 	qede_free_mem_load(edev);
1998 	qede_free_fp_array(edev);
1999 
2000 out:
2001 	if (!is_locked)
2002 		__qede_unlock(edev);
2003 	DP_INFO(edev, "Ending qede unload\n");
2004 }
2005 
2006 enum qede_load_mode {
2007 	QEDE_LOAD_NORMAL,
2008 	QEDE_LOAD_RELOAD,
2009 };
2010 
2011 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2012 		     bool is_locked)
2013 {
2014 	struct qed_link_params link_params;
2015 	int rc;
2016 
2017 	DP_INFO(edev, "Starting qede load\n");
2018 
2019 	if (!is_locked)
2020 		__qede_lock(edev);
2021 
2022 	rc = qede_set_num_queues(edev);
2023 	if (rc)
2024 		goto out;
2025 
2026 	rc = qede_alloc_fp_array(edev);
2027 	if (rc)
2028 		goto out;
2029 
2030 	qede_init_fp(edev);
2031 
2032 	rc = qede_alloc_mem_load(edev);
2033 	if (rc)
2034 		goto err1;
2035 	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
2036 		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
2037 
2038 	rc = qede_set_real_num_queues(edev);
2039 	if (rc)
2040 		goto err2;
2041 
2042 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
2043 		rc = qede_alloc_arfs(edev);
2044 		if (rc)
2045 			DP_NOTICE(edev, "aRFS memory allocation failed\n");
2046 	}
2047 
2048 	qede_napi_add_enable(edev);
2049 	DP_INFO(edev, "Napi added and enabled\n");
2050 
2051 	rc = qede_setup_irqs(edev);
2052 	if (rc)
2053 		goto err3;
2054 	DP_INFO(edev, "Setup IRQs succeeded\n");
2055 
2056 	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
2057 	if (rc)
2058 		goto err4;
2059 	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2060 
2061 	/* Program un-configured VLANs */
2062 	qede_configure_vlan_filters(edev);
2063 
2064 	/* Ask for link-up using current configuration */
2065 	memset(&link_params, 0, sizeof(link_params));
2066 	link_params.link_up = true;
2067 	edev->ops->common->set_link(edev->cdev, &link_params);
2068 
2069 	qede_rdma_dev_event_open(edev);
2070 
2071 	edev->state = QEDE_STATE_OPEN;
2072 
2073 	DP_INFO(edev, "Ending successfully qede load\n");
2074 
2075 	goto out;
2076 err4:
2077 	qede_sync_free_irqs(edev);
2078 	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
2079 err3:
2080 	qede_napi_disable_remove(edev);
2081 err2:
2082 	qede_free_mem_load(edev);
2083 err1:
2084 	edev->ops->common->set_fp_int(edev->cdev, 0);
2085 	qede_free_fp_array(edev);
2086 	edev->num_queues = 0;
2087 	edev->fp_num_tx = 0;
2088 	edev->fp_num_rx = 0;
2089 out:
2090 	if (!is_locked)
2091 		__qede_unlock(edev);
2092 
2093 	return rc;
2094 }
2095 
2096 /* 'func' should be able to run between unload and reload assuming interface
2097  * is actually running, or afterwards in case it's currently DOWN.
2098  */
2099 void qede_reload(struct qede_dev *edev,
2100 		 struct qede_reload_args *args, bool is_locked)
2101 {
2102 	if (!is_locked)
2103 		__qede_lock(edev);
2104 
2105 	/* Since qede_lock is held, internal state wouldn't change even
2106 	 * if netdev state would start transitioning. Check whether current
2107 	 * internal configuration indicates device is up, then reload.
2108 	 */
2109 	if (edev->state == QEDE_STATE_OPEN) {
2110 		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2111 		if (args)
2112 			args->func(edev, args);
2113 		qede_load(edev, QEDE_LOAD_RELOAD, true);
2114 
2115 		/* Since no one is going to do it for us, re-configure */
2116 		qede_config_rx_mode(edev->ndev);
2117 	} else if (args) {
2118 		args->func(edev, args);
2119 	}
2120 
2121 	if (!is_locked)
2122 		__qede_unlock(edev);
2123 }
2124 
2125 /* called with rtnl_lock */
2126 static int qede_open(struct net_device *ndev)
2127 {
2128 	struct qede_dev *edev = netdev_priv(ndev);
2129 	int rc;
2130 
2131 	netif_carrier_off(ndev);
2132 
2133 	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2134 
2135 	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2136 	if (rc)
2137 		return rc;
2138 
2139 	udp_tunnel_get_rx_info(ndev);
2140 
2141 	edev->ops->common->update_drv_state(edev->cdev, true);
2142 
2143 	return 0;
2144 }
2145 
2146 static int qede_close(struct net_device *ndev)
2147 {
2148 	struct qede_dev *edev = netdev_priv(ndev);
2149 
2150 	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2151 
2152 	edev->ops->common->update_drv_state(edev->cdev, false);
2153 
2154 	return 0;
2155 }
2156 
2157 static void qede_link_update(void *dev, struct qed_link_output *link)
2158 {
2159 	struct qede_dev *edev = dev;
2160 
2161 	if (!netif_running(edev->ndev)) {
2162 		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
2163 		return;
2164 	}
2165 
2166 	if (link->link_up) {
2167 		if (!netif_carrier_ok(edev->ndev)) {
2168 			DP_NOTICE(edev, "Link is up\n");
2169 			netif_tx_start_all_queues(edev->ndev);
2170 			netif_carrier_on(edev->ndev);
2171 		}
2172 	} else {
2173 		if (netif_carrier_ok(edev->ndev)) {
2174 			DP_NOTICE(edev, "Link is down\n");
2175 			netif_tx_disable(edev->ndev);
2176 			netif_carrier_off(edev->ndev);
2177 		}
2178 	}
2179 }
2180