// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include "qede.h"
#include "qede_ptp.h"

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40	0x1634
#define CHIP_NUM_57980S_10	0x1666
#define CHIP_NUM_57980S_MF	0x1636
#define CHIP_NUM_57980S_100	0x1644
#define CHIP_NUM_57980S_50	0x1654
#define CHIP_NUM_57980S_25	0x1656
#define CHIP_NUM_57980S_IOV	0x1664
#define CHIP_NUM_AH		0x8070
#define CHIP_NUM_AH_IOV		0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV

#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
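/* .ndo_set_vf_vlan callback: validate the VLAN ID and protocol (only
 * 802.1Q is supported) and program the VLAN for the VF through the qed
 * IOV ops. The qos argument is accepted but not forwarded to qed.
 */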
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

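/* .ndo_set_vf_mac callback: validate and program a MAC address for the
 * given VF through the qed IOV ops.
 */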
static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

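/* PCI sriov_configure hook: ask qed to create/tear down num_vfs_param VFs
 * and, if the interface is running, update the PF Tx-switching state to
 * match whether any VFs remain.
 */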
static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

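/* System suspend is not supported; failing the callback makes the PM core
 * abort the transition instead of suspending the device in an unknown state.
 */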
static int __maybe_unused qede_suspend(struct device *dev)
{
	dev_info(dev, "Device does not support suspend operation\n");

	return -EOPNOTSUPP;
}

static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);

static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
	.driver.pm = &qede_pm_ops,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

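/* netdevice notifier: forward interface renames to qed via set_name() and
 * MAC address changes to the RDMA driver; all other events are ignored.
 */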
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static
int __init qede_init(void)
{
	int ret;

	pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");

	qede_forced_speed_maps_init();

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

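/* Snapshot the vport statistics from qed into edev->stats; stats_lock
 * keeps the copy consistent for concurrent readers.
 */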
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	spin_lock(&edev->stats_lock);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}

	spin_unlock(&edev->stats_lock);
}

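/* .ndo_get_stats64 callback: fold the driver-maintained counters (refreshed
 * by qede_periodic_task()) into the standard rtnl_link_stats64 layout.
 */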
static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	p_common = &edev->stats.common;

	spin_lock(&edev->stats_lock);

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;

	spin_unlock(&edev->stats_lock);
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}

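/* Tx-timeout debug helpers: dump the raw status block, per-queue metadata
 * and producer/consumer indices, and report the state to the management FW.
 */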
static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
	char *p_sb = (char *)fp->sb_info->sb_virt;
	u32 sb_size, i;

	sb_size = sizeof(struct status_block);

	for (i = 0; i < sb_size; i += 8)
		DP_NOTICE(edev,
			  "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
			  p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
			  p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
}

static void
qede_txq_fp_log_metadata(struct qede_dev *edev,
			 struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_chain *p_chain = &txq->tx_pbl;

	/* Dump txq/fp/sb ids and other metadata */
	DP_NOTICE(edev,
		  "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
		  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
		  p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);

	/* Dump all the relevant prod/cons indexes */
	DP_NOTICE(edev,
		  "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
		  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
		  qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
}

static void
qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_sb_info_dbg sb_dbg;
	int rc;

	/* sb info */
	qede_fp_sb_dump(edev, fp);

	memset(&sb_dbg, 0, sizeof(sb_dbg));
	rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);

	DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
		  sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);

	/* report to mfw */
	edev->ops->common->mfw_report(edev->cdev,
				      "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
				      txq->index, le16_to_cpu(*txq->hw_cons_ptr),
				      qed_chain_get_cons_idx(&txq->tx_pbl),
				      qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
	if (!rc)
		edev->ops->common->mfw_report(edev->cdev,
					      "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
					      txq->index, fp->sb_info->igu_sb_id,
					      sb_dbg.igu_prod, sb_dbg.igu_cons,
					      sb_dbg.pi[TX_PI(txq->cos)]);
}

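/* .ndo_tx_timeout callback: log the state of every Tx queue; on PFs, also
 * schedule the HW-error handler unless another error is already in flight.
 */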
static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);

	for_each_queue(i) {
		struct qede_tx_queue *txq;
		struct qede_fastpath *fp;
		int cos;

		fp = &edev->fp_array[i];
		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for_each_cos_in_txq(edev, cos) {
			txq = &fp->txq[cos];

			/* Dump basic metadata for all queues */
			qede_txq_fp_log_metadata(edev, fp, txq);

			if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
			    qed_chain_get_prod_idx(&txq->tx_pbl))
				qede_tx_log_print(edev, fp, txq);
		}
	}

	if (IS_VF(edev))
		return;

	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

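/* Map num_tc traffic classes onto the Tx queues, one group of
 * QEDE_TSS_COUNT(edev) queues per traffic class.
 */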
static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}

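/* Translate a flower classifier command into the driver's flow-filter
 * add/delete primitives.
 */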
static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}

static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_eth_ioctl		= qede_ioctl,
	.ndo_tx_timeout		= qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac		= qede_set_vf_mac,
	.ndo_set_vf_vlan	= qede_set_vf_vlan,
	.ndo_set_vf_trust	= qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state	= qede_set_vf_link_state,
	.ndo_set_vf_spoofchk	= qede_set_vf_spoofchk,
	.ndo_get_vf_config	= qede_get_vf_config,
	.ndo_set_vf_rate	= qede_set_vf_rate,
#endif
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= qede_rx_flow_steer,
#endif
	.ndo_xdp_xmit		= qede_xdp_transmit,
	.ndo_setup_tc		= qede_setup_tc_offload,
};

static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
	.ndo_xdp_xmit		= qede_xdp_transmit,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

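/* Allocate the net_device together with its private qede_dev area and seed
 * it from the qed device info; ring sizes are reduced in a kdump kernel.
 */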
static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', if the device supports it, declare it enabled.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

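/* Initialize net_device fields, ops and feature flags according to what
 * this device (PF or VF) actually supports: tunnels, aRFS, XDP, etc.
 */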
static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (edev->dev_info.common.b_arfs_capable)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);

		qede_set_udp_tunnels(edev);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module.
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 *	 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 *	 and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 *	 module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}

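/* Free the fastpath array and all per-queue structures, unregistering any
 * xdp_rxq_info that was registered for an Rx queue.
 */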
static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

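/* Allocate the fastpath array and the per-queue sb/txq/rxq/xdp structures;
 * the coalescing-parameter table is allocated once and reused thereafter.
 */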
qede_alloc_fp_array(struct qede_dev * edev)9782950219dSYuval Mintz static int qede_alloc_fp_array(struct qede_dev *edev)
9792950219dSYuval Mintz {
9809a4d7e86SSudarsana Reddy Kalluru u8 fp_combined, fp_rx = edev->fp_num_rx;
9812950219dSYuval Mintz struct qede_fastpath *fp;
9822950219dSYuval Mintz int i;
9832950219dSYuval Mintz
9849a4d7e86SSudarsana Reddy Kalluru edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
9852950219dSYuval Mintz sizeof(*edev->fp_array), GFP_KERNEL);
9862950219dSYuval Mintz if (!edev->fp_array) {
9872950219dSYuval Mintz DP_NOTICE(edev, "fp array allocation failed\n");
9882950219dSYuval Mintz goto err;
9892950219dSYuval Mintz }
9902950219dSYuval Mintz
991908d4bb7SManish Chopra if (!edev->coal_entry) {
992aaa3c08eSMichal Schmidt edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev),
993aaa3c08eSMichal Schmidt sizeof(*edev->coal_entry),
994908d4bb7SManish Chopra GFP_KERNEL);
995aaa3c08eSMichal Schmidt if (!edev->coal_entry) {
996b0ec5489SBhaskar Upadhaya DP_ERR(edev, "coalesce entry allocation failed\n");
997b0ec5489SBhaskar Upadhaya goto err;
998b0ec5489SBhaskar Upadhaya }
999aaa3c08eSMichal Schmidt }
1000b0ec5489SBhaskar Upadhaya
10019a4d7e86SSudarsana Reddy Kalluru fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
10029a4d7e86SSudarsana Reddy Kalluru
10039a4d7e86SSudarsana Reddy Kalluru /* Allocate the FP elements for Rx queues followed by combined and then
10049a4d7e86SSudarsana Reddy Kalluru * the Tx. This ordering should be maintained so that the respective
10059a4d7e86SSudarsana Reddy Kalluru * queues (Rx or Tx) will be together in the fastpath array and the
10069a4d7e86SSudarsana Reddy Kalluru * associated ids will be sequential.
10079a4d7e86SSudarsana Reddy Kalluru */
10089a4d7e86SSudarsana Reddy Kalluru for_each_queue(i) {
10092950219dSYuval Mintz fp = &edev->fp_array[i];
10102950219dSYuval Mintz
101180439a17SMintz, Yuval fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
10122950219dSYuval Mintz if (!fp->sb_info) {
10132950219dSYuval Mintz DP_NOTICE(edev, "sb info struct allocation failed\n");
10142950219dSYuval Mintz goto err;
10152950219dSYuval Mintz }
10162950219dSYuval Mintz
10179a4d7e86SSudarsana Reddy Kalluru if (fp_rx) {
10189a4d7e86SSudarsana Reddy Kalluru fp->type = QEDE_FASTPATH_RX;
10199a4d7e86SSudarsana Reddy Kalluru fp_rx--;
10209a4d7e86SSudarsana Reddy Kalluru } else if (fp_combined) {
10219a4d7e86SSudarsana Reddy Kalluru fp->type = QEDE_FASTPATH_COMBINED;
10229a4d7e86SSudarsana Reddy Kalluru fp_combined--;
10239a4d7e86SSudarsana Reddy Kalluru } else {
10249a4d7e86SSudarsana Reddy Kalluru fp->type = QEDE_FASTPATH_TX;
10252950219dSYuval Mintz }
10262950219dSYuval Mintz
10279a4d7e86SSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_TX) {
10285e7baf0fSManish Chopra fp->txq = kcalloc(edev->dev_info.num_tc,
10295e7baf0fSManish Chopra sizeof(*fp->txq), GFP_KERNEL);
103080439a17SMintz, Yuval if (!fp->txq)
10312950219dSYuval Mintz goto err;
10322950219dSYuval Mintz }
10332950219dSYuval Mintz
10349a4d7e86SSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_RX) {
103580439a17SMintz, Yuval fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
103680439a17SMintz, Yuval if (!fp->rxq)
10379a4d7e86SSudarsana Reddy Kalluru goto err;
1038496e0517SMintz, Yuval
1039cb6aeb07SMintz, Yuval if (edev->xdp_prog) {
1040cb6aeb07SMintz, Yuval fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
1041cb6aeb07SMintz, Yuval GFP_KERNEL);
1042cb6aeb07SMintz, Yuval if (!fp->xdp_tx)
1043cb6aeb07SMintz, Yuval goto err;
1044496e0517SMintz, Yuval fp->type |= QEDE_FASTPATH_XDP;
10459a4d7e86SSudarsana Reddy Kalluru }
10469a4d7e86SSudarsana Reddy Kalluru }
1047cb6aeb07SMintz, Yuval }
10489a4d7e86SSudarsana Reddy Kalluru
10492950219dSYuval Mintz return 0;
10502950219dSYuval Mintz err:
10512950219dSYuval Mintz qede_free_fp_array(edev);
10522950219dSYuval Mintz return -ENOMEM;
10532950219dSYuval Mintz }
10542950219dSYuval Mintz
1055ccc67ef5STomer Tayar /* The qede lock is used to protect driver state changes and driver flows that
1056ccc67ef5STomer Tayar * are not reentrant.
1057ccc67ef5STomer Tayar */
1058ccc67ef5STomer Tayar void __qede_lock(struct qede_dev *edev)
1059ccc67ef5STomer Tayar {
1060ccc67ef5STomer Tayar mutex_lock(&edev->qede_lock);
1061ccc67ef5STomer Tayar }
1062ccc67ef5STomer Tayar
1063ccc67ef5STomer Tayar void __qede_unlock(struct qede_dev *edev)
1064ccc67ef5STomer Tayar {
1065ccc67ef5STomer Tayar mutex_unlock(&edev->qede_lock);
1066ccc67ef5STomer Tayar }
1067ccc67ef5STomer Tayar
1068ccc67ef5STomer Tayar /* This version of the lock should be used when acquiring the RTNL lock is also
1069ccc67ef5STomer Tayar * needed in addition to the internal qede lock.
1070ccc67ef5STomer Tayar */
10715948d117SYueHaibing static void qede_lock(struct qede_dev *edev)
1072ccc67ef5STomer Tayar {
1073ccc67ef5STomer Tayar rtnl_lock();
1074ccc67ef5STomer Tayar __qede_lock(edev);
1075ccc67ef5STomer Tayar }
1076ccc67ef5STomer Tayar
10775948d117SYueHaibing static void qede_unlock(struct qede_dev *edev)
1078ccc67ef5STomer Tayar {
1079ccc67ef5STomer Tayar __qede_unlock(edev);
1080ccc67ef5STomer Tayar rtnl_unlock();
1081ccc67ef5STomer Tayar }
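/* Usage sketch (hypothetical caller, for illustration only):
 *
 *	qede_lock(edev);	// rtnl_lock() + the internal qede lock
 *	... reconfigure the device ...
 *	qede_unlock(edev);
 *
 * A flow that already holds the RTNL lock must use __qede_lock() /
 * __qede_unlock() instead, since rtnl_lock() does not nest.
 */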
1082ccc67ef5STomer Tayar
108342510dffSManish Chopra static void qede_periodic_task(struct work_struct *work)
108442510dffSManish Chopra {
108542510dffSManish Chopra struct qede_dev *edev = container_of(work, struct qede_dev,
108642510dffSManish Chopra periodic_task.work);
108742510dffSManish Chopra
108842510dffSManish Chopra qede_fill_by_demand_stats(edev);
108942510dffSManish Chopra schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
109042510dffSManish Chopra }
109142510dffSManish Chopra
109242510dffSManish Chopra static void qede_init_periodic_task(struct qede_dev *edev)
109342510dffSManish Chopra {
109442510dffSManish Chopra INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
109542510dffSManish Chopra spin_lock_init(&edev->stats_lock);
109642510dffSManish Chopra edev->stats_coal_usecs = USEC_PER_SEC;
109742510dffSManish Chopra edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
109842510dffSManish Chopra }
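/* With the defaults set above (stats_coal_usecs = USEC_PER_SEC), the
 * periodic task re-arms itself so that qede_fill_by_demand_stats() runs
 * roughly once per second until the delayed work is cancelled.
 */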
109942510dffSManish Chopra
11000d8e0aa0SSudarsana Kalluru static void qede_sp_task(struct work_struct *work)
11010d8e0aa0SSudarsana Kalluru {
11020d8e0aa0SSudarsana Kalluru struct qede_dev *edev = container_of(work, struct qede_dev,
11030d8e0aa0SSudarsana Kalluru sp_task.work);
1104b18e170cSManish Chopra
11051159e25cSPrabhakar Kushwaha /* Disable execution of this deferred work once
11061159e25cSPrabhakar Kushwaha * qede removal is in progress; this stops any future
11071159e25cSPrabhakar Kushwaha * scheduling of sp_task.
11081159e25cSPrabhakar Kushwaha */
11091159e25cSPrabhakar Kushwaha if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
11101159e25cSPrabhakar Kushwaha return;
11111159e25cSPrabhakar Kushwaha
1112ccc67ef5STomer Tayar /* The locking scheme depends on the specific flag:
1113ccc67ef5STomer Tayar * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
1114ccc67ef5STomer Tayar * ensure that ongoing flows are ended and new ones are not started.
1115ccc67ef5STomer Tayar * In other cases - only the internal qede lock should be acquired.
1116ccc67ef5STomer Tayar */
1117ccc67ef5STomer Tayar
1118ccc67ef5STomer Tayar if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
111942510dffSManish Chopra cancel_delayed_work_sync(&edev->periodic_task);
1120ccc67ef5STomer Tayar #ifdef CONFIG_QED_SRIOV
1121ccc67ef5STomer Tayar /* SRIOV must be disabled outside the lock to avoid a deadlock.
1122ccc67ef5STomer Tayar * The recovery of the active VFs is currently not supported.
1123ccc67ef5STomer Tayar */
1124731815e7SSudarsana Reddy Kalluru if (pci_num_vf(edev->pdev))
1125ccc67ef5STomer Tayar qede_sriov_configure(edev->pdev, 0);
1126ccc67ef5STomer Tayar #endif
1127ccc67ef5STomer Tayar qede_lock(edev);
1128ccc67ef5STomer Tayar qede_recovery_handler(edev);
1129ccc67ef5STomer Tayar qede_unlock(edev);
1130ccc67ef5STomer Tayar }
1131ccc67ef5STomer Tayar
1132567b3c12SMintz, Yuval __qede_lock(edev);
11330d8e0aa0SSudarsana Kalluru
11340d8e0aa0SSudarsana Kalluru if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
1135567b3c12SMintz, Yuval if (edev->state == QEDE_STATE_OPEN)
11360d8e0aa0SSudarsana Kalluru qede_config_rx_mode(edev->ndev);
11370d8e0aa0SSudarsana Kalluru
1138e4917d46SChopra, Manish #ifdef CONFIG_RFS_ACCEL
1139e4917d46SChopra, Manish if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
1140e4917d46SChopra, Manish if (edev->state == QEDE_STATE_OPEN)
1141e4917d46SChopra, Manish qede_process_arfs_filters(edev, false);
1142e4917d46SChopra, Manish }
1143e4917d46SChopra, Manish #endif
1144a8736ea8SIgor Russkikh if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
1145a8736ea8SIgor Russkikh qede_generic_hw_err_handler(edev);
1146567b3c12SMintz, Yuval __qede_unlock(edev);
1147731815e7SSudarsana Reddy Kalluru
1148731815e7SSudarsana Reddy Kalluru if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
1149731815e7SSudarsana Reddy Kalluru #ifdef CONFIG_QED_SRIOV
1150731815e7SSudarsana Reddy Kalluru /* SRIOV must be disabled outside the lock to avoid a deadlock.
1151731815e7SSudarsana Reddy Kalluru * The recovery of the active VFs is currently not supported.
1152731815e7SSudarsana Reddy Kalluru */
1153731815e7SSudarsana Reddy Kalluru if (pci_num_vf(edev->pdev))
1154731815e7SSudarsana Reddy Kalluru qede_sriov_configure(edev->pdev, 0);
1155731815e7SSudarsana Reddy Kalluru #endif
1156731815e7SSudarsana Reddy Kalluru edev->ops->common->recovery_process(edev->cdev);
1157731815e7SSudarsana Reddy Kalluru }
11580d8e0aa0SSudarsana Kalluru }
11590d8e0aa0SSudarsana Kalluru
1160e712d52bSYuval Mintz static void qede_update_pf_params(struct qed_dev *cdev)
1161e712d52bSYuval Mintz {
1162e712d52bSYuval Mintz struct qed_pf_params pf_params;
11635e7baf0fSManish Chopra u16 num_cons;
1164e712d52bSYuval Mintz
1165cb6aeb07SMintz, Yuval /* 64 rx + 64 tx + 64 XDP */
1166e712d52bSYuval Mintz memset(&pf_params, 0, sizeof(struct qed_pf_params));
11675e7baf0fSManish Chopra
11685e7baf0fSManish Chopra /* 1 rx + 1 xdp + max tx cos */
11695e7baf0fSManish Chopra num_cons = QED_MIN_L2_CONS;
11705e7baf0fSManish Chopra
11715e7baf0fSManish Chopra pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
1172e7b80decSMintz, Yuval
1173e7b80decSMintz, Yuval /* Same for VFs - make sure they'll have sufficient connections
1174e7b80decSMintz, Yuval * to support XDP Tx queues.
1175e7b80decSMintz, Yuval */
1176e7b80decSMintz, Yuval pf_params.eth_pf_params.num_vf_cons = 48;
1177e7b80decSMintz, Yuval
1178e4917d46SChopra, Manish pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
1179e712d52bSYuval Mintz qed_ops->common->update_pf_params(cdev, &pf_params);
1180e712d52bSYuval Mintz }
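/* Rough sizing sketch (the constant values here are illustrative only, not
 * taken from the headers): if QED_MIN_L2_CONS were 3 (1 Rx + 1 XDP +
 * 1 Tx CoS) and MAX_SB_PER_PF_MIMD were 65, the PF would advertise
 * (65 - 1) * 3 = 192 L2 connections; num_vf_cons = 48 similarly leaves
 * each VF headroom for XDP Tx queues.
 */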
1181e712d52bSYuval Mintz
11826bc9f234SMintz, Yuval #define QEDE_FW_VER_STR_SIZE 80
11836bc9f234SMintz, Yuval
11846bc9f234SMintz, Yuval static void qede_log_probe(struct qede_dev *edev)
11856bc9f234SMintz, Yuval {
11866bc9f234SMintz, Yuval struct qed_dev_info *p_dev_info = &edev->dev_info.common;
11876bc9f234SMintz, Yuval u8 buf[QEDE_FW_VER_STR_SIZE];
11886bc9f234SMintz, Yuval size_t left_size;
11896bc9f234SMintz, Yuval
11906bc9f234SMintz, Yuval snprintf(buf, QEDE_FW_VER_STR_SIZE,
11916bc9f234SMintz, Yuval "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
11926bc9f234SMintz, Yuval p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
11936bc9f234SMintz, Yuval p_dev_info->fw_eng,
11946bc9f234SMintz, Yuval (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
11956bc9f234SMintz, Yuval QED_MFW_VERSION_3_OFFSET,
11966bc9f234SMintz, Yuval (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
11976bc9f234SMintz, Yuval QED_MFW_VERSION_2_OFFSET,
11986bc9f234SMintz, Yuval (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
11996bc9f234SMintz, Yuval QED_MFW_VERSION_1_OFFSET,
12006bc9f234SMintz, Yuval (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
12016bc9f234SMintz, Yuval QED_MFW_VERSION_0_OFFSET);
12026bc9f234SMintz, Yuval
12036bc9f234SMintz, Yuval left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
12046bc9f234SMintz, Yuval if (p_dev_info->mbi_version && left_size)
12056bc9f234SMintz, Yuval snprintf(buf + strlen(buf), left_size,
12066bc9f234SMintz, Yuval " [MBI %d.%d.%d]",
12076bc9f234SMintz, Yuval (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
12086bc9f234SMintz, Yuval QED_MBI_VERSION_2_OFFSET,
12096bc9f234SMintz, Yuval (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
12106bc9f234SMintz, Yuval QED_MBI_VERSION_1_OFFSET,
12116bc9f234SMintz, Yuval (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
12126bc9f234SMintz, Yuval QED_MBI_VERSION_0_OFFSET);
12136bc9f234SMintz, Yuval
12146bc9f234SMintz, Yuval pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
12156bc9f234SMintz, Yuval PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
12166bc9f234SMintz, Yuval buf, edev->ndev->name);
12176bc9f234SMintz, Yuval }
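/* The resulting probe line looks roughly like this (all version numbers
 * below are illustrative):
 *
 *   qede 03:00.00: Storm FW 8.37.7.0, Management FW 8.52.9.0
 *	[MBI 15.10.6] [eth0]
 */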
12186bc9f234SMintz, Yuval
1219e712d52bSYuval Mintz enum qede_probe_mode {
1220e712d52bSYuval Mintz QEDE_PROBE_NORMAL,
1221ccc67ef5STomer Tayar QEDE_PROBE_RECOVERY,
1222e712d52bSYuval Mintz };
1223e712d52bSYuval Mintz
1224e712d52bSYuval Mintz static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
12251408cc1fSYuval Mintz bool is_vf, enum qede_probe_mode mode)
1226e712d52bSYuval Mintz {
12271408cc1fSYuval Mintz struct qed_probe_params probe_params;
12281a635e48SYuval Mintz struct qed_slowpath_params sp_params;
1229e712d52bSYuval Mintz struct qed_dev_eth_info dev_info;
1230e712d52bSYuval Mintz struct qede_dev *edev;
1231e712d52bSYuval Mintz struct qed_dev *cdev;
1232e712d52bSYuval Mintz int rc;
1233e712d52bSYuval Mintz
1234e712d52bSYuval Mintz if (unlikely(dp_level & QED_LEVEL_INFO))
1235e712d52bSYuval Mintz pr_notice("Starting qede probe\n");
1236e712d52bSYuval Mintz
12371408cc1fSYuval Mintz memset(&probe_params, 0, sizeof(probe_params));
12381408cc1fSYuval Mintz probe_params.protocol = QED_PROTOCOL_ETH;
12391408cc1fSYuval Mintz probe_params.dp_module = dp_module;
12401408cc1fSYuval Mintz probe_params.dp_level = dp_level;
12411408cc1fSYuval Mintz probe_params.is_vf = is_vf;
1242ccc67ef5STomer Tayar probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
12431408cc1fSYuval Mintz cdev = qed_ops->common->probe(pdev, &probe_params);
1244e712d52bSYuval Mintz if (!cdev) {
1245e712d52bSYuval Mintz rc = -ENODEV;
1246e712d52bSYuval Mintz goto err0;
1247e712d52bSYuval Mintz }
1248e712d52bSYuval Mintz
1249e712d52bSYuval Mintz qede_update_pf_params(cdev);
1250e712d52bSYuval Mintz
1251e712d52bSYuval Mintz /* Start the Slowpath-process */
12521a635e48SYuval Mintz memset(&sp_params, 0, sizeof(sp_params));
12531a635e48SYuval Mintz sp_params.int_mode = QED_INT_MODE_MSIX;
1254f029c781SWolfram Sang strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
12551a635e48SYuval Mintz rc = qed_ops->common->slowpath_start(cdev, &sp_params);
1256e712d52bSYuval Mintz if (rc) {
1257e712d52bSYuval Mintz pr_notice("Cannot start slowpath\n");
1258e712d52bSYuval Mintz goto err1;
1259e712d52bSYuval Mintz }
1260e712d52bSYuval Mintz
1261e712d52bSYuval Mintz /* Learn information crucial for qede to progress */
1262e712d52bSYuval Mintz rc = qed_ops->fill_dev_info(cdev, &dev_info);
1263e712d52bSYuval Mintz if (rc)
1264e712d52bSYuval Mintz goto err2;
1265e712d52bSYuval Mintz
1266ccc67ef5STomer Tayar if (mode != QEDE_PROBE_RECOVERY) {
1267e712d52bSYuval Mintz edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
1268e712d52bSYuval Mintz dp_level);
1269e712d52bSYuval Mintz if (!edev) {
1270e712d52bSYuval Mintz rc = -ENOMEM;
1271e712d52bSYuval Mintz goto err2;
1272e712d52bSYuval Mintz }
1273755f982bSIgor Russkikh
1274755f982bSIgor Russkikh edev->devlink = qed_ops->common->devlink_register(cdev);
1275755f982bSIgor Russkikh if (IS_ERR(edev->devlink)) {
1276755f982bSIgor Russkikh DP_NOTICE(edev, "Cannot register devlink\n");
1277e6a54d6fSLeon Romanovsky rc = PTR_ERR(edev->devlink);
1278755f982bSIgor Russkikh edev->devlink = NULL;
1279e6a54d6fSLeon Romanovsky goto err3;
1280755f982bSIgor Russkikh }
1281ccc67ef5STomer Tayar } else {
1282ccc67ef5STomer Tayar struct net_device *ndev = pci_get_drvdata(pdev);
1283e6a54d6fSLeon Romanovsky struct qed_devlink *qdl;
1284ccc67ef5STomer Tayar
1285ccc67ef5STomer Tayar edev = netdev_priv(ndev);
1286e6a54d6fSLeon Romanovsky qdl = devlink_priv(edev->devlink);
1287755f982bSIgor Russkikh qdl->cdev = cdev;
1288ccc67ef5STomer Tayar edev->cdev = cdev;
1289ccc67ef5STomer Tayar memset(&edev->stats, 0, sizeof(edev->stats));
1290ccc67ef5STomer Tayar memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
1291ccc67ef5STomer Tayar }
1292e712d52bSYuval Mintz
1293fefb0202SYuval Mintz if (is_vf)
1294149d3775SSudarsana Reddy Kalluru set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
1295fefb0202SYuval Mintz
1296e712d52bSYuval Mintz qede_init_ndev(edev);
1297e712d52bSYuval Mintz
1298ccc67ef5STomer Tayar rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
1299cee9fbd8SRam Amrani if (rc)
1300cee9fbd8SRam Amrani goto err3;
1301cee9fbd8SRam Amrani
1302ccc67ef5STomer Tayar if (mode != QEDE_PROBE_RECOVERY) {
13033f2176ddSColin Ian King /* Prepare the lock prior to the registration of the netdev,
13040e0b80a9SMintz, Yuval * as once it's registered we might reach flows that require
13050e0b80a9SMintz, Yuval * it [it's even possible to reach such a flow directly from
13060e0b80a9SMintz, Yuval * the registration itself, although that's unlikely].
13070e0b80a9SMintz, Yuval */
13080e0b80a9SMintz, Yuval INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
13090e0b80a9SMintz, Yuval mutex_init(&edev->qede_lock);
131042510dffSManish Chopra qede_init_periodic_task(edev);
1311ccc67ef5STomer Tayar
13122950219dSYuval Mintz rc = register_netdev(edev->ndev);
13132950219dSYuval Mintz if (rc) {
13142950219dSYuval Mintz DP_NOTICE(edev, "Cannot register net-device\n");
1315cee9fbd8SRam Amrani goto err4;
13162950219dSYuval Mintz }
1317ccc67ef5STomer Tayar }
13182950219dSYuval Mintz
1319712c3cbfSMintz, Yuval edev->ops->common->set_name(cdev, edev->ndev->name);
1320e712d52bSYuval Mintz
13214c55215cSSudarsana Reddy Kalluru /* PTP not supported on VFs */
132203574497Ssudarsana.kalluru@cavium.com if (!is_vf)
13231c85f394SAlexander Lobakin qede_ptp_enable(edev);
13244c55215cSSudarsana Reddy Kalluru
1325a2ec6172SSudarsana Kalluru edev->ops->register_ops(cdev, &qede_ll_ops, edev);
1326a2ec6172SSudarsana Kalluru
1327489e45aeSSudarsana Reddy Kalluru #ifdef CONFIG_DCB
13285fe118c9SSudarsana Reddy Kalluru if (!IS_VF(edev))
1329489e45aeSSudarsana Reddy Kalluru qede_set_dcbnl_ops(edev->ndev);
1330489e45aeSSudarsana Reddy Kalluru #endif
1331489e45aeSSudarsana Reddy Kalluru
13323d789994SManish Chopra edev->rx_copybreak = QEDE_RX_HDR_SIZE;
13330d8e0aa0SSudarsana Kalluru
13346bc9f234SMintz, Yuval qede_log_probe(edev);
133542510dffSManish Chopra
133642510dffSManish Chopra /* Retain user config (for example, after recovery) */
133742510dffSManish Chopra if (edev->stats_coal_usecs)
133842510dffSManish Chopra schedule_delayed_work(&edev->periodic_task, 0);
133942510dffSManish Chopra
1340e712d52bSYuval Mintz return 0;
1341e712d52bSYuval Mintz
1342cee9fbd8SRam Amrani err4:
1343ccc67ef5STomer Tayar qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
13442950219dSYuval Mintz err3:
1345adc100d0SIgor Russkikh if (mode != QEDE_PROBE_RECOVERY)
13462950219dSYuval Mintz free_netdev(edev->ndev);
1347adc100d0SIgor Russkikh else
1348adc100d0SIgor Russkikh edev->cdev = NULL;
1349e712d52bSYuval Mintz err2:
1350e712d52bSYuval Mintz qed_ops->common->slowpath_stop(cdev);
1351e712d52bSYuval Mintz err1:
1352e712d52bSYuval Mintz qed_ops->common->remove(cdev);
1353e712d52bSYuval Mintz err0:
1354e712d52bSYuval Mintz return rc;
1355e712d52bSYuval Mintz }
1356e712d52bSYuval Mintz
1357e712d52bSYuval Mintz static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1358e712d52bSYuval Mintz {
1359fefb0202SYuval Mintz bool is_vf = false;
1360e712d52bSYuval Mintz u32 dp_module = 0;
1361e712d52bSYuval Mintz u8 dp_level = 0;
1362e712d52bSYuval Mintz
1363fefb0202SYuval Mintz switch ((enum qede_pci_private)id->driver_data) {
1364fefb0202SYuval Mintz case QEDE_PRIVATE_VF:
1365fefb0202SYuval Mintz if (debug & QED_LOG_VERBOSE_MASK)
1366fefb0202SYuval Mintz dev_err(&pdev->dev, "Probing a VF\n");
13676da95b52SAlok Prasad is_vf = true;
1368fefb0202SYuval Mintz break;
1369fefb0202SYuval Mintz default:
1370fefb0202SYuval Mintz if (debug & QED_LOG_VERBOSE_MASK)
1371fefb0202SYuval Mintz dev_err(&pdev->dev, "Probing a PF\n");
1372fefb0202SYuval Mintz }
1373fefb0202SYuval Mintz
1374e712d52bSYuval Mintz qede_config_debug(debug, &dp_module, &dp_level);
1375e712d52bSYuval Mintz
1376fefb0202SYuval Mintz return __qede_probe(pdev, dp_module, dp_level, is_vf,
1377e712d52bSYuval Mintz QEDE_PROBE_NORMAL);
1378e712d52bSYuval Mintz }
1379e712d52bSYuval Mintz
1380e712d52bSYuval Mintz enum qede_remove_mode {
1381e712d52bSYuval Mintz QEDE_REMOVE_NORMAL,
1382ccc67ef5STomer Tayar QEDE_REMOVE_RECOVERY,
1383e712d52bSYuval Mintz };
1384e712d52bSYuval Mintz
1385e712d52bSYuval Mintz static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1386e712d52bSYuval Mintz {
1387e712d52bSYuval Mintz struct net_device *ndev = pci_get_drvdata(pdev);
1388deabc871SManish Chopra struct qede_dev *edev;
1389deabc871SManish Chopra struct qed_dev *cdev;
1390deabc871SManish Chopra
1391deabc871SManish Chopra if (!ndev) {
1392deabc871SManish Chopra dev_info(&pdev->dev, "Device has already been removed\n");
1393deabc871SManish Chopra return;
1394deabc871SManish Chopra }
1395deabc871SManish Chopra
1396deabc871SManish Chopra edev = netdev_priv(ndev);
1397deabc871SManish Chopra cdev = edev->cdev;
1398e712d52bSYuval Mintz
1399e712d52bSYuval Mintz DP_INFO(edev, "Starting qede_remove\n");
1400e712d52bSYuval Mintz
1401ccc67ef5STomer Tayar qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
14022950219dSYuval Mintz
1403ccc67ef5STomer Tayar if (mode != QEDE_REMOVE_RECOVERY) {
14041159e25cSPrabhakar Kushwaha set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
1405ccc67ef5STomer Tayar unregister_netdev(ndev);
1406ccc67ef5STomer Tayar
1407ccc67ef5STomer Tayar cancel_delayed_work_sync(&edev->sp_task);
140842510dffSManish Chopra cancel_delayed_work_sync(&edev->periodic_task);
14094c55215cSSudarsana Reddy Kalluru
1410e712d52bSYuval Mintz edev->ops->common->set_power_state(cdev, PCI_D0);
1411e712d52bSYuval Mintz
1412e712d52bSYuval Mintz pci_set_drvdata(pdev, NULL);
1413ccc67ef5STomer Tayar }
1414ccc67ef5STomer Tayar
1415ccc67ef5STomer Tayar qede_ptp_disable(edev);
1416e712d52bSYuval Mintz
1417e712d52bSYuval Mintz /* Use global ops since we've freed edev */
1418e712d52bSYuval Mintz qed_ops->common->slowpath_stop(cdev);
141914d39648SMintz, Yuval if (system_state == SYSTEM_POWER_OFF)
142014d39648SMintz, Yuval return;
1421755f982bSIgor Russkikh
1422755f982bSIgor Russkikh if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
1423755f982bSIgor Russkikh qed_ops->common->devlink_unregister(edev->devlink);
1424755f982bSIgor Russkikh edev->devlink = NULL;
1425755f982bSIgor Russkikh }
1426e712d52bSYuval Mintz qed_ops->common->remove(cdev);
1427ec6c8059SAlexander Lobakin edev->cdev = NULL;
1428e712d52bSYuval Mintz
1429885185dfSMintz, Yuval /* Since this can happen out-of-sync with other flows,
1430885185dfSMintz, Yuval * don't release the netdevice until after slowpath stop
1431885185dfSMintz, Yuval * has been called to guarantee various other contexts
1432885185dfSMintz, Yuval * [e.g., QED register callbacks] won't break anything when
1433885185dfSMintz, Yuval * accessing the netdevice.
1434885185dfSMintz, Yuval */
1435b0ec5489SBhaskar Upadhaya if (mode != QEDE_REMOVE_RECOVERY) {
1436b0ec5489SBhaskar Upadhaya kfree(edev->coal_entry);
1437885185dfSMintz, Yuval free_netdev(ndev);
1438b0ec5489SBhaskar Upadhaya }
1439885185dfSMintz, Yuval
1440525ef5c0SYuval Mintz dev_info(&pdev->dev, "Ending qede_remove successfully\n");
1441e712d52bSYuval Mintz }
1442e712d52bSYuval Mintz
1443e712d52bSYuval Mintz static void qede_remove(struct pci_dev *pdev)
1444e712d52bSYuval Mintz {
1445e712d52bSYuval Mintz __qede_remove(pdev, QEDE_REMOVE_NORMAL);
1446e712d52bSYuval Mintz }
14472950219dSYuval Mintz
144814d39648SMintz, Yuval static void qede_shutdown(struct pci_dev *pdev)
144914d39648SMintz, Yuval {
145014d39648SMintz, Yuval __qede_remove(pdev, QEDE_REMOVE_NORMAL);
145114d39648SMintz, Yuval }
145214d39648SMintz, Yuval
14532950219dSYuval Mintz /* -------------------------------------------------------------------------
14542950219dSYuval Mintz * START OF LOAD / UNLOAD
14552950219dSYuval Mintz * -------------------------------------------------------------------------
14562950219dSYuval Mintz */
14572950219dSYuval Mintz
14582950219dSYuval Mintz static int qede_set_num_queues(struct qede_dev *edev)
14592950219dSYuval Mintz {
14602950219dSYuval Mintz int rc;
14612950219dSYuval Mintz u16 rss_num;
14622950219dSYuval Mintz
14632950219dSYuval Mintz /* Set up queues according to the available resources */
14649a4d7e86SSudarsana Reddy Kalluru if (edev->req_queues)
14659a4d7e86SSudarsana Reddy Kalluru rss_num = edev->req_queues;
14668edf049dSSudarsana Kalluru else
14672950219dSYuval Mintz rss_num = netif_get_num_default_rss_queues() *
14682950219dSYuval Mintz edev->dev_info.common.num_hwfns;
14692950219dSYuval Mintz
14702950219dSYuval Mintz rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
14712950219dSYuval Mintz
14722950219dSYuval Mintz rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
14732950219dSYuval Mintz if (rc > 0) {
14742950219dSYuval Mintz /* Managed to request interrupts for our queues */
14759a4d7e86SSudarsana Reddy Kalluru edev->num_queues = rc;
14762950219dSYuval Mintz DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
14779a4d7e86SSudarsana Reddy Kalluru QEDE_QUEUE_CNT(edev), rss_num);
14782950219dSYuval Mintz rc = 0;
14792950219dSYuval Mintz }
14809a4d7e86SSudarsana Reddy Kalluru
14819a4d7e86SSudarsana Reddy Kalluru edev->fp_num_tx = edev->req_num_tx;
14829a4d7e86SSudarsana Reddy Kalluru edev->fp_num_rx = edev->req_num_rx;
14839a4d7e86SSudarsana Reddy Kalluru
14842950219dSYuval Mintz return rc;
14852950219dSYuval Mintz }
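/* Example (illustrative numbers): on a two-hwfn device with 8 default RSS
 * queues per hwfn and no user override, rss_num starts at 16, is capped by
 * QEDE_MAX_RSS_CNT(edev), and the final num_queues is whatever interrupt
 * count set_fp_int() actually managed to reserve.
 */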
14862950219dSYuval Mintz
148771851ea5SSudarsana Reddy Kalluru static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
148871851ea5SSudarsana Reddy Kalluru u16 sb_id)
14892950219dSYuval Mintz {
149071851ea5SSudarsana Reddy Kalluru if (sb_info->sb_virt) {
149108eb1fb0SMichal Kalderon edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
149208eb1fb0SMichal Kalderon QED_SB_TYPE_L2_QUEUE);
14932950219dSYuval Mintz dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
14942950219dSYuval Mintz (void *)sb_info->sb_virt, sb_info->sb_phys);
149571851ea5SSudarsana Reddy Kalluru memset(sb_info, 0, sizeof(*sb_info));
149671851ea5SSudarsana Reddy Kalluru }
14972950219dSYuval Mintz }
14982950219dSYuval Mintz
14992950219dSYuval Mintz /* This function allocates fast-path status block memory */
15002950219dSYuval Mintz static int qede_alloc_mem_sb(struct qede_dev *edev,
15011a635e48SYuval Mintz struct qed_sb_info *sb_info, u16 sb_id)
15022950219dSYuval Mintz {
1503fb09a1edSShai Malin struct status_block *sb_virt;
15042950219dSYuval Mintz dma_addr_t sb_phys;
15052950219dSYuval Mintz int rc;
15062950219dSYuval Mintz
15072950219dSYuval Mintz sb_virt = dma_alloc_coherent(&edev->pdev->dev,
15081a635e48SYuval Mintz sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
15092950219dSYuval Mintz if (!sb_virt) {
15102950219dSYuval Mintz DP_ERR(edev, "Status block allocation failed\n");
15112950219dSYuval Mintz return -ENOMEM;
15122950219dSYuval Mintz }
15132950219dSYuval Mintz
15142950219dSYuval Mintz rc = edev->ops->common->sb_init(edev->cdev, sb_info,
15152950219dSYuval Mintz sb_virt, sb_phys, sb_id,
15162950219dSYuval Mintz QED_SB_TYPE_L2_QUEUE);
15172950219dSYuval Mintz if (rc) {
15182950219dSYuval Mintz DP_ERR(edev, "Status block initialization failed\n");
15192950219dSYuval Mintz dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
15202950219dSYuval Mintz sb_virt, sb_phys);
15212950219dSYuval Mintz return rc;
15222950219dSYuval Mintz }
15232950219dSYuval Mintz
15242950219dSYuval Mintz return 0;
15252950219dSYuval Mintz }
15262950219dSYuval Mintz
15272950219dSYuval Mintz static void qede_free_rx_buffers(struct qede_dev *edev,
15282950219dSYuval Mintz struct qede_rx_queue *rxq)
15292950219dSYuval Mintz {
15302950219dSYuval Mintz u16 i;
15312950219dSYuval Mintz
15322950219dSYuval Mintz for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
15332950219dSYuval Mintz struct sw_rx_data *rx_buf;
1534fc48b7a6SYuval Mintz struct page *data;
15352950219dSYuval Mintz
15362950219dSYuval Mintz rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
15372950219dSYuval Mintz data = rx_buf->data;
15382950219dSYuval Mintz
1539fc48b7a6SYuval Mintz dma_unmap_page(&edev->pdev->dev,
1540cb6aeb07SMintz, Yuval rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
15412950219dSYuval Mintz
15422950219dSYuval Mintz rx_buf->data = NULL;
1543fc48b7a6SYuval Mintz __free_page(data);
15442950219dSYuval Mintz }
15452950219dSYuval Mintz }
15462950219dSYuval Mintz
15471a635e48SYuval Mintz static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
15482950219dSYuval Mintz {
15492950219dSYuval Mintz /* Free rx buffers */
15502950219dSYuval Mintz qede_free_rx_buffers(edev, rxq);
15512950219dSYuval Mintz
15522950219dSYuval Mintz /* Free the parallel SW ring */
15532950219dSYuval Mintz kfree(rxq->sw_rx_ring);
15542950219dSYuval Mintz
15552950219dSYuval Mintz /* Free the real RQ ring used by FW */
15562950219dSYuval Mintz edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
15572950219dSYuval Mintz edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
15582950219dSYuval Mintz }
15592950219dSYuval Mintz
15608a863397SManish Chopra static void qede_set_tpa_param(struct qede_rx_queue *rxq)
156155482edcSManish Chopra {
156255482edcSManish Chopra int i;
156355482edcSManish Chopra
156455482edcSManish Chopra for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
156555482edcSManish Chopra struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
156655482edcSManish Chopra
156701e23015SMintz, Yuval tpa_info->state = QEDE_AGG_STATE_NONE;
156855482edcSManish Chopra }
156955482edcSManish Chopra }
157055482edcSManish Chopra
15712950219dSYuval Mintz /* This function allocates all memory needed per Rx queue */
15721a635e48SYuval Mintz static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
15732950219dSYuval Mintz {
1574b6db3f71SAlexander Lobakin struct qed_chain_init_params params = {
1575b6db3f71SAlexander Lobakin .cnt_type = QED_CHAIN_CNT_TYPE_U16,
1576b6db3f71SAlexander Lobakin .num_elems = RX_RING_SIZE,
1577b6db3f71SAlexander Lobakin };
1578b6db3f71SAlexander Lobakin struct qed_dev *cdev = edev->cdev;
1579f86af2dfSManish Chopra int i, rc, size;
15802950219dSYuval Mintz
15812950219dSYuval Mintz rxq->num_rx_buffers = edev->q_num_rx_buffers;
15822950219dSYuval Mintz
15831a635e48SYuval Mintz rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
15848a863397SManish Chopra
15858a863397SManish Chopra rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
15868a863397SManish Chopra size = rxq->rx_headroom +
15878a863397SManish Chopra SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
15881a635e48SYuval Mintz
158915ed8a47SMintz, Yuval /* Make sure that the headroom and payload fit in a single page */
15908a863397SManish Chopra if (rxq->rx_buf_size + size > PAGE_SIZE)
15918a863397SManish Chopra rxq->rx_buf_size = PAGE_SIZE - size;
1592fc48b7a6SYuval Mintz
1593bc1c5745SJesper Dangaard Brouer /* Segment size to split a page into multiple equal parts,
1594496e0517SMintz, Yuval * unless XDP is used, in which case we use the entire page.
1595496e0517SMintz, Yuval */
15968a863397SManish Chopra if (!edev->xdp_prog) {
15978a863397SManish Chopra size = size + rxq->rx_buf_size;
15988a863397SManish Chopra rxq->rx_buf_seg_size = roundup_pow_of_two(size);
15998a863397SManish Chopra } else {
1600496e0517SMintz, Yuval rxq->rx_buf_seg_size = PAGE_SIZE;
16014c8dc005SManish Chopra edev->ndev->features &= ~NETIF_F_GRO_HW;
16028a863397SManish Chopra }
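	/* Example with 4K pages (illustrative MTU): at MTU 1500 without XDP,
	 * headroom + buffer + shared-info overhead stays under 2K, so
	 * rx_buf_seg_size rounds up to 2048 and each page backs two buffers;
	 * with XDP every buffer owns a full page.
	 */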
16032950219dSYuval Mintz
16042950219dSYuval Mintz /* Allocate the parallel driver ring for Rx buffers */
1605fc48b7a6SYuval Mintz size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
16062950219dSYuval Mintz rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
16072950219dSYuval Mintz if (!rxq->sw_rx_ring) {
16082950219dSYuval Mintz DP_ERR(edev, "Rx buffers ring allocation failed\n");
1609f86af2dfSManish Chopra rc = -ENOMEM;
16102950219dSYuval Mintz goto err;
16112950219dSYuval Mintz }
16122950219dSYuval Mintz
16132950219dSYuval Mintz /* Allocate FW Rx ring */
1614b6db3f71SAlexander Lobakin params.mode = QED_CHAIN_MODE_NEXT_PTR;
1615b6db3f71SAlexander Lobakin params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
1616b6db3f71SAlexander Lobakin params.elem_size = sizeof(struct eth_rx_bd);
1617b6db3f71SAlexander Lobakin
1618b6db3f71SAlexander Lobakin rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, ¶ms);
16192950219dSYuval Mintz if (rc)
16202950219dSYuval Mintz goto err;
16212950219dSYuval Mintz
16222950219dSYuval Mintz /* Allocate FW completion ring */
1623b6db3f71SAlexander Lobakin params.mode = QED_CHAIN_MODE_PBL;
1624b6db3f71SAlexander Lobakin params.intended_use = QED_CHAIN_USE_TO_CONSUME;
1625b6db3f71SAlexander Lobakin params.elem_size = sizeof(union eth_rx_cqe);
1626b6db3f71SAlexander Lobakin
1627b6db3f71SAlexander Lobakin rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, ¶ms);
16282950219dSYuval Mintz if (rc)
16292950219dSYuval Mintz goto err;
16302950219dSYuval Mintz
16312950219dSYuval Mintz /* Allocate buffers for the Rx ring */
1632e3eef7eeSMintz, Yuval rxq->filled_buffers = 0;
16332950219dSYuval Mintz for (i = 0; i < rxq->num_rx_buffers; i++) {
1634e3eef7eeSMintz, Yuval rc = qede_alloc_rx_buffer(rxq, false);
1635f86af2dfSManish Chopra if (rc) {
1636f86af2dfSManish Chopra DP_ERR(edev,
1637f86af2dfSManish Chopra "Rx buffers allocation failed at index %d\n", i);
16382950219dSYuval Mintz goto err;
1639f86af2dfSManish Chopra }
16402950219dSYuval Mintz }
16412950219dSYuval Mintz
16424c8dc005SManish Chopra edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
16438a863397SManish Chopra if (!edev->gro_disable)
16448a863397SManish Chopra qede_set_tpa_param(rxq);
16452950219dSYuval Mintz err:
1646f86af2dfSManish Chopra return rc;
16472950219dSYuval Mintz }
16482950219dSYuval Mintz
16491a635e48SYuval Mintz static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
16502950219dSYuval Mintz {
16512950219dSYuval Mintz /* Free the parallel SW ring */
1652cb6aeb07SMintz, Yuval if (txq->is_xdp)
165389e1afc4SMintz, Yuval kfree(txq->sw_tx_ring.xdp);
1654cb6aeb07SMintz, Yuval else
1655cb6aeb07SMintz, Yuval kfree(txq->sw_tx_ring.skbs);
16562950219dSYuval Mintz
16572950219dSYuval Mintz /* Free the real Tx ring used by FW */
16582950219dSYuval Mintz edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
16592950219dSYuval Mintz }
16602950219dSYuval Mintz
16612950219dSYuval Mintz /* This function allocates all memory needed per Tx queue */
16621a635e48SYuval Mintz static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
16632950219dSYuval Mintz {
1664b6db3f71SAlexander Lobakin struct qed_chain_init_params params = {
1665b6db3f71SAlexander Lobakin .mode = QED_CHAIN_MODE_PBL,
1666b6db3f71SAlexander Lobakin .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1667b6db3f71SAlexander Lobakin .cnt_type = QED_CHAIN_CNT_TYPE_U16,
1668b6db3f71SAlexander Lobakin .num_elems = edev->q_num_tx_buffers,
1669b6db3f71SAlexander Lobakin .elem_size = sizeof(union eth_tx_bd_types),
1670b6db3f71SAlexander Lobakin };
1671cb6aeb07SMintz, Yuval int size, rc;
16722950219dSYuval Mintz
16732950219dSYuval Mintz txq->num_tx_buffers = edev->q_num_tx_buffers;
16742950219dSYuval Mintz
16752950219dSYuval Mintz /* Allocate the parallel driver ring for Tx buffers */
1676cb6aeb07SMintz, Yuval if (txq->is_xdp) {
16775a052d62SSudarsana Reddy Kalluru size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
167889e1afc4SMintz, Yuval txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
167989e1afc4SMintz, Yuval if (!txq->sw_tx_ring.xdp)
1680cb6aeb07SMintz, Yuval goto err;
1681cb6aeb07SMintz, Yuval } else {
16825a052d62SSudarsana Reddy Kalluru size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
1683cb6aeb07SMintz, Yuval txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1684cb6aeb07SMintz, Yuval if (!txq->sw_tx_ring.skbs)
16852950219dSYuval Mintz goto err;
16862950219dSYuval Mintz }
16872950219dSYuval Mintz
1688b6db3f71SAlexander Lobakin rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, ¶ms);
16892950219dSYuval Mintz if (rc)
16902950219dSYuval Mintz goto err;
16912950219dSYuval Mintz
16922950219dSYuval Mintz return 0;
16932950219dSYuval Mintz
16942950219dSYuval Mintz err:
16952950219dSYuval Mintz qede_free_mem_txq(edev, txq);
16962950219dSYuval Mintz return -ENOMEM;
16972950219dSYuval Mintz }
16982950219dSYuval Mintz
16992950219dSYuval Mintz /* This function frees all memory of a single fp */
17001a635e48SYuval Mintz static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
17012950219dSYuval Mintz {
170271851ea5SSudarsana Reddy Kalluru qede_free_mem_sb(edev, fp->sb_info, fp->id);
17032950219dSYuval Mintz
17049a4d7e86SSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_RX)
17052950219dSYuval Mintz qede_free_mem_rxq(edev, fp->rxq);
17062950219dSYuval Mintz
170792c43eb4SSuddarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_XDP)
170892c43eb4SSuddarsana Reddy Kalluru qede_free_mem_txq(edev, fp->xdp_tx);
170992c43eb4SSuddarsana Reddy Kalluru
17105e7baf0fSManish Chopra if (fp->type & QEDE_FASTPATH_TX) {
17115e7baf0fSManish Chopra int cos;
17125e7baf0fSManish Chopra
17135e7baf0fSManish Chopra for_each_cos_in_txq(edev, cos)
17145e7baf0fSManish Chopra qede_free_mem_txq(edev, &fp->txq[cos]);
17155e7baf0fSManish Chopra }
17162950219dSYuval Mintz }
17172950219dSYuval Mintz
17182950219dSYuval Mintz /* This function allocates all memory needed for a single fp (i.e. an entity
17199a4d7e86SSudarsana Reddy Kalluru * which contains a status block, one Rx queue and/or multiple per-TC Tx queues).
17202950219dSYuval Mintz */
17211a635e48SYuval Mintz static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
17222950219dSYuval Mintz {
1723cb6aeb07SMintz, Yuval int rc = 0;
17242950219dSYuval Mintz
17259a4d7e86SSudarsana Reddy Kalluru rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
17262950219dSYuval Mintz if (rc)
1727cb6aeb07SMintz, Yuval goto out;
17282950219dSYuval Mintz
17299a4d7e86SSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_RX) {
17302950219dSYuval Mintz rc = qede_alloc_mem_rxq(edev, fp->rxq);
17312950219dSYuval Mintz if (rc)
1732cb6aeb07SMintz, Yuval goto out;
1733cb6aeb07SMintz, Yuval }
1734cb6aeb07SMintz, Yuval
1735cb6aeb07SMintz, Yuval if (fp->type & QEDE_FASTPATH_XDP) {
1736cb6aeb07SMintz, Yuval rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1737cb6aeb07SMintz, Yuval if (rc)
1738cb6aeb07SMintz, Yuval goto out;
17399a4d7e86SSudarsana Reddy Kalluru }
17402950219dSYuval Mintz
17419a4d7e86SSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_TX) {
17425e7baf0fSManish Chopra int cos;
17435e7baf0fSManish Chopra
17445e7baf0fSManish Chopra for_each_cos_in_txq(edev, cos) {
17455e7baf0fSManish Chopra rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
17462950219dSYuval Mintz if (rc)
1747cb6aeb07SMintz, Yuval goto out;
17482950219dSYuval Mintz }
17495e7baf0fSManish Chopra }
17502950219dSYuval Mintz
1751cb6aeb07SMintz, Yuval out:
1752f86af2dfSManish Chopra return rc;
17532950219dSYuval Mintz }
17542950219dSYuval Mintz
17552950219dSYuval Mintz static void qede_free_mem_load(struct qede_dev *edev)
17562950219dSYuval Mintz {
17572950219dSYuval Mintz int i;
17582950219dSYuval Mintz
17599a4d7e86SSudarsana Reddy Kalluru for_each_queue(i) {
17602950219dSYuval Mintz struct qede_fastpath *fp = &edev->fp_array[i];
17612950219dSYuval Mintz
17622950219dSYuval Mintz qede_free_mem_fp(edev, fp);
17632950219dSYuval Mintz }
17642950219dSYuval Mintz }
17652950219dSYuval Mintz
17662950219dSYuval Mintz /* This function allocates all qede memory at NIC load. */
17672950219dSYuval Mintz static int qede_alloc_mem_load(struct qede_dev *edev)
17682950219dSYuval Mintz {
17699a4d7e86SSudarsana Reddy Kalluru int rc = 0, queue_id;
17702950219dSYuval Mintz
17719a4d7e86SSudarsana Reddy Kalluru for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
17729a4d7e86SSudarsana Reddy Kalluru struct qede_fastpath *fp = &edev->fp_array[queue_id];
17732950219dSYuval Mintz
17742950219dSYuval Mintz rc = qede_alloc_mem_fp(edev, fp);
1775f86af2dfSManish Chopra if (rc) {
17762950219dSYuval Mintz DP_ERR(edev,
1777f86af2dfSManish Chopra "Failed to allocate memory for fastpath - rss id = %d\n",
17789a4d7e86SSudarsana Reddy Kalluru queue_id);
1779f86af2dfSManish Chopra qede_free_mem_load(edev);
1780f86af2dfSManish Chopra return rc;
17812950219dSYuval Mintz }
17822950219dSYuval Mintz }
17832950219dSYuval Mintz
17842950219dSYuval Mintz return 0;
17852950219dSYuval Mintz }
17862950219dSYuval Mintz
1787ccc67ef5STomer Tayar static void qede_empty_tx_queue(struct qede_dev *edev,
1788ccc67ef5STomer Tayar struct qede_tx_queue *txq)
1789ccc67ef5STomer Tayar {
1790ccc67ef5STomer Tayar unsigned int pkts_compl = 0, bytes_compl = 0;
1791ccc67ef5STomer Tayar struct netdev_queue *netdev_txq;
1792ccc67ef5STomer Tayar int rc, len = 0;
1793ccc67ef5STomer Tayar
1794ccc67ef5STomer Tayar netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
1795ccc67ef5STomer Tayar
1796ccc67ef5STomer Tayar while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
1797ccc67ef5STomer Tayar qed_chain_get_prod_idx(&txq->tx_pbl)) {
1798ccc67ef5STomer Tayar DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1799ccc67ef5STomer Tayar "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1800ccc67ef5STomer Tayar txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
1801ccc67ef5STomer Tayar qed_chain_get_prod_idx(&txq->tx_pbl));
1802ccc67ef5STomer Tayar
1803ccc67ef5STomer Tayar rc = qede_free_tx_pkt(edev, txq, &len);
1804ccc67ef5STomer Tayar if (rc) {
1805ccc67ef5STomer Tayar DP_NOTICE(edev,
1806ccc67ef5STomer Tayar "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1807ccc67ef5STomer Tayar txq->index,
1808ccc67ef5STomer Tayar qed_chain_get_cons_idx(&txq->tx_pbl),
1809ccc67ef5STomer Tayar qed_chain_get_prod_idx(&txq->tx_pbl));
1810ccc67ef5STomer Tayar break;
1811ccc67ef5STomer Tayar }
1812ccc67ef5STomer Tayar
1813ccc67ef5STomer Tayar bytes_compl += len;
1814ccc67ef5STomer Tayar pkts_compl++;
1815ccc67ef5STomer Tayar txq->sw_tx_cons++;
1816ccc67ef5STomer Tayar }
1817ccc67ef5STomer Tayar
1818ccc67ef5STomer Tayar netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
1819ccc67ef5STomer Tayar }
1820ccc67ef5STomer Tayar
1821ccc67ef5STomer Tayar static void qede_empty_tx_queues(struct qede_dev *edev)
1822ccc67ef5STomer Tayar {
1823ccc67ef5STomer Tayar int i;
1824ccc67ef5STomer Tayar
1825ccc67ef5STomer Tayar for_each_queue(i)
1826ccc67ef5STomer Tayar if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
1827ccc67ef5STomer Tayar int cos;
1828ccc67ef5STomer Tayar
1829ccc67ef5STomer Tayar for_each_cos_in_txq(edev, cos) {
1830ccc67ef5STomer Tayar struct qede_fastpath *fp;
1831ccc67ef5STomer Tayar
1832ccc67ef5STomer Tayar fp = &edev->fp_array[i];
1833ccc67ef5STomer Tayar qede_empty_tx_queue(edev,
1834ccc67ef5STomer Tayar &fp->txq[cos]);
1835ccc67ef5STomer Tayar }
1836ccc67ef5STomer Tayar }
1837ccc67ef5STomer Tayar }
1838ccc67ef5STomer Tayar
18392950219dSYuval Mintz /* This function inits fp content and resets the SB, RXQ and TXQ structures */
18402950219dSYuval Mintz static void qede_init_fp(struct qede_dev *edev)
18412950219dSYuval Mintz {
184280439a17SMintz, Yuval int queue_id, rxq_index = 0, txq_index = 0;
18432950219dSYuval Mintz struct qede_fastpath *fp;
1844d1b25b79SAlexander Lobakin bool init_xdp = false;
18452950219dSYuval Mintz
18469a4d7e86SSudarsana Reddy Kalluru for_each_queue(queue_id) {
18479a4d7e86SSudarsana Reddy Kalluru fp = &edev->fp_array[queue_id];
18482950219dSYuval Mintz
18492950219dSYuval Mintz fp->edev = edev;
18509a4d7e86SSudarsana Reddy Kalluru fp->id = queue_id;
18512950219dSYuval Mintz
1852cb6aeb07SMintz, Yuval if (fp->type & QEDE_FASTPATH_XDP) {
1853cb6aeb07SMintz, Yuval fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1854cb6aeb07SMintz, Yuval rxq_index);
1855cb6aeb07SMintz, Yuval fp->xdp_tx->is_xdp = 1;
1856d1b25b79SAlexander Lobakin
1857d1b25b79SAlexander Lobakin spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
1858d1b25b79SAlexander Lobakin init_xdp = true;
1859cb6aeb07SMintz, Yuval }
18602950219dSYuval Mintz
18619a4d7e86SSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_RX) {
18629a4d7e86SSudarsana Reddy Kalluru fp->rxq->rxq_id = rxq_index++;
1863cb6aeb07SMintz, Yuval
1864cb6aeb07SMintz, Yuval /* Determine how to map buffers for this queue */
1865cb6aeb07SMintz, Yuval if (fp->type & QEDE_FASTPATH_XDP)
1866cb6aeb07SMintz, Yuval fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1867cb6aeb07SMintz, Yuval else
1868cb6aeb07SMintz, Yuval fp->rxq->data_direction = DMA_FROM_DEVICE;
18699eb22357SMintz, Yuval fp->rxq->dev = &edev->pdev->dev;
1870c0124f32SJesper Dangaard Brouer
1871c0124f32SJesper Dangaard Brouer /* Driver has no error path from here */
1872c0124f32SJesper Dangaard Brouer WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
1873b02e5a0eSBjörn Töpel fp->rxq->rxq_id, 0) < 0);
1874d1b25b79SAlexander Lobakin
1875d1b25b79SAlexander Lobakin if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
1876d1b25b79SAlexander Lobakin MEM_TYPE_PAGE_ORDER0,
1877d1b25b79SAlexander Lobakin NULL)) {
1878d1b25b79SAlexander Lobakin DP_NOTICE(edev,
1879d1b25b79SAlexander Lobakin "Failed to register XDP memory model\n");
1880d1b25b79SAlexander Lobakin }
18819a4d7e86SSudarsana Reddy Kalluru }
18822950219dSYuval Mintz
18839a4d7e86SSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_TX) {
18845e7baf0fSManish Chopra int cos;
18855e7baf0fSManish Chopra
18865e7baf0fSManish Chopra for_each_cos_in_txq(edev, cos) {
18875e7baf0fSManish Chopra struct qede_tx_queue *txq = &fp->txq[cos];
18885e7baf0fSManish Chopra u16 ndev_tx_id;
18895e7baf0fSManish Chopra
18905e7baf0fSManish Chopra txq->cos = cos;
18915e7baf0fSManish Chopra txq->index = txq_index;
18925e7baf0fSManish Chopra ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
18935e7baf0fSManish Chopra txq->ndev_txq_id = ndev_tx_id;
18945e7baf0fSManish Chopra
1895d8c2c7e3SYuval Mintz if (edev->dev_info.is_legacy)
189693e6044bSJason Yan txq->is_legacy = true;
18975e7baf0fSManish Chopra txq->dev = &edev->pdev->dev;
18985e7baf0fSManish Chopra }
18995e7baf0fSManish Chopra
19005e7baf0fSManish Chopra txq_index++;
19019a4d7e86SSudarsana Reddy Kalluru }
19022950219dSYuval Mintz
19032950219dSYuval Mintz snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
19049a4d7e86SSudarsana Reddy Kalluru edev->ndev->name, queue_id);
19052950219dSYuval Mintz }
1906d1b25b79SAlexander Lobakin
1907d1b25b79SAlexander Lobakin if (init_xdp) {
1908d1b25b79SAlexander Lobakin edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
1909d1b25b79SAlexander Lobakin DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
1910d1b25b79SAlexander Lobakin }
19112950219dSYuval Mintz }
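/* Naming example (illustrative): on a device named "eth0" with four
 * fastpaths, the loop above produces "eth0-fp-0" .. "eth0-fp-3"; these
 * names are later handed to request_irq() for the per-queue MSI-X vectors.
 */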
19122950219dSYuval Mintz
19132950219dSYuval Mintz static int qede_set_real_num_queues(struct qede_dev *edev)
19142950219dSYuval Mintz {
19152950219dSYuval Mintz int rc = 0;
19162950219dSYuval Mintz
19175e7baf0fSManish Chopra rc = netif_set_real_num_tx_queues(edev->ndev,
19185e7baf0fSManish Chopra QEDE_TSS_COUNT(edev) *
19195e7baf0fSManish Chopra edev->dev_info.num_tc);
19202950219dSYuval Mintz if (rc) {
19212950219dSYuval Mintz DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
19222950219dSYuval Mintz return rc;
19232950219dSYuval Mintz }
19249a4d7e86SSudarsana Reddy Kalluru
19259a4d7e86SSudarsana Reddy Kalluru rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
19262950219dSYuval Mintz if (rc) {
19272950219dSYuval Mintz DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
19282950219dSYuval Mintz return rc;
19292950219dSYuval Mintz }
19302950219dSYuval Mintz
19312950219dSYuval Mintz return 0;
19322950219dSYuval Mintz }
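/* Example (illustrative counts): with QEDE_TSS_COUNT(edev) = 8 and
 * num_tc = 4, the stack is told about 8 * 4 = 32 real Tx queues, while
 * the Rx side simply matches QEDE_RSS_COUNT(edev).
 */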
19332950219dSYuval Mintz
19342950219dSYuval Mintz static void qede_napi_disable_remove(struct qede_dev *edev)
19352950219dSYuval Mintz {
19362950219dSYuval Mintz int i;
19372950219dSYuval Mintz
19389a4d7e86SSudarsana Reddy Kalluru for_each_queue(i) {
19392950219dSYuval Mintz napi_disable(&edev->fp_array[i].napi);
19402950219dSYuval Mintz
19412950219dSYuval Mintz netif_napi_del(&edev->fp_array[i].napi);
19422950219dSYuval Mintz }
19432950219dSYuval Mintz }
19442950219dSYuval Mintz
19452950219dSYuval Mintz static void qede_napi_add_enable(struct qede_dev *edev)
19462950219dSYuval Mintz {
19472950219dSYuval Mintz int i;
19482950219dSYuval Mintz
19492950219dSYuval Mintz /* Add NAPI objects */
19509a4d7e86SSudarsana Reddy Kalluru for_each_queue(i) {
1951b48b89f9SJakub Kicinski netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
19522950219dSYuval Mintz napi_enable(&edev->fp_array[i].napi);
19532950219dSYuval Mintz }
19542950219dSYuval Mintz }
19552950219dSYuval Mintz
19562950219dSYuval Mintz static void qede_sync_free_irqs(struct qede_dev *edev)
19572950219dSYuval Mintz {
19582950219dSYuval Mintz int i;
19592950219dSYuval Mintz
19602950219dSYuval Mintz for (i = 0; i < edev->int_info.used_cnt; i++) {
19612950219dSYuval Mintz if (edev->int_info.msix_cnt) {
19622950219dSYuval Mintz free_irq(edev->int_info.msix[i].vector,
19632950219dSYuval Mintz &edev->fp_array[i]);
19642950219dSYuval Mintz } else {
19652950219dSYuval Mintz edev->ops->common->simd_handler_clean(edev->cdev, i);
19662950219dSYuval Mintz }
19672950219dSYuval Mintz }
19682950219dSYuval Mintz
19692950219dSYuval Mintz edev->int_info.used_cnt = 0;
1970e5434688SShai Malin edev->int_info.msix_cnt = 0;
19712950219dSYuval Mintz }
19722950219dSYuval Mintz
19732950219dSYuval Mintz static int qede_req_msix_irqs(struct qede_dev *edev)
19742950219dSYuval Mintz {
19752950219dSYuval Mintz int i, rc;
19762950219dSYuval Mintz
19772950219dSYuval Mintz /* Sanity check: the MSI-X vector count must cover all prepared RSS queues */
19789a4d7e86SSudarsana Reddy Kalluru if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
19792950219dSYuval Mintz DP_ERR(edev,
19802950219dSYuval Mintz "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
19819a4d7e86SSudarsana Reddy Kalluru QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
19822950219dSYuval Mintz return -EINVAL;
19832950219dSYuval Mintz }
19842950219dSYuval Mintz
19859a4d7e86SSudarsana Reddy Kalluru for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1986e4917d46SChopra, Manish #ifdef CONFIG_RFS_ACCEL
1987e4917d46SChopra, Manish struct qede_fastpath *fp = &edev->fp_array[i];
1988e4917d46SChopra, Manish
1989e4917d46SChopra, Manish if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1990e4917d46SChopra, Manish rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1991e4917d46SChopra, Manish edev->int_info.msix[i].vector);
1992e4917d46SChopra, Manish if (rc) {
1993e4917d46SChopra, Manish DP_ERR(edev, "Failed to add CPU rmap\n");
1994e4917d46SChopra, Manish qede_free_arfs(edev);
1995e4917d46SChopra, Manish }
1996e4917d46SChopra, Manish }
1997e4917d46SChopra, Manish #endif
19982950219dSYuval Mintz rc = request_irq(edev->int_info.msix[i].vector,
19992950219dSYuval Mintz qede_msix_fp_int, 0, edev->fp_array[i].name,
20002950219dSYuval Mintz &edev->fp_array[i]);
20012950219dSYuval Mintz if (rc) {
20022950219dSYuval Mintz DP_ERR(edev, "Request fp %d irq failed\n", i);
2003755f9053SAlok Prasad #ifdef CONFIG_RFS_ACCEL
2004755f9053SAlok Prasad if (edev->ndev->rx_cpu_rmap)
2005755f9053SAlok Prasad free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
2006755f9053SAlok Prasad
2007755f9053SAlok Prasad edev->ndev->rx_cpu_rmap = NULL;
2008755f9053SAlok Prasad #endif
20092950219dSYuval Mintz qede_sync_free_irqs(edev);
20102950219dSYuval Mintz return rc;
20112950219dSYuval Mintz }
20122950219dSYuval Mintz DP_VERBOSE(edev, NETIF_MSG_INTR,
20132950219dSYuval Mintz "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
20142950219dSYuval Mintz edev->fp_array[i].name, i,
20152950219dSYuval Mintz &edev->fp_array[i]);
20162950219dSYuval Mintz edev->int_info.used_cnt++;
20172950219dSYuval Mintz }
20182950219dSYuval Mintz
20192950219dSYuval Mintz return 0;
20202950219dSYuval Mintz }
20212950219dSYuval Mintz
20222950219dSYuval Mintz static void qede_simd_fp_handler(void *cookie)
20232950219dSYuval Mintz {
20242950219dSYuval Mintz struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
20252950219dSYuval Mintz
20262950219dSYuval Mintz napi_schedule_irqoff(&fp->napi);
20272950219dSYuval Mintz }
20282950219dSYuval Mintz
20292950219dSYuval Mintz static int qede_setup_irqs(struct qede_dev *edev)
20302950219dSYuval Mintz {
20312950219dSYuval Mintz int i, rc = 0;
20322950219dSYuval Mintz
20332950219dSYuval Mintz /* Learn Interrupt configuration */
20342950219dSYuval Mintz rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
20352950219dSYuval Mintz if (rc)
20362950219dSYuval Mintz return rc;
20372950219dSYuval Mintz
20382950219dSYuval Mintz if (edev->int_info.msix_cnt) {
20392950219dSYuval Mintz rc = qede_req_msix_irqs(edev);
20402950219dSYuval Mintz if (rc)
20412950219dSYuval Mintz return rc;
20422950219dSYuval Mintz edev->ndev->irq = edev->int_info.msix[0].vector;
20432950219dSYuval Mintz } else {
20442950219dSYuval Mintz const struct qed_common_ops *ops;
20452950219dSYuval Mintz
20462950219dSYuval Mintz /* qed should receive the RSS ids and callbacks */
20472950219dSYuval Mintz ops = edev->ops->common;
20489a4d7e86SSudarsana Reddy Kalluru for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
20492950219dSYuval Mintz ops->simd_handler_config(edev->cdev,
20502950219dSYuval Mintz &edev->fp_array[i], i,
20512950219dSYuval Mintz qede_simd_fp_handler);
20529a4d7e86SSudarsana Reddy Kalluru edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
20532950219dSYuval Mintz }
20542950219dSYuval Mintz return 0;
20552950219dSYuval Mintz }
20562950219dSYuval Mintz
20572950219dSYuval Mintz static int qede_drain_txq(struct qede_dev *edev,
20581a635e48SYuval Mintz struct qede_tx_queue *txq, bool allow_drain)
20592950219dSYuval Mintz {
20602950219dSYuval Mintz int rc, cnt = 1000;
20612950219dSYuval Mintz
20622950219dSYuval Mintz while (txq->sw_tx_cons != txq->sw_tx_prod) {
20632950219dSYuval Mintz if (!cnt) {
20642950219dSYuval Mintz if (allow_drain) {
20652950219dSYuval Mintz DP_NOTICE(edev,
20662950219dSYuval Mintz "Tx queue[%d] is stuck, requesting MCP to drain\n",
20672950219dSYuval Mintz txq->index);
20682950219dSYuval Mintz rc = edev->ops->common->drain(edev->cdev);
20692950219dSYuval Mintz if (rc)
20702950219dSYuval Mintz return rc;
20712950219dSYuval Mintz return qede_drain_txq(edev, txq, false);
20722950219dSYuval Mintz }
20732950219dSYuval Mintz DP_NOTICE(edev,
20742950219dSYuval Mintz "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
20752950219dSYuval Mintz txq->index, txq->sw_tx_prod,
20762950219dSYuval Mintz txq->sw_tx_cons);
20772950219dSYuval Mintz return -ENODEV;
20782950219dSYuval Mintz }
20792950219dSYuval Mintz cnt--;
20802950219dSYuval Mintz usleep_range(1000, 2000);
20812950219dSYuval Mintz barrier();
20822950219dSYuval Mintz }
20832950219dSYuval Mintz
20842950219dSYuval Mintz /* FW finished processing, wait for HW to transmit all tx packets */
20852950219dSYuval Mintz usleep_range(1000, 2000);
20862950219dSYuval Mintz
20872950219dSYuval Mintz return 0;
20882950219dSYuval Mintz }
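/* Worst case, the loop above runs 1000 iterations of
 * usleep_range(1000, 2000), i.e. roughly 1-2 seconds, before declaring
 * the Tx queue stuck.
 */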
20892950219dSYuval Mintz
20903da7a37aSMintz, Yuval static int qede_stop_txq(struct qede_dev *edev,
20913da7a37aSMintz, Yuval struct qede_tx_queue *txq, int rss_id)
20923da7a37aSMintz, Yuval {
2093bd4db888SAriel Elior /* delete doorbell from doorbell recovery mechanism */
2094bd4db888SAriel Elior edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
2095bd4db888SAriel Elior &txq->tx_db);
2096bd4db888SAriel Elior
20973da7a37aSMintz, Yuval return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
20983da7a37aSMintz, Yuval }
20993da7a37aSMintz, Yuval
21002950219dSYuval Mintz static int qede_stop_queues(struct qede_dev *edev)
21012950219dSYuval Mintz {
2102f29ffdb6SMintz, Yuval struct qed_update_vport_params *vport_update_params;
21032950219dSYuval Mintz struct qed_dev *cdev = edev->cdev;
210480439a17SMintz, Yuval struct qede_fastpath *fp;
210580439a17SMintz, Yuval int rc, i;
21062950219dSYuval Mintz
21072950219dSYuval Mintz /* Disable the vport */
2108f29ffdb6SMintz, Yuval vport_update_params = vzalloc(sizeof(*vport_update_params));
2109f29ffdb6SMintz, Yuval if (!vport_update_params)
2110f29ffdb6SMintz, Yuval return -ENOMEM;
21112950219dSYuval Mintz
2112f29ffdb6SMintz, Yuval vport_update_params->vport_id = 0;
2113f29ffdb6SMintz, Yuval vport_update_params->update_vport_active_flg = 1;
2114f29ffdb6SMintz, Yuval vport_update_params->vport_active_flg = 0;
2115f29ffdb6SMintz, Yuval vport_update_params->update_rss_flg = 0;
2116f29ffdb6SMintz, Yuval
2117f29ffdb6SMintz, Yuval rc = edev->ops->vport_update(cdev, vport_update_params);
2118f29ffdb6SMintz, Yuval vfree(vport_update_params);
2119f29ffdb6SMintz, Yuval
21202950219dSYuval Mintz if (rc) {
21212950219dSYuval Mintz DP_ERR(edev, "Failed to update vport\n");
21222950219dSYuval Mintz return rc;
21232950219dSYuval Mintz }
21242950219dSYuval Mintz
21252950219dSYuval Mintz /* Flush Tx queues. If needed, request drain from MCP */
21269a4d7e86SSudarsana Reddy Kalluru for_each_queue(i) {
212780439a17SMintz, Yuval fp = &edev->fp_array[i];
21282950219dSYuval Mintz
21299a4d7e86SSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_TX) {
21305e7baf0fSManish Chopra int cos;
21315e7baf0fSManish Chopra
21325e7baf0fSManish Chopra for_each_cos_in_txq(edev, cos) {
21335e7baf0fSManish Chopra rc = qede_drain_txq(edev, &fp->txq[cos], true);
21342950219dSYuval Mintz if (rc)
21352950219dSYuval Mintz return rc;
21362950219dSYuval Mintz }
21375e7baf0fSManish Chopra }
2138cb6aeb07SMintz, Yuval
2139cb6aeb07SMintz, Yuval if (fp->type & QEDE_FASTPATH_XDP) {
2140cb6aeb07SMintz, Yuval rc = qede_drain_txq(edev, fp->xdp_tx, true);
2141cb6aeb07SMintz, Yuval if (rc)
2142cb6aeb07SMintz, Yuval return rc;
2143cb6aeb07SMintz, Yuval }
21442950219dSYuval Mintz }
21452950219dSYuval Mintz
21462950219dSYuval Mintz /* Stop all Queues in reverse order */
21479a4d7e86SSudarsana Reddy Kalluru for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
214880439a17SMintz, Yuval fp = &edev->fp_array[i];
214980439a17SMintz, Yuval
21502950219dSYuval Mintz /* Stop the Tx Queue(s) */
215180439a17SMintz, Yuval if (fp->type & QEDE_FASTPATH_TX) {
21525e7baf0fSManish Chopra int cos;
21535e7baf0fSManish Chopra
21545e7baf0fSManish Chopra for_each_cos_in_txq(edev, cos) {
21555e7baf0fSManish Chopra rc = qede_stop_txq(edev, &fp->txq[cos], i);
215680439a17SMintz, Yuval if (rc)
21572950219dSYuval Mintz return rc;
21582950219dSYuval Mintz }
21595e7baf0fSManish Chopra }
21602950219dSYuval Mintz
21612950219dSYuval Mintz /* Stop the Rx Queue */
216280439a17SMintz, Yuval if (fp->type & QEDE_FASTPATH_RX) {
21633da7a37aSMintz, Yuval rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
21642950219dSYuval Mintz if (rc) {
21652950219dSYuval Mintz DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
21662950219dSYuval Mintz return rc;
21672950219dSYuval Mintz }
21682950219dSYuval Mintz }
2169496e0517SMintz, Yuval
2170cb6aeb07SMintz, Yuval /* Stop the XDP forwarding queue */
2171cb6aeb07SMintz, Yuval if (fp->type & QEDE_FASTPATH_XDP) {
2172cb6aeb07SMintz, Yuval rc = qede_stop_txq(edev, fp->xdp_tx, i);
2173cb6aeb07SMintz, Yuval if (rc)
2174cb6aeb07SMintz, Yuval return rc;
2175cb6aeb07SMintz, Yuval
2176496e0517SMintz, Yuval bpf_prog_put(fp->rxq->xdp_prog);
21779a4d7e86SSudarsana Reddy Kalluru }
2178cb6aeb07SMintz, Yuval }
21792950219dSYuval Mintz
21802950219dSYuval Mintz /* Stop the vport */
21812950219dSYuval Mintz rc = edev->ops->vport_stop(cdev, 0);
21822950219dSYuval Mintz if (rc)
21832950219dSYuval Mintz DP_ERR(edev, "Failed to stop VPORT\n");
21842950219dSYuval Mintz
21852950219dSYuval Mintz return rc;
21862950219dSYuval Mintz }
21872950219dSYuval Mintz
21883da7a37aSMintz, Yuval static int qede_start_txq(struct qede_dev *edev,
21893da7a37aSMintz, Yuval struct qede_fastpath *fp,
21903da7a37aSMintz, Yuval struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
21913da7a37aSMintz, Yuval {
21923da7a37aSMintz, Yuval dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
21933da7a37aSMintz, Yuval u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
21943da7a37aSMintz, Yuval struct qed_queue_start_common_params params;
21953da7a37aSMintz, Yuval struct qed_txq_start_ret_params ret_params;
21963da7a37aSMintz, Yuval int rc;
21973da7a37aSMintz, Yuval
21983da7a37aSMintz, Yuval 	memset(&params, 0, sizeof(params));
21993da7a37aSMintz, Yuval memset(&ret_params, 0, sizeof(ret_params));
22003da7a37aSMintz, Yuval
2201cb6aeb07SMintz, Yuval 	/* Let the XDP queue share the queue-zone with one of the regular txqs.
2202cb6aeb07SMintz, Yuval * We don't really care about its coalescing.
2203cb6aeb07SMintz, Yuval */
2204cb6aeb07SMintz, Yuval if (txq->is_xdp)
2205cb6aeb07SMintz, Yuval params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
2206cb6aeb07SMintz, Yuval else
22073da7a37aSMintz, Yuval params.queue_id = txq->index;
2208cb6aeb07SMintz, Yuval
2209f604b17dSMintz, Yuval params.p_sb = fp->sb_info;
22103da7a37aSMintz, Yuval params.sb_idx = sb_idx;
22115e7baf0fSManish Chopra params.tc = txq->cos;
22123da7a37aSMintz, Yuval
22133da7a37aSMintz, Yuval 	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
22143da7a37aSMintz, Yuval page_cnt, &ret_params);
22153da7a37aSMintz, Yuval if (rc) {
22163da7a37aSMintz, Yuval DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
22173da7a37aSMintz, Yuval return rc;
22183da7a37aSMintz, Yuval }
22193da7a37aSMintz, Yuval
22203da7a37aSMintz, Yuval txq->doorbell_addr = ret_params.p_doorbell;
22213da7a37aSMintz, Yuval txq->handle = ret_params.p_handle;
22223da7a37aSMintz, Yuval
22233da7a37aSMintz, Yuval 	/* Determine the associated FW consumer address */
22243da7a37aSMintz, Yuval txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
22253da7a37aSMintz, Yuval
22263da7a37aSMintz, Yuval /* Prepare the doorbell parameters */
22273da7a37aSMintz, Yuval SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
22283da7a37aSMintz, Yuval SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
22293da7a37aSMintz, Yuval SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
22303da7a37aSMintz, Yuval DQ_XCM_ETH_TX_BD_PROD_CMD);
22313da7a37aSMintz, Yuval txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
22323da7a37aSMintz, Yuval
2233bd4db888SAriel Elior /* register doorbell with doorbell recovery mechanism */
2234bd4db888SAriel Elior rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
2235bd4db888SAriel Elior &txq->tx_db, DB_REC_WIDTH_32B,
2236bd4db888SAriel Elior DB_REC_KERNEL);
2237bd4db888SAriel Elior
22383da7a37aSMintz, Yuval return rc;
22393da7a37aSMintz, Yuval }
22403da7a37aSMintz, Yuval
2241a0d26d5aSYuval Mintz static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
22422950219dSYuval Mintz {
2243088c8618SManish Chopra int vlan_removal_en = 1;
22442950219dSYuval Mintz struct qed_dev *cdev = edev->cdev;
2245fefb0202SYuval Mintz struct qed_dev_info *qed_info = &edev->dev_info.common;
2246f29ffdb6SMintz, Yuval struct qed_update_vport_params *vport_update_params;
2247f29ffdb6SMintz, Yuval struct qed_queue_start_common_params q_params;
2248088c8618SManish Chopra struct qed_start_vport_params start = {0};
224980439a17SMintz, Yuval int rc, i;
22502950219dSYuval Mintz
22519a4d7e86SSudarsana Reddy Kalluru if (!edev->num_queues) {
22522950219dSYuval Mintz DP_ERR(edev,
22532950219dSYuval Mintz 		       "Cannot update V-PORT to active as there are no Rx queues\n");
22542950219dSYuval Mintz return -EINVAL;
22552950219dSYuval Mintz }
22562950219dSYuval Mintz
2257f29ffdb6SMintz, Yuval vport_update_params = vzalloc(sizeof(*vport_update_params));
2258f29ffdb6SMintz, Yuval if (!vport_update_params)
2259f29ffdb6SMintz, Yuval return -ENOMEM;
2260f29ffdb6SMintz, Yuval
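	/* The double negation folds the ptp pointer into a 0/1 flag. */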
22614c55215cSSudarsana Reddy Kalluru start.handle_ptp_pkts = !!(edev->ptp);
226255482edcSManish Chopra start.gro_enable = !edev->gro_disable;
2263088c8618SManish Chopra start.mtu = edev->ndev->mtu;
2264088c8618SManish Chopra start.vport_id = 0;
2265088c8618SManish Chopra start.drop_ttl0 = true;
2266088c8618SManish Chopra start.remove_inner_vlan = vlan_removal_en;
22677f7a144fSYuval Mintz start.clear_stats = clear_stats;
2268088c8618SManish Chopra
2269088c8618SManish Chopra rc = edev->ops->vport_start(cdev, &start);
22702950219dSYuval Mintz
22712950219dSYuval Mintz if (rc) {
22722950219dSYuval Mintz DP_ERR(edev, "Start V-PORT failed %d\n", rc);
2273f29ffdb6SMintz, Yuval goto out;
22742950219dSYuval Mintz }
22752950219dSYuval Mintz
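	/* The 0xe below adds the 14-byte Ethernet header to the L3 MTU. */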
22762950219dSYuval Mintz DP_VERBOSE(edev, NETIF_MSG_IFUP,
22772950219dSYuval Mintz "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
2278088c8618SManish Chopra start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
22792950219dSYuval Mintz
22809a4d7e86SSudarsana Reddy Kalluru for_each_queue(i) {
22812950219dSYuval Mintz struct qede_fastpath *fp = &edev->fp_array[i];
22829a4d7e86SSudarsana Reddy Kalluru dma_addr_t p_phys_table;
22839a4d7e86SSudarsana Reddy Kalluru u32 page_cnt;
22849a4d7e86SSudarsana Reddy Kalluru
22859a4d7e86SSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_RX) {
22863da7a37aSMintz, Yuval struct qed_rxq_start_ret_params ret_params;
22879a4d7e86SSudarsana Reddy Kalluru struct qede_rx_queue *rxq = fp->rxq;
22889a4d7e86SSudarsana Reddy Kalluru __le16 *val;
22892950219dSYuval Mintz
22903da7a37aSMintz, Yuval memset(&ret_params, 0, sizeof(ret_params));
22912950219dSYuval Mintz memset(&q_params, 0, sizeof(q_params));
22929a4d7e86SSudarsana Reddy Kalluru q_params.queue_id = rxq->rxq_id;
22932950219dSYuval Mintz q_params.vport_id = 0;
2294f604b17dSMintz, Yuval q_params.p_sb = fp->sb_info;
22952950219dSYuval Mintz q_params.sb_idx = RX_PI;
22962950219dSYuval Mintz
22979a4d7e86SSudarsana Reddy Kalluru p_phys_table =
22989a4d7e86SSudarsana Reddy Kalluru qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
22999a4d7e86SSudarsana Reddy Kalluru page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
23009a4d7e86SSudarsana Reddy Kalluru
23013da7a37aSMintz, Yuval rc = edev->ops->q_rx_start(cdev, i, &q_params,
23029a4d7e86SSudarsana Reddy Kalluru rxq->rx_buf_size,
23039a4d7e86SSudarsana Reddy Kalluru rxq->rx_bd_ring.p_phys_addr,
23049a4d7e86SSudarsana Reddy Kalluru p_phys_table,
23053da7a37aSMintz, Yuval page_cnt, &ret_params);
23062950219dSYuval Mintz if (rc) {
23079a4d7e86SSudarsana Reddy Kalluru DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
23089a4d7e86SSudarsana Reddy Kalluru rc);
2309f29ffdb6SMintz, Yuval goto out;
23102950219dSYuval Mintz }
23112950219dSYuval Mintz
23123da7a37aSMintz, Yuval /* Use the return parameters */
23133da7a37aSMintz, Yuval rxq->hw_rxq_prod_addr = ret_params.p_prod;
23143da7a37aSMintz, Yuval rxq->handle = ret_params.p_handle;
23153da7a37aSMintz, Yuval
23169a4d7e86SSudarsana Reddy Kalluru val = &fp->sb_info->sb_virt->pi_array[RX_PI];
23179a4d7e86SSudarsana Reddy Kalluru rxq->hw_cons_ptr = val;
23182950219dSYuval Mintz
23199a4d7e86SSudarsana Reddy Kalluru qede_update_rx_prod(edev, rxq);
23209a4d7e86SSudarsana Reddy Kalluru }
23219a4d7e86SSudarsana Reddy Kalluru
2322496e0517SMintz, Yuval if (fp->type & QEDE_FASTPATH_XDP) {
2323cb6aeb07SMintz, Yuval rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
2324cb6aeb07SMintz, Yuval if (rc)
2325f29ffdb6SMintz, Yuval goto out;
2326cb6aeb07SMintz, Yuval
232785192dbfSAndrii Nakryiko bpf_prog_add(edev->xdp_prog, 1);
232885192dbfSAndrii Nakryiko fp->rxq->xdp_prog = edev->xdp_prog;
2329496e0517SMintz, Yuval }
2330496e0517SMintz, Yuval
233180439a17SMintz, Yuval if (fp->type & QEDE_FASTPATH_TX) {
23325e7baf0fSManish Chopra int cos;
23335e7baf0fSManish Chopra
23345e7baf0fSManish Chopra for_each_cos_in_txq(edev, cos) {
23355e7baf0fSManish Chopra rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
23365e7baf0fSManish Chopra TX_PI(cos));
23373da7a37aSMintz, Yuval if (rc)
2338f29ffdb6SMintz, Yuval goto out;
23392950219dSYuval Mintz }
23402950219dSYuval Mintz }
23415e7baf0fSManish Chopra }
23422950219dSYuval Mintz
23432950219dSYuval Mintz /* Prepare and send the vport enable */
2344f29ffdb6SMintz, Yuval vport_update_params->vport_id = start.vport_id;
2345f29ffdb6SMintz, Yuval vport_update_params->update_vport_active_flg = 1;
2346f29ffdb6SMintz, Yuval vport_update_params->vport_active_flg = 1;
23472950219dSYuval Mintz
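	/* Tx switching appears to be needed when traffic may flow between
	 * functions on the same port (inter-PF switch or SR-IOV VFs) and the
	 * device supports it; this is an editorial reading of the condition
	 * below.
	 */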
23480bc5fe85SSudarsana Reddy Kalluru if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
2349831bfb0eSYuval Mintz qed_info->tx_switching) {
2350f29ffdb6SMintz, Yuval vport_update_params->update_tx_switching_flg = 1;
2351f29ffdb6SMintz, Yuval vport_update_params->tx_switching_flg = 1;
2352831bfb0eSYuval Mintz }
2353831bfb0eSYuval Mintz
2354f29ffdb6SMintz, Yuval qede_fill_rss_params(edev, &vport_update_params->rss_params,
2355f29ffdb6SMintz, Yuval &vport_update_params->update_rss_flg);
2356961acdeaSSudarsana Reddy Kalluru
2357f29ffdb6SMintz, Yuval rc = edev->ops->vport_update(cdev, vport_update_params);
2358f29ffdb6SMintz, Yuval if (rc)
23592950219dSYuval Mintz DP_ERR(edev, "Update V-PORT failed %d\n", rc);
2360f29ffdb6SMintz, Yuval
2361f29ffdb6SMintz, Yuval out:
2362f29ffdb6SMintz, Yuval vfree(vport_update_params);
23632950219dSYuval Mintz return rc;
23642950219dSYuval Mintz }
23652950219dSYuval Mintz
23662950219dSYuval Mintz enum qede_unload_mode {
23672950219dSYuval Mintz QEDE_UNLOAD_NORMAL,
2368ccc67ef5STomer Tayar QEDE_UNLOAD_RECOVERY,
23692950219dSYuval Mintz };
23702950219dSYuval Mintz
2371567b3c12SMintz, Yuval static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
2372567b3c12SMintz, Yuval bool is_locked)
23732950219dSYuval Mintz {
2374a2ec6172SSudarsana Kalluru struct qed_link_params link_params;
23752950219dSYuval Mintz int rc;
23762950219dSYuval Mintz
23772950219dSYuval Mintz DP_INFO(edev, "Starting qede unload\n");
23782950219dSYuval Mintz
2379567b3c12SMintz, Yuval if (!is_locked)
2380567b3c12SMintz, Yuval __qede_lock(edev);
2381567b3c12SMintz, Yuval
2382f04e48dbSSudarsana Reddy Kalluru clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2383f04e48dbSSudarsana Reddy Kalluru
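	/* In recovery the state is left as QEDE_STATE_RECOVERY so that
	 * concurrent flows can tell a recovery teardown from a normal close.
	 */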
2384ccc67ef5STomer Tayar if (mode != QEDE_UNLOAD_RECOVERY)
23850d8e0aa0SSudarsana Kalluru edev->state = QEDE_STATE_CLOSED;
23860d8e0aa0SSudarsana Kalluru
2387bbfcd1e8SMichal Kalderon qede_rdma_dev_event_close(edev);
23882e7022d6SMintz, Yuval
23892950219dSYuval Mintz /* Close OS Tx */
23902950219dSYuval Mintz netif_tx_disable(edev->ndev);
23912950219dSYuval Mintz netif_carrier_off(edev->ndev);
23922950219dSYuval Mintz
2393ccc67ef5STomer Tayar if (mode != QEDE_UNLOAD_RECOVERY) {
2394a2ec6172SSudarsana Kalluru /* Reset the link */
2395a2ec6172SSudarsana Kalluru memset(&link_params, 0, sizeof(link_params));
2396a2ec6172SSudarsana Kalluru link_params.link_up = false;
2397a2ec6172SSudarsana Kalluru edev->ops->common->set_link(edev->cdev, &link_params);
2398ccc67ef5STomer Tayar
23992950219dSYuval Mintz rc = qede_stop_queues(edev);
24002950219dSYuval Mintz if (rc) {
2401755f9053SAlok Prasad #ifdef CONFIG_RFS_ACCEL
2402755f9053SAlok Prasad if (edev->dev_info.common.b_arfs_capable) {
2403755f9053SAlok Prasad qede_poll_for_freeing_arfs_filters(edev);
2404755f9053SAlok Prasad if (edev->ndev->rx_cpu_rmap)
2405755f9053SAlok Prasad free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
2406755f9053SAlok Prasad
2407755f9053SAlok Prasad edev->ndev->rx_cpu_rmap = NULL;
2408755f9053SAlok Prasad }
2409755f9053SAlok Prasad #endif
24102950219dSYuval Mintz qede_sync_free_irqs(edev);
24112950219dSYuval Mintz goto out;
24122950219dSYuval Mintz }
24132950219dSYuval Mintz
24142950219dSYuval Mintz DP_INFO(edev, "Stopped Queues\n");
2415ccc67ef5STomer Tayar }
24162950219dSYuval Mintz
24177c1bfcadSSudarsana Reddy Kalluru qede_vlan_mark_nonconfigured(edev);
24182950219dSYuval Mintz edev->ops->fastpath_stop(edev->cdev);
24193f2a2b8bSChopra, Manish
24200367f058SDmitry Bogdanov if (edev->dev_info.common.b_arfs_capable) {
2421e4917d46SChopra, Manish qede_poll_for_freeing_arfs_filters(edev);
2422e4917d46SChopra, Manish qede_free_arfs(edev);
2423e4917d46SChopra, Manish }
24243f2a2b8bSChopra, Manish
24252950219dSYuval Mintz /* Release the interrupts */
24262950219dSYuval Mintz qede_sync_free_irqs(edev);
24272950219dSYuval Mintz edev->ops->common->set_fp_int(edev->cdev, 0);
24282950219dSYuval Mintz
24292950219dSYuval Mintz qede_napi_disable_remove(edev);
24302950219dSYuval Mintz
2431ccc67ef5STomer Tayar if (mode == QEDE_UNLOAD_RECOVERY)
2432ccc67ef5STomer Tayar qede_empty_tx_queues(edev);
2433ccc67ef5STomer Tayar
24342950219dSYuval Mintz qede_free_mem_load(edev);
24352950219dSYuval Mintz qede_free_fp_array(edev);
24362950219dSYuval Mintz
24372950219dSYuval Mintz out:
2438567b3c12SMintz, Yuval if (!is_locked)
2439567b3c12SMintz, Yuval __qede_unlock(edev);
2440ccc67ef5STomer Tayar
2441ccc67ef5STomer Tayar if (mode != QEDE_UNLOAD_RECOVERY)
2442ccc67ef5STomer Tayar DP_NOTICE(edev, "Link is down\n");
2443ccc67ef5STomer Tayar
24449adebac3SSudarsana Reddy Kalluru edev->ptp_skip_txts = 0;
24459adebac3SSudarsana Reddy Kalluru
24462950219dSYuval Mintz DP_INFO(edev, "Ending qede unload\n");
24472950219dSYuval Mintz }
24482950219dSYuval Mintz
24492950219dSYuval Mintz enum qede_load_mode {
24502950219dSYuval Mintz QEDE_LOAD_NORMAL,
2451a0d26d5aSYuval Mintz QEDE_LOAD_RELOAD,
2452ccc67ef5STomer Tayar QEDE_LOAD_RECOVERY,
24532950219dSYuval Mintz };
24542950219dSYuval Mintz
2455567b3c12SMintz, Yuval static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2456567b3c12SMintz, Yuval bool is_locked)
24572950219dSYuval Mintz {
2458a2ec6172SSudarsana Kalluru struct qed_link_params link_params;
2459b0ec5489SBhaskar Upadhaya struct ethtool_coalesce coal = {};
24605e7baf0fSManish Chopra u8 num_tc;
2461b0ec5489SBhaskar Upadhaya int rc, i;
24622950219dSYuval Mintz
24632950219dSYuval Mintz DP_INFO(edev, "Starting qede load\n");
24642950219dSYuval Mintz
2465567b3c12SMintz, Yuval if (!is_locked)
2466567b3c12SMintz, Yuval __qede_lock(edev);
2467567b3c12SMintz, Yuval
24682950219dSYuval Mintz rc = qede_set_num_queues(edev);
24692950219dSYuval Mintz if (rc)
2470567b3c12SMintz, Yuval goto out;
24712950219dSYuval Mintz
24722950219dSYuval Mintz rc = qede_alloc_fp_array(edev);
24732950219dSYuval Mintz if (rc)
2474567b3c12SMintz, Yuval goto out;
24752950219dSYuval Mintz
24762950219dSYuval Mintz qede_init_fp(edev);
24772950219dSYuval Mintz
24782950219dSYuval Mintz rc = qede_alloc_mem_load(edev);
24792950219dSYuval Mintz if (rc)
24802950219dSYuval Mintz goto err1;
248180439a17SMintz, Yuval DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
248280439a17SMintz, Yuval QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
24832950219dSYuval Mintz
24842950219dSYuval Mintz rc = qede_set_real_num_queues(edev);
24852950219dSYuval Mintz if (rc)
24862950219dSYuval Mintz goto err2;
24872950219dSYuval Mintz
24880367f058SDmitry Bogdanov if (qede_alloc_arfs(edev)) {
24890367f058SDmitry Bogdanov edev->ndev->features &= ~NETIF_F_NTUPLE;
24900367f058SDmitry Bogdanov edev->dev_info.common.b_arfs_capable = false;
2491e4917d46SChopra, Manish }
24923f2a2b8bSChopra, Manish
24932950219dSYuval Mintz qede_napi_add_enable(edev);
24942950219dSYuval Mintz DP_INFO(edev, "Napi added and enabled\n");
24952950219dSYuval Mintz
24962950219dSYuval Mintz rc = qede_setup_irqs(edev);
24972950219dSYuval Mintz if (rc)
24982950219dSYuval Mintz goto err3;
24992950219dSYuval Mintz DP_INFO(edev, "Setup IRQs succeeded\n");
25002950219dSYuval Mintz
2501a0d26d5aSYuval Mintz rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
25022950219dSYuval Mintz if (rc)
25032950219dSYuval Mintz goto err4;
25042950219dSYuval Mintz DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
25052950219dSYuval Mintz
25065e7baf0fSManish Chopra num_tc = netdev_get_num_tc(edev->ndev);
25075e7baf0fSManish Chopra num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
25085e7baf0fSManish Chopra qede_setup_tc(edev->ndev, num_tc);
25095e7baf0fSManish Chopra
25107c1bfcadSSudarsana Reddy Kalluru /* Program un-configured VLANs */
25117c1bfcadSSudarsana Reddy Kalluru qede_configure_vlan_filters(edev);
25127c1bfcadSSudarsana Reddy Kalluru
2513f04e48dbSSudarsana Reddy Kalluru set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2514f04e48dbSSudarsana Reddy Kalluru
2515a2ec6172SSudarsana Kalluru /* Ask for link-up using current configuration */
2516a2ec6172SSudarsana Kalluru memset(&link_params, 0, sizeof(link_params));
2517a2ec6172SSudarsana Kalluru link_params.link_up = true;
2518a2ec6172SSudarsana Kalluru edev->ops->common->set_link(edev->cdev, &link_params);
2519a2ec6172SSudarsana Kalluru
2520567b3c12SMintz, Yuval edev->state = QEDE_STATE_OPEN;
2521567b3c12SMintz, Yuval
2522b0ec5489SBhaskar Upadhaya coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
2523b0ec5489SBhaskar Upadhaya coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
2524b0ec5489SBhaskar Upadhaya
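	/* Note: qede_lock is dropped around qede_set_per_coalesce() below,
	 * presumably because that helper acquires the lock itself; this
	 * reading is an assumption based on the unlock/lock pair.
	 */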
2525b0ec5489SBhaskar Upadhaya for_each_queue(i) {
2526b0ec5489SBhaskar Upadhaya if (edev->coal_entry[i].isvalid) {
2527b0ec5489SBhaskar Upadhaya coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
2528b0ec5489SBhaskar Upadhaya coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
2529b0ec5489SBhaskar Upadhaya }
2530b0ec5489SBhaskar Upadhaya __qede_unlock(edev);
2531b0ec5489SBhaskar Upadhaya qede_set_per_coalesce(edev->ndev, i, &coal);
2532b0ec5489SBhaskar Upadhaya __qede_lock(edev);
2533b0ec5489SBhaskar Upadhaya }
25342950219dSYuval Mintz 	DP_INFO(edev, "Ending qede load successfully\n");
25352950219dSYuval Mintz
2536567b3c12SMintz, Yuval goto out;
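/* Error unwind: each label below releases what was set up after the
 * previous one, in reverse order of the setup sequence above.
 */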
25372950219dSYuval Mintz err4:
25382950219dSYuval Mintz qede_sync_free_irqs(edev);
25392950219dSYuval Mintz err3:
25402950219dSYuval Mintz qede_napi_disable_remove(edev);
25412950219dSYuval Mintz err2:
25422950219dSYuval Mintz qede_free_mem_load(edev);
25432950219dSYuval Mintz err1:
25442950219dSYuval Mintz edev->ops->common->set_fp_int(edev->cdev, 0);
25452950219dSYuval Mintz qede_free_fp_array(edev);
25469a4d7e86SSudarsana Reddy Kalluru edev->num_queues = 0;
25479a4d7e86SSudarsana Reddy Kalluru edev->fp_num_tx = 0;
25489a4d7e86SSudarsana Reddy Kalluru edev->fp_num_rx = 0;
2549567b3c12SMintz, Yuval out:
2550567b3c12SMintz, Yuval if (!is_locked)
2551567b3c12SMintz, Yuval __qede_unlock(edev);
2552567b3c12SMintz, Yuval
25532950219dSYuval Mintz return rc;
25542950219dSYuval Mintz }
25552950219dSYuval Mintz
2556567b3c12SMintz, Yuval /* 'func' should be able to run between unload and reload, assuming the
2557567b3c12SMintz, Yuval  * interface is actually running, or afterwards in case it's currently DOWN.
2558133fac0eSSudarsana Kalluru  */
2559567b3c12SMintz, Yuval void qede_reload(struct qede_dev *edev,
2560567b3c12SMintz, Yuval struct qede_reload_args *args, bool is_locked)
2561567b3c12SMintz, Yuval {
2562567b3c12SMintz, Yuval if (!is_locked)
2563567b3c12SMintz, Yuval __qede_lock(edev);
2564133fac0eSSudarsana Kalluru
2565567b3c12SMintz, Yuval 	/* Since qede_lock is held, the internal state can't change even if
2566567b3c12SMintz, Yuval 	 * the netdev state starts transitioning. Check whether the current
2567567b3c12SMintz, Yuval 	 * internal configuration indicates the device is up, then reload.
2568567b3c12SMintz, Yuval 	 */
2569567b3c12SMintz, Yuval if (edev->state == QEDE_STATE_OPEN) {
2570567b3c12SMintz, Yuval qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2571567b3c12SMintz, Yuval if (args)
2572567b3c12SMintz, Yuval args->func(edev, args);
2573567b3c12SMintz, Yuval qede_load(edev, QEDE_LOAD_RELOAD, true);
2574133fac0eSSudarsana Kalluru
2575567b3c12SMintz, Yuval /* Since no one is going to do it for us, re-configure */
2576133fac0eSSudarsana Kalluru qede_config_rx_mode(edev->ndev);
2577567b3c12SMintz, Yuval } else if (args) {
2578567b3c12SMintz, Yuval args->func(edev, args);
2579567b3c12SMintz, Yuval }
2580567b3c12SMintz, Yuval
2581567b3c12SMintz, Yuval if (!is_locked)
2582567b3c12SMintz, Yuval __qede_unlock(edev);
2583133fac0eSSudarsana Kalluru }
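
/* A minimal usage sketch (hypothetical caller, assuming the qede_reload_args
 * layout from qede.h): args->func runs while the datapath is torn down, so it
 * can safely mutate configuration the fastpath reads. For example:
 *
 *	static void qede_args_set_mtu(struct qede_dev *edev,
 *				      struct qede_reload_args *args)
 *	{
 *		edev->ndev->mtu = args->u.mtu;
 *	}
 *
 *	struct qede_reload_args args = { .func = qede_args_set_mtu,
 *					 .u.mtu = new_mtu };
 *	qede_reload(edev, &args, false);
 */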
2584133fac0eSSudarsana Kalluru
25852950219dSYuval Mintz /* called with rtnl_lock */
25862950219dSYuval Mintz static int qede_open(struct net_device *ndev)
25872950219dSYuval Mintz {
25882950219dSYuval Mintz struct qede_dev *edev = netdev_priv(ndev);
2589b18e170cSManish Chopra int rc;
25902950219dSYuval Mintz
25912950219dSYuval Mintz netif_carrier_off(ndev);
25922950219dSYuval Mintz
25932950219dSYuval Mintz edev->ops->common->set_power_state(edev->cdev, PCI_D0);
25942950219dSYuval Mintz
2595567b3c12SMintz, Yuval rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2596b18e170cSManish Chopra if (rc)
2597b18e170cSManish Chopra return rc;
2598b18e170cSManish Chopra
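	/* Nudge the UDP tunnel core to replay offloaded port state now that
	 * the device is back up.
	 */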
25998cd160a2SJakub Kicinski udp_tunnel_nic_reset_ntf(ndev);
2600f9f082a9SAlexander Duyck
26010fefbfbaSSudarsana Kalluru edev->ops->common->update_drv_state(edev->cdev, true);
26020fefbfbaSSudarsana Kalluru
2603b18e170cSManish Chopra return 0;
26042950219dSYuval Mintz }
26052950219dSYuval Mintz
26062950219dSYuval Mintz static int qede_close(struct net_device *ndev)
26072950219dSYuval Mintz {
26082950219dSYuval Mintz struct qede_dev *edev = netdev_priv(ndev);
26092950219dSYuval Mintz
2610567b3c12SMintz, Yuval qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
26112950219dSYuval Mintz
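	/* cdev can be NULL here if a previous recovery attempt failed. */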
2612adc100d0SIgor Russkikh if (edev->cdev)
26130fefbfbaSSudarsana Kalluru edev->ops->common->update_drv_state(edev->cdev, false);
26140fefbfbaSSudarsana Kalluru
26152950219dSYuval Mintz return 0;
26162950219dSYuval Mintz }
26170d8e0aa0SSudarsana Kalluru
2618a2ec6172SSudarsana Kalluru static void qede_link_update(void *dev, struct qed_link_output *link)
2619a2ec6172SSudarsana Kalluru {
2620a2ec6172SSudarsana Kalluru struct qede_dev *edev = dev;
2621a2ec6172SSudarsana Kalluru
2622f04e48dbSSudarsana Reddy Kalluru if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
2623f04e48dbSSudarsana Reddy Kalluru DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
2624a2ec6172SSudarsana Kalluru return;
2625a2ec6172SSudarsana Kalluru }
2626a2ec6172SSudarsana Kalluru
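	/* Act only on actual transitions; the netdev carrier state mirrors
	 * what was last reported, so repeated notifications are no-ops.
	 */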
2627a2ec6172SSudarsana Kalluru if (link->link_up) {
26288e025ae2SYuval Mintz if (!netif_carrier_ok(edev->ndev)) {
2629a2ec6172SSudarsana Kalluru DP_NOTICE(edev, "Link is up\n");
2630a2ec6172SSudarsana Kalluru netif_tx_start_all_queues(edev->ndev);
2631a2ec6172SSudarsana Kalluru netif_carrier_on(edev->ndev);
26324609adc2SMichal Kalderon qede_rdma_dev_event_open(edev);
26338e025ae2SYuval Mintz }
2634a2ec6172SSudarsana Kalluru } else {
26358e025ae2SYuval Mintz if (netif_carrier_ok(edev->ndev)) {
2636a2ec6172SSudarsana Kalluru DP_NOTICE(edev, "Link is down\n");
2637a2ec6172SSudarsana Kalluru netif_tx_disable(edev->ndev);
2638a2ec6172SSudarsana Kalluru netif_carrier_off(edev->ndev);
26394609adc2SMichal Kalderon qede_rdma_dev_event_close(edev);
2640a2ec6172SSudarsana Kalluru }
2641a2ec6172SSudarsana Kalluru }
26428e025ae2SYuval Mintz }
2643d25b859cSSudarsana Reddy Kalluru
2644ccc67ef5STomer Tayar static void qede_schedule_recovery_handler(void *dev)
2645ccc67ef5STomer Tayar {
2646ccc67ef5STomer Tayar struct qede_dev *edev = dev;
2647ccc67ef5STomer Tayar
2648ccc67ef5STomer Tayar if (edev->state == QEDE_STATE_RECOVERY) {
2649ccc67ef5STomer Tayar DP_NOTICE(edev,
2650ccc67ef5STomer Tayar 			  "Avoid scheduling recovery handling since already in recovery state\n");
2651ccc67ef5STomer Tayar return;
2652ccc67ef5STomer Tayar }
2653ccc67ef5STomer Tayar
2654ccc67ef5STomer Tayar set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
2655ccc67ef5STomer Tayar schedule_delayed_work(&edev->sp_task, 0);
2656ccc67ef5STomer Tayar
2657ccc67ef5STomer Tayar DP_INFO(edev, "Scheduled a recovery handler\n");
2658ccc67ef5STomer Tayar }
2659ccc67ef5STomer Tayar
2660ccc67ef5STomer Tayar static void qede_recovery_failed(struct qede_dev *edev)
2661ccc67ef5STomer Tayar {
2662ccc67ef5STomer Tayar netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
2663ccc67ef5STomer Tayar
2664ccc67ef5STomer Tayar netif_device_detach(edev->ndev);
2665ccc67ef5STomer Tayar
2666ccc67ef5STomer Tayar if (edev->cdev)
2667ccc67ef5STomer Tayar edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
2668ccc67ef5STomer Tayar }
2669ccc67ef5STomer Tayar
2670ccc67ef5STomer Tayar static void qede_recovery_handler(struct qede_dev *edev)
2671ccc67ef5STomer Tayar {
2672ccc67ef5STomer Tayar u32 curr_state = edev->state;
2673ccc67ef5STomer Tayar int rc;
2674ccc67ef5STomer Tayar
2675ccc67ef5STomer Tayar DP_NOTICE(edev, "Starting a recovery process\n");
2676ccc67ef5STomer Tayar
2677ccc67ef5STomer Tayar 	/* No need to acquire the qede_lock first, since it is taken by
2678ccc67ef5STomer Tayar 	 * qede_sp_task before calling this function.
2679ccc67ef5STomer Tayar */
2680ccc67ef5STomer Tayar edev->state = QEDE_STATE_RECOVERY;
2681ccc67ef5STomer Tayar
2682ccc67ef5STomer Tayar edev->ops->common->recovery_prolog(edev->cdev);
2683ccc67ef5STomer Tayar
2684ccc67ef5STomer Tayar if (curr_state == QEDE_STATE_OPEN)
2685ccc67ef5STomer Tayar qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
2686ccc67ef5STomer Tayar
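	/* Tear the device down and probe it again in recovery mode; the
	 * recovery variants keep the netdev registered across the cycle.
	 */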
2687ccc67ef5STomer Tayar __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
2688ccc67ef5STomer Tayar
2689ccc67ef5STomer Tayar rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
2690ccc67ef5STomer Tayar IS_VF(edev), QEDE_PROBE_RECOVERY);
2691ccc67ef5STomer Tayar if (rc) {
2692ccc67ef5STomer Tayar edev->cdev = NULL;
2693ccc67ef5STomer Tayar goto err;
2694ccc67ef5STomer Tayar }
2695ccc67ef5STomer Tayar
2696ccc67ef5STomer Tayar if (curr_state == QEDE_STATE_OPEN) {
2697ccc67ef5STomer Tayar rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
2698ccc67ef5STomer Tayar if (rc)
2699ccc67ef5STomer Tayar goto err;
2700ccc67ef5STomer Tayar
2701ccc67ef5STomer Tayar qede_config_rx_mode(edev->ndev);
27028cd160a2SJakub Kicinski udp_tunnel_nic_reset_ntf(edev->ndev);
2703ccc67ef5STomer Tayar }
2704ccc67ef5STomer Tayar
2705ccc67ef5STomer Tayar edev->state = curr_state;
2706ccc67ef5STomer Tayar
2707ccc67ef5STomer Tayar DP_NOTICE(edev, "Recovery handling is done\n");
2708ccc67ef5STomer Tayar
2709ccc67ef5STomer Tayar return;
2710ccc67ef5STomer Tayar
2711ccc67ef5STomer Tayar err:
2712ccc67ef5STomer Tayar qede_recovery_failed(edev);
2713ccc67ef5STomer Tayar }
2714ccc67ef5STomer Tayar
2715a8736ea8SIgor Russkikh static void qede_atomic_hw_err_handler(struct qede_dev *edev)
2716a8736ea8SIgor Russkikh {
2717936c7ba4SIgor Russkikh struct qed_dev *cdev = edev->cdev;
2718936c7ba4SIgor Russkikh
2719a8736ea8SIgor Russkikh DP_NOTICE(edev,
2720a8736ea8SIgor Russkikh "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
2721a8736ea8SIgor Russkikh edev->err_flags);
2722a8736ea8SIgor Russkikh
2723a8736ea8SIgor Russkikh /* Get a call trace of the flow that led to the error */
2724a8736ea8SIgor Russkikh WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
2725a8736ea8SIgor Russkikh
2726936c7ba4SIgor Russkikh /* Prevent HW attentions from being reasserted */
2727936c7ba4SIgor Russkikh if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
2728936c7ba4SIgor Russkikh edev->ops->common->attn_clr_enable(cdev, true);
2729936c7ba4SIgor Russkikh
2730a8736ea8SIgor Russkikh DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
2731a8736ea8SIgor Russkikh }
2732a8736ea8SIgor Russkikh
2733a8736ea8SIgor Russkikh static void qede_generic_hw_err_handler(struct qede_dev *edev)
2734a8736ea8SIgor Russkikh {
2735a8736ea8SIgor Russkikh DP_NOTICE(edev,
2736a8736ea8SIgor Russkikh "Generic sleepable HW error handling started - err_flags 0x%lx\n",
2737a8736ea8SIgor Russkikh edev->err_flags);
2738a8736ea8SIgor Russkikh
2739755f9053SAlok Prasad if (edev->devlink) {
2740755f9053SAlok Prasad DP_NOTICE(edev, "Reporting fatal error to devlink\n");
27414f5a8db2SIgor Russkikh edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
2742755f9053SAlok Prasad }
27434f5a8db2SIgor Russkikh
2744a8736ea8SIgor Russkikh clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2745a8736ea8SIgor Russkikh
2746a8736ea8SIgor Russkikh DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
2747a8736ea8SIgor Russkikh }
2748a8736ea8SIgor Russkikh
2749a8736ea8SIgor Russkikh static void qede_set_hw_err_flags(struct qede_dev *edev,
2750a8736ea8SIgor Russkikh enum qed_hw_err_type err_type)
2751a8736ea8SIgor Russkikh {
2752a8736ea8SIgor Russkikh unsigned long err_flags = 0;
2753a8736ea8SIgor Russkikh
2754a8736ea8SIgor Russkikh switch (err_type) {
2755a8736ea8SIgor Russkikh case QED_HW_ERR_DMAE_FAIL:
2756a8736ea8SIgor Russkikh set_bit(QEDE_ERR_WARN, &err_flags);
2757a8736ea8SIgor Russkikh fallthrough;
2758a8736ea8SIgor Russkikh case QED_HW_ERR_MFW_RESP_FAIL:
2759a8736ea8SIgor Russkikh case QED_HW_ERR_HW_ATTN:
2760a8736ea8SIgor Russkikh case QED_HW_ERR_RAMROD_FAIL:
2761a8736ea8SIgor Russkikh case QED_HW_ERR_FW_ASSERT:
2762a8736ea8SIgor Russkikh set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
2763a8736ea8SIgor Russkikh set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
2764755f9053SAlok Prasad 		/* Mark this error as recoverable and start recovery */
2765755f9053SAlok Prasad set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
2766a8736ea8SIgor Russkikh break;
2767a8736ea8SIgor Russkikh
2768a8736ea8SIgor Russkikh default:
2769a8736ea8SIgor Russkikh DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
2770a8736ea8SIgor Russkikh break;
2771a8736ea8SIgor Russkikh }
2772a8736ea8SIgor Russkikh
2773a8736ea8SIgor Russkikh edev->err_flags |= err_flags;
2774a8736ea8SIgor Russkikh }
2775a8736ea8SIgor Russkikh
2776a8736ea8SIgor Russkikh static void qede_schedule_hw_err_handler(void *dev,
2777a8736ea8SIgor Russkikh enum qed_hw_err_type err_type)
2778a8736ea8SIgor Russkikh {
2779a8736ea8SIgor Russkikh struct qede_dev *edev = dev;
2780a8736ea8SIgor Russkikh
2781a8736ea8SIgor Russkikh /* Fan failure cannot be masked by handling of another HW error or by a
2782a8736ea8SIgor Russkikh * concurrent recovery process.
2783a8736ea8SIgor Russkikh */
2784a8736ea8SIgor Russkikh if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
2785a8736ea8SIgor Russkikh edev->state == QEDE_STATE_RECOVERY) &&
2786a8736ea8SIgor Russkikh err_type != QED_HW_ERR_FAN_FAIL) {
2787a8736ea8SIgor Russkikh DP_INFO(edev,
2788a8736ea8SIgor Russkikh 			"Avoid scheduling error handling while another HW error is being handled\n");
2789a8736ea8SIgor Russkikh return;
2790a8736ea8SIgor Russkikh }
2791a8736ea8SIgor Russkikh
2792a8736ea8SIgor Russkikh if (err_type >= QED_HW_ERR_LAST) {
2793a8736ea8SIgor Russkikh DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
2794a8736ea8SIgor Russkikh clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2795a8736ea8SIgor Russkikh return;
2796a8736ea8SIgor Russkikh }
2797a8736ea8SIgor Russkikh
27984f5a8db2SIgor Russkikh edev->last_err_type = err_type;
2799a8736ea8SIgor Russkikh qede_set_hw_err_flags(edev, err_type);
2800a8736ea8SIgor Russkikh qede_atomic_hw_err_handler(edev);
2801a8736ea8SIgor Russkikh set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
2802a8736ea8SIgor Russkikh schedule_delayed_work(&edev->sp_task, 0);
2803a8736ea8SIgor Russkikh
2804a8736ea8SIgor Russkikh 	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
2805a8736ea8SIgor Russkikh }
2806a8736ea8SIgor Russkikh
2807d25b859cSSudarsana Reddy Kalluru static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
2808d25b859cSSudarsana Reddy Kalluru {
2809d25b859cSSudarsana Reddy Kalluru struct netdev_queue *netdev_txq;
2810d25b859cSSudarsana Reddy Kalluru
28115e7baf0fSManish Chopra netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
2812d25b859cSSudarsana Reddy Kalluru if (netif_xmit_stopped(netdev_txq))
2813d25b859cSSudarsana Reddy Kalluru return true;
2814d25b859cSSudarsana Reddy Kalluru
2815d25b859cSSudarsana Reddy Kalluru return false;
2816d25b859cSSudarsana Reddy Kalluru }
2817d25b859cSSudarsana Reddy Kalluru
2818d25b859cSSudarsana Reddy Kalluru static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
2819d25b859cSSudarsana Reddy Kalluru {
2820d25b859cSSudarsana Reddy Kalluru struct qede_dev *edev = dev;
2821d25b859cSSudarsana Reddy Kalluru struct netdev_hw_addr *ha;
2822d25b859cSSudarsana Reddy Kalluru int i;
2823d25b859cSSudarsana Reddy Kalluru
2824d25b859cSSudarsana Reddy Kalluru if (edev->ndev->features & NETIF_F_IP_CSUM)
2825d25b859cSSudarsana Reddy Kalluru data->feat_flags |= QED_TLV_IP_CSUM;
2826d25b859cSSudarsana Reddy Kalluru if (edev->ndev->features & NETIF_F_TSO)
2827d25b859cSSudarsana Reddy Kalluru data->feat_flags |= QED_TLV_LSO;
2828d25b859cSSudarsana Reddy Kalluru
2829d25b859cSSudarsana Reddy Kalluru ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
28307ad9c26fSMiaohe Lin eth_zero_addr(data->mac[1]);
28317ad9c26fSMiaohe Lin eth_zero_addr(data->mac[2]);
2832d25b859cSSudarsana Reddy Kalluru /* Copy the first two UC macs */
2833d25b859cSSudarsana Reddy Kalluru netif_addr_lock_bh(edev->ndev);
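	/* mac[0] already holds the primary address, so the unicast list
	 * fills mac[1] and mac[2] only.
	 */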
2834d25b859cSSudarsana Reddy Kalluru i = 1;
2835d25b859cSSudarsana Reddy Kalluru netdev_for_each_uc_addr(ha, edev->ndev) {
2836d25b859cSSudarsana Reddy Kalluru ether_addr_copy(data->mac[i++], ha->addr);
2837d25b859cSSudarsana Reddy Kalluru if (i == QED_TLV_MAC_COUNT)
2838d25b859cSSudarsana Reddy Kalluru break;
2839d25b859cSSudarsana Reddy Kalluru }
2840d25b859cSSudarsana Reddy Kalluru
2841d25b859cSSudarsana Reddy Kalluru netif_addr_unlock_bh(edev->ndev);
2842d25b859cSSudarsana Reddy Kalluru }
2843d25b859cSSudarsana Reddy Kalluru
2844d25b859cSSudarsana Reddy Kalluru static void qede_get_eth_tlv_data(void *dev, void *data)
2845d25b859cSSudarsana Reddy Kalluru {
2846d25b859cSSudarsana Reddy Kalluru struct qed_mfw_tlv_eth *etlv = data;
2847d25b859cSSudarsana Reddy Kalluru struct qede_dev *edev = dev;
2848d25b859cSSudarsana Reddy Kalluru struct qede_fastpath *fp;
2849d25b859cSSudarsana Reddy Kalluru int i;
2850d25b859cSSudarsana Reddy Kalluru
2851d25b859cSSudarsana Reddy Kalluru 	etlv->lso_maxoff_size = 0xFFFF;
2852d25b859cSSudarsana Reddy Kalluru etlv->lso_maxoff_size_set = true;
2853d25b859cSSudarsana Reddy Kalluru etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
2854d25b859cSSudarsana Reddy Kalluru etlv->lso_minseg_size_set = true;
2855d25b859cSSudarsana Reddy Kalluru etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
2856d25b859cSSudarsana Reddy Kalluru etlv->prom_mode_set = true;
2857d25b859cSSudarsana Reddy Kalluru etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
2858d25b859cSSudarsana Reddy Kalluru etlv->tx_descr_size_set = true;
2859d25b859cSSudarsana Reddy Kalluru etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
2860d25b859cSSudarsana Reddy Kalluru etlv->rx_descr_size_set = true;
2861d25b859cSSudarsana Reddy Kalluru etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
2862d25b859cSSudarsana Reddy Kalluru etlv->iov_offload_set = true;
2863d25b859cSSudarsana Reddy Kalluru
2864d25b859cSSudarsana Reddy Kalluru 	/* Fill information regarding queues; this should be done under the qede
2865d25b859cSSudarsana Reddy Kalluru * lock to guarantee those don't change beneath our feet.
2866d25b859cSSudarsana Reddy Kalluru */
2867d25b859cSSudarsana Reddy Kalluru etlv->txqs_empty = true;
2868d25b859cSSudarsana Reddy Kalluru etlv->rxqs_empty = true;
2869d25b859cSSudarsana Reddy Kalluru etlv->num_txqs_full = 0;
2870d25b859cSSudarsana Reddy Kalluru etlv->num_rxqs_full = 0;
2871d25b859cSSudarsana Reddy Kalluru
2872d25b859cSSudarsana Reddy Kalluru __qede_lock(edev);
2873d25b859cSSudarsana Reddy Kalluru for_each_queue(i) {
2874d25b859cSSudarsana Reddy Kalluru fp = &edev->fp_array[i];
2875d25b859cSSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_TX) {
28765e7baf0fSManish Chopra struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
28775e7baf0fSManish Chopra
28785e7baf0fSManish Chopra if (txq->sw_tx_cons != txq->sw_tx_prod)
2879d25b859cSSudarsana Reddy Kalluru etlv->txqs_empty = false;
28805e7baf0fSManish Chopra if (qede_is_txq_full(edev, txq))
2881d25b859cSSudarsana Reddy Kalluru etlv->num_txqs_full++;
2882d25b859cSSudarsana Reddy Kalluru }
2883d25b859cSSudarsana Reddy Kalluru if (fp->type & QEDE_FASTPATH_RX) {
2884d25b859cSSudarsana Reddy Kalluru if (qede_has_rx_work(fp->rxq))
2885d25b859cSSudarsana Reddy Kalluru etlv->rxqs_empty = false;
2886d25b859cSSudarsana Reddy Kalluru
2887d25b859cSSudarsana Reddy Kalluru 			/* This one is a bit tricky; Firmware might stop
2888d25b859cSSudarsana Reddy Kalluru 			 * placing packets before the ring is completely
2889d25b859cSSudarsana Reddy Kalluru 			 * full, so give an approximation.
2890d25b859cSSudarsana Reddy Kalluru */
2891d25b859cSSudarsana Reddy Kalluru if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
2892d25b859cSSudarsana Reddy Kalluru qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
2893d25b859cSSudarsana Reddy Kalluru RX_RING_SIZE - 100)
2894d25b859cSSudarsana Reddy Kalluru etlv->num_rxqs_full++;
2895d25b859cSSudarsana Reddy Kalluru }
2896d25b859cSSudarsana Reddy Kalluru }
2897d25b859cSSudarsana Reddy Kalluru __qede_unlock(edev);
2898d25b859cSSudarsana Reddy Kalluru
2899d25b859cSSudarsana Reddy Kalluru etlv->txqs_empty_set = true;
2900d25b859cSSudarsana Reddy Kalluru etlv->rxqs_empty_set = true;
2901d25b859cSSudarsana Reddy Kalluru etlv->num_txqs_full_set = true;
2902d25b859cSSudarsana Reddy Kalluru etlv->num_rxqs_full_set = true;
2903d25b859cSSudarsana Reddy Kalluru }
2904731815e7SSudarsana Reddy Kalluru
2905731815e7SSudarsana Reddy Kalluru /**
290619198e4eSPrabhakar Kushwaha * qede_io_error_detected(): Called when PCI error is detected
290719198e4eSPrabhakar Kushwaha *
2908731815e7SSudarsana Reddy Kalluru * @pdev: Pointer to PCI device
2909731815e7SSudarsana Reddy Kalluru * @state: The current pci connection state
2910731815e7SSudarsana Reddy Kalluru *
291119198e4eSPrabhakar Kushwaha  * Return: pci_ers_result_t.
291219198e4eSPrabhakar Kushwaha *
2913731815e7SSudarsana Reddy Kalluru * This function is called after a PCI bus error affecting
2914731815e7SSudarsana Reddy Kalluru * this device has been detected.
2915731815e7SSudarsana Reddy Kalluru */
2916731815e7SSudarsana Reddy Kalluru static pci_ers_result_t
2917731815e7SSudarsana Reddy Kalluru qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2918731815e7SSudarsana Reddy Kalluru {
2919731815e7SSudarsana Reddy Kalluru struct net_device *dev = pci_get_drvdata(pdev);
2920731815e7SSudarsana Reddy Kalluru struct qede_dev *edev = netdev_priv(dev);
2921731815e7SSudarsana Reddy Kalluru
2922731815e7SSudarsana Reddy Kalluru if (!edev)
2923731815e7SSudarsana Reddy Kalluru return PCI_ERS_RESULT_NONE;
2924731815e7SSudarsana Reddy Kalluru
2925731815e7SSudarsana Reddy Kalluru DP_NOTICE(edev, "IO error detected [%d]\n", state);
2926731815e7SSudarsana Reddy Kalluru
2927731815e7SSudarsana Reddy Kalluru __qede_lock(edev);
2928731815e7SSudarsana Reddy Kalluru if (edev->state == QEDE_STATE_RECOVERY) {
2929731815e7SSudarsana Reddy Kalluru DP_NOTICE(edev, "Device already in the recovery state\n");
2930731815e7SSudarsana Reddy Kalluru __qede_unlock(edev);
2931731815e7SSudarsana Reddy Kalluru return PCI_ERS_RESULT_NONE;
2932731815e7SSudarsana Reddy Kalluru }
2933731815e7SSudarsana Reddy Kalluru
2934731815e7SSudarsana Reddy Kalluru /* PF handles the recovery of its VFs */
2935731815e7SSudarsana Reddy Kalluru if (IS_VF(edev)) {
2936731815e7SSudarsana Reddy Kalluru DP_VERBOSE(edev, QED_MSG_IOV,
2937731815e7SSudarsana Reddy Kalluru "VF recovery is handled by its PF\n");
2938731815e7SSudarsana Reddy Kalluru __qede_unlock(edev);
2939731815e7SSudarsana Reddy Kalluru return PCI_ERS_RESULT_RECOVERED;
2940731815e7SSudarsana Reddy Kalluru }
2941731815e7SSudarsana Reddy Kalluru
2942731815e7SSudarsana Reddy Kalluru /* Close OS Tx */
2943731815e7SSudarsana Reddy Kalluru netif_tx_disable(edev->ndev);
2944731815e7SSudarsana Reddy Kalluru netif_carrier_off(edev->ndev);
2945731815e7SSudarsana Reddy Kalluru
2946731815e7SSudarsana Reddy Kalluru set_bit(QEDE_SP_AER, &edev->sp_flags);
2947731815e7SSudarsana Reddy Kalluru schedule_delayed_work(&edev->sp_task, 0);
2948731815e7SSudarsana Reddy Kalluru
2949731815e7SSudarsana Reddy Kalluru __qede_unlock(edev);
2950731815e7SSudarsana Reddy Kalluru
2951731815e7SSudarsana Reddy Kalluru return PCI_ERS_RESULT_CAN_RECOVER;
2952731815e7SSudarsana Reddy Kalluru }