1 /* QLogic qede NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <linux/module.h> 33 #include <linux/pci.h> 34 #include <linux/version.h> 35 #include <linux/device.h> 36 #include <linux/netdevice.h> 37 #include <linux/etherdevice.h> 38 #include <linux/skbuff.h> 39 #include <linux/errno.h> 40 #include <linux/list.h> 41 #include <linux/string.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/interrupt.h> 44 #include <asm/byteorder.h> 45 #include <asm/param.h> 46 #include <linux/io.h> 47 #include <linux/netdev_features.h> 48 #include <linux/udp.h> 49 #include <linux/tcp.h> 50 #include <net/udp_tunnel.h> 51 #include <linux/ip.h> 52 #include <net/ipv6.h> 53 #include <net/tcp.h> 54 #include <linux/if_ether.h> 55 #include <linux/if_vlan.h> 56 #include <linux/pkt_sched.h> 57 #include <linux/ethtool.h> 58 #include <linux/in.h> 59 #include <linux/random.h> 60 #include <net/ip6_checksum.h> 61 #include <linux/bitops.h> 62 #include <linux/vmalloc.h> 63 #include <linux/qed/qede_roce.h> 64 #include "qede.h" 65 #include "qede_ptp.h" 66 67 static char version[] = 68 "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; 69 70 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver"); 71 MODULE_LICENSE("GPL"); 72 MODULE_VERSION(DRV_MODULE_VERSION); 73 74 static uint debug; 75 module_param(debug, uint, 0); 76 MODULE_PARM_DESC(debug, " Default debug msglevel"); 77 78 static const struct qed_eth_ops *qed_ops; 79 80 #define CHIP_NUM_57980S_40 0x1634 81 #define CHIP_NUM_57980S_10 0x1666 82 #define CHIP_NUM_57980S_MF 0x1636 83 #define CHIP_NUM_57980S_100 0x1644 84 #define CHIP_NUM_57980S_50 0x1654 85 #define CHIP_NUM_57980S_25 0x1656 86 #define CHIP_NUM_57980S_IOV 0x1664 87 #define CHIP_NUM_AH 0x8070 88 #define CHIP_NUM_AH_IOV 0x8090 89 90 #ifndef PCI_DEVICE_ID_NX2_57980E 91 #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 92 #define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10 93 #define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF 94 #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100 95 #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50 96 #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 97 #define PCI_DEVICE_ID_57980S_IOV 
CHIP_NUM_57980S_IOV 98 #define PCI_DEVICE_ID_AH CHIP_NUM_AH 99 #define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV 100 101 #endif 102 103 enum qede_pci_private { 104 QEDE_PRIVATE_PF, 105 QEDE_PRIVATE_VF 106 }; 107 108 static const struct pci_device_id qede_pci_tbl[] = { 109 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF}, 110 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF}, 111 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF}, 112 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, 113 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, 114 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, 115 #ifdef CONFIG_QED_SRIOV 116 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, 117 #endif 118 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF}, 119 #ifdef CONFIG_QED_SRIOV 120 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF}, 121 #endif 122 { 0 } 123 }; 124 125 MODULE_DEVICE_TABLE(pci, qede_pci_tbl); 126 127 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); 128 129 #define TX_TIMEOUT (5 * HZ) 130 131 /* Utilize last protocol index for XDP */ 132 #define XDP_PI 11 133 134 static void qede_remove(struct pci_dev *pdev); 135 static void qede_shutdown(struct pci_dev *pdev); 136 static void qede_link_update(void *dev, struct qed_link_output *link); 137 138 /* The qede lock is used to protect driver state change and driver flows that 139 * are not reentrant. 140 */ 141 void __qede_lock(struct qede_dev *edev) 142 { 143 mutex_lock(&edev->qede_lock); 144 } 145 146 void __qede_unlock(struct qede_dev *edev) 147 { 148 mutex_unlock(&edev->qede_lock); 149 } 150 151 #ifdef CONFIG_QED_SRIOV 152 static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, 153 __be16 vlan_proto) 154 { 155 struct qede_dev *edev = netdev_priv(ndev); 156 157 if (vlan > 4095) { 158 DP_NOTICE(edev, "Illegal vlan value %d\n", vlan); 159 return -EINVAL; 160 } 161 162 if (vlan_proto != htons(ETH_P_8021Q)) 163 return -EPROTONOSUPPORT; 164 165 DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n", 166 vlan, vf); 167 168 return edev->ops->iov->set_vlan(edev->cdev, vlan, vf); 169 } 170 171 static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac) 172 { 173 struct qede_dev *edev = netdev_priv(ndev); 174 175 DP_VERBOSE(edev, QED_MSG_IOV, 176 "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n", 177 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx); 178 179 if (!is_valid_ether_addr(mac)) { 180 DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n"); 181 return -EINVAL; 182 } 183 184 return edev->ops->iov->set_mac(edev->cdev, mac, vfidx); 185 } 186 187 static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) 188 { 189 struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); 190 struct qed_dev_info *qed_info = &edev->dev_info.common; 191 struct qed_update_vport_params *vport_params; 192 int rc; 193 194 vport_params = vzalloc(sizeof(*vport_params)); 195 if (!vport_params) 196 return -ENOMEM; 197 DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param); 198 199 rc = edev->ops->iov->configure(edev->cdev, num_vfs_param); 200 201 /* Enable/Disable Tx switching for PF */ 202 if ((rc == num_vfs_param) && netif_running(edev->ndev) && 203 qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { 204 vport_params->vport_id = 0; 205 vport_params->update_tx_switching_flg = 1; 206 vport_params->tx_switching_flg = num_vfs_param ? 
								      1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_roce_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static
int __init qede_init(void)
{
	int ret;

	pr_info("qede_init: %s\n", version);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
293 */ 294 ret = register_netdevice_notifier(&qede_netdev_notifier); 295 if (ret) { 296 pr_notice("Failed to register netdevice_notifier\n"); 297 qed_put_eth_ops(); 298 return -EINVAL; 299 } 300 301 ret = pci_register_driver(&qede_pci_driver); 302 if (ret) { 303 pr_notice("Failed to register driver\n"); 304 unregister_netdevice_notifier(&qede_netdev_notifier); 305 qed_put_eth_ops(); 306 return -EINVAL; 307 } 308 309 return 0; 310 } 311 312 static void __exit qede_cleanup(void) 313 { 314 if (debug & QED_LOG_INFO_MASK) 315 pr_info("qede_cleanup called\n"); 316 317 unregister_netdevice_notifier(&qede_netdev_notifier); 318 pci_unregister_driver(&qede_pci_driver); 319 qed_put_eth_ops(); 320 } 321 322 module_init(qede_init); 323 module_exit(qede_cleanup); 324 325 static int qede_open(struct net_device *ndev); 326 static int qede_close(struct net_device *ndev); 327 328 void qede_fill_by_demand_stats(struct qede_dev *edev) 329 { 330 struct qede_stats_common *p_common = &edev->stats.common; 331 struct qed_eth_stats stats; 332 333 edev->ops->get_vport_stats(edev->cdev, &stats); 334 335 p_common->no_buff_discards = stats.common.no_buff_discards; 336 p_common->packet_too_big_discard = stats.common.packet_too_big_discard; 337 p_common->ttl0_discard = stats.common.ttl0_discard; 338 p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes; 339 p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes; 340 p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes; 341 p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts; 342 p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts; 343 p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts; 344 p_common->mftag_filter_discards = stats.common.mftag_filter_discards; 345 p_common->mac_filter_discards = stats.common.mac_filter_discards; 346 347 p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes; 348 p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes; 349 p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes; 350 p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts; 351 p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts; 352 p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts; 353 p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts; 354 p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts; 355 p_common->coalesced_events = stats.common.tpa_coalesced_events; 356 p_common->coalesced_aborts_num = stats.common.tpa_aborts_num; 357 p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts; 358 p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes; 359 360 p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets; 361 p_common->rx_65_to_127_byte_packets = 362 stats.common.rx_65_to_127_byte_packets; 363 p_common->rx_128_to_255_byte_packets = 364 stats.common.rx_128_to_255_byte_packets; 365 p_common->rx_256_to_511_byte_packets = 366 stats.common.rx_256_to_511_byte_packets; 367 p_common->rx_512_to_1023_byte_packets = 368 stats.common.rx_512_to_1023_byte_packets; 369 p_common->rx_1024_to_1518_byte_packets = 370 stats.common.rx_1024_to_1518_byte_packets; 371 p_common->rx_crc_errors = stats.common.rx_crc_errors; 372 p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames; 373 p_common->rx_pause_frames = stats.common.rx_pause_frames; 374 p_common->rx_pfc_frames = stats.common.rx_pfc_frames; 375 p_common->rx_align_errors = stats.common.rx_align_errors; 376 p_common->rx_carrier_errors = stats.common.rx_carrier_errors; 377 p_common->rx_oversize_packets = stats.common.rx_oversize_packets; 378 p_common->rx_jabbers = stats.common.rx_jabbers; 
379 p_common->rx_undersize_packets = stats.common.rx_undersize_packets; 380 p_common->rx_fragments = stats.common.rx_fragments; 381 p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets; 382 p_common->tx_65_to_127_byte_packets = 383 stats.common.tx_65_to_127_byte_packets; 384 p_common->tx_128_to_255_byte_packets = 385 stats.common.tx_128_to_255_byte_packets; 386 p_common->tx_256_to_511_byte_packets = 387 stats.common.tx_256_to_511_byte_packets; 388 p_common->tx_512_to_1023_byte_packets = 389 stats.common.tx_512_to_1023_byte_packets; 390 p_common->tx_1024_to_1518_byte_packets = 391 stats.common.tx_1024_to_1518_byte_packets; 392 p_common->tx_pause_frames = stats.common.tx_pause_frames; 393 p_common->tx_pfc_frames = stats.common.tx_pfc_frames; 394 p_common->brb_truncates = stats.common.brb_truncates; 395 p_common->brb_discards = stats.common.brb_discards; 396 p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; 397 398 if (QEDE_IS_BB(edev)) { 399 struct qede_stats_bb *p_bb = &edev->stats.bb; 400 401 p_bb->rx_1519_to_1522_byte_packets = 402 stats.bb.rx_1519_to_1522_byte_packets; 403 p_bb->rx_1519_to_2047_byte_packets = 404 stats.bb.rx_1519_to_2047_byte_packets; 405 p_bb->rx_2048_to_4095_byte_packets = 406 stats.bb.rx_2048_to_4095_byte_packets; 407 p_bb->rx_4096_to_9216_byte_packets = 408 stats.bb.rx_4096_to_9216_byte_packets; 409 p_bb->rx_9217_to_16383_byte_packets = 410 stats.bb.rx_9217_to_16383_byte_packets; 411 p_bb->tx_1519_to_2047_byte_packets = 412 stats.bb.tx_1519_to_2047_byte_packets; 413 p_bb->tx_2048_to_4095_byte_packets = 414 stats.bb.tx_2048_to_4095_byte_packets; 415 p_bb->tx_4096_to_9216_byte_packets = 416 stats.bb.tx_4096_to_9216_byte_packets; 417 p_bb->tx_9217_to_16383_byte_packets = 418 stats.bb.tx_9217_to_16383_byte_packets; 419 p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count; 420 p_bb->tx_total_collisions = stats.bb.tx_total_collisions; 421 } else { 422 struct qede_stats_ah *p_ah = &edev->stats.ah; 423 424 p_ah->rx_1519_to_max_byte_packets = 425 stats.ah.rx_1519_to_max_byte_packets; 426 p_ah->tx_1519_to_max_byte_packets = 427 stats.ah.tx_1519_to_max_byte_packets; 428 } 429 } 430 431 static void qede_get_stats64(struct net_device *dev, 432 struct rtnl_link_stats64 *stats) 433 { 434 struct qede_dev *edev = netdev_priv(dev); 435 struct qede_stats_common *p_common; 436 437 qede_fill_by_demand_stats(edev); 438 p_common = &edev->stats.common; 439 440 stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + 441 p_common->rx_bcast_pkts; 442 stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + 443 p_common->tx_bcast_pkts; 444 445 stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + 446 p_common->rx_bcast_bytes; 447 stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + 448 p_common->tx_bcast_bytes; 449 450 stats->tx_errors = p_common->tx_err_drop_pkts; 451 stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts; 452 453 stats->rx_fifo_errors = p_common->no_buff_discards; 454 455 if (QEDE_IS_BB(edev)) 456 stats->collisions = edev->stats.bb.tx_total_collisions; 457 stats->rx_crc_errors = p_common->rx_crc_errors; 458 stats->rx_frame_errors = p_common->rx_align_errors; 459 } 460 461 #ifdef CONFIG_QED_SRIOV 462 static int qede_get_vf_config(struct net_device *dev, int vfidx, 463 struct ifla_vf_info *ivi) 464 { 465 struct qede_dev *edev = netdev_priv(dev); 466 467 if (!edev->ops) 468 return -EINVAL; 469 470 return edev->ops->iov->get_config(edev->cdev, vfidx, ivi); 471 } 472 473 
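/* .ndo_set_vf_rate callback: hand the requested min/max Tx rate limits for a
 * VF straight through to the qed IOV ops.
 */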
static int qede_set_vf_rate(struct net_device *dev, int vfidx, 474 int min_tx_rate, int max_tx_rate) 475 { 476 struct qede_dev *edev = netdev_priv(dev); 477 478 return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate, 479 max_tx_rate); 480 } 481 482 static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val) 483 { 484 struct qede_dev *edev = netdev_priv(dev); 485 486 if (!edev->ops) 487 return -EINVAL; 488 489 return edev->ops->iov->set_spoof(edev->cdev, vfidx, val); 490 } 491 492 static int qede_set_vf_link_state(struct net_device *dev, int vfidx, 493 int link_state) 494 { 495 struct qede_dev *edev = netdev_priv(dev); 496 497 if (!edev->ops) 498 return -EINVAL; 499 500 return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state); 501 } 502 503 static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting) 504 { 505 struct qede_dev *edev = netdev_priv(dev); 506 507 if (!edev->ops) 508 return -EINVAL; 509 510 return edev->ops->iov->set_trust(edev->cdev, vfidx, setting); 511 } 512 #endif 513 514 static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 515 { 516 struct qede_dev *edev = netdev_priv(dev); 517 518 if (!netif_running(dev)) 519 return -EAGAIN; 520 521 switch (cmd) { 522 case SIOCSHWTSTAMP: 523 return qede_ptp_hw_ts(edev, ifr); 524 default: 525 DP_VERBOSE(edev, QED_MSG_DEBUG, 526 "default IOCTL cmd 0x%x\n", cmd); 527 return -EOPNOTSUPP; 528 } 529 530 return 0; 531 } 532 533 static const struct net_device_ops qede_netdev_ops = { 534 .ndo_open = qede_open, 535 .ndo_stop = qede_close, 536 .ndo_start_xmit = qede_start_xmit, 537 .ndo_set_rx_mode = qede_set_rx_mode, 538 .ndo_set_mac_address = qede_set_mac_addr, 539 .ndo_validate_addr = eth_validate_addr, 540 .ndo_change_mtu = qede_change_mtu, 541 .ndo_do_ioctl = qede_ioctl, 542 #ifdef CONFIG_QED_SRIOV 543 .ndo_set_vf_mac = qede_set_vf_mac, 544 .ndo_set_vf_vlan = qede_set_vf_vlan, 545 .ndo_set_vf_trust = qede_set_vf_trust, 546 #endif 547 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, 548 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, 549 .ndo_set_features = qede_set_features, 550 .ndo_get_stats64 = qede_get_stats64, 551 #ifdef CONFIG_QED_SRIOV 552 .ndo_set_vf_link_state = qede_set_vf_link_state, 553 .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, 554 .ndo_get_vf_config = qede_get_vf_config, 555 .ndo_set_vf_rate = qede_set_vf_rate, 556 #endif 557 .ndo_udp_tunnel_add = qede_udp_tunnel_add, 558 .ndo_udp_tunnel_del = qede_udp_tunnel_del, 559 .ndo_features_check = qede_features_check, 560 .ndo_xdp = qede_xdp, 561 #ifdef CONFIG_RFS_ACCEL 562 .ndo_rx_flow_steer = qede_rx_flow_steer, 563 #endif 564 }; 565 566 static const struct net_device_ops qede_netdev_vf_ops = { 567 .ndo_open = qede_open, 568 .ndo_stop = qede_close, 569 .ndo_start_xmit = qede_start_xmit, 570 .ndo_set_rx_mode = qede_set_rx_mode, 571 .ndo_set_mac_address = qede_set_mac_addr, 572 .ndo_validate_addr = eth_validate_addr, 573 .ndo_change_mtu = qede_change_mtu, 574 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, 575 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, 576 .ndo_set_features = qede_set_features, 577 .ndo_get_stats64 = qede_get_stats64, 578 .ndo_udp_tunnel_add = qede_udp_tunnel_add, 579 .ndo_udp_tunnel_del = qede_udp_tunnel_del, 580 .ndo_features_check = qede_features_check, 581 }; 582 583 /* ------------------------------------------------------------------------- 584 * START OF PROBE / REMOVE 585 * ------------------------------------------------------------------------- 586 */ 587 588 static struct qede_dev 
*qede_alloc_etherdev(struct qed_dev *cdev,
		     struct pci_dev *pdev,
		     struct qed_dev_eth_info *info,
		     u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues, info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;
	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev))
		ndev->netdev_ops = &qede_netdev_vf_ops;
	else
		ndev->netdev_ops = &qede_netdev_ops;

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6;

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g.
memory allocation failed. 706 * b30 - enable all INFO prints. INFO prints are for major steps in the flow 707 * and provide important parameters. 708 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that 709 * module. VERBOSE prints are for tracking the specific flow in low level. 710 * 711 * Notice that the level should be that of the lowest required logs. 712 */ 713 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level) 714 { 715 *p_dp_level = QED_LEVEL_NOTICE; 716 *p_dp_module = 0; 717 718 if (debug & QED_LOG_VERBOSE_MASK) { 719 *p_dp_level = QED_LEVEL_VERBOSE; 720 *p_dp_module = (debug & 0x3FFFFFFF); 721 } else if (debug & QED_LOG_INFO_MASK) { 722 *p_dp_level = QED_LEVEL_INFO; 723 } else if (debug & QED_LOG_NOTICE_MASK) { 724 *p_dp_level = QED_LEVEL_NOTICE; 725 } 726 } 727 728 static void qede_free_fp_array(struct qede_dev *edev) 729 { 730 if (edev->fp_array) { 731 struct qede_fastpath *fp; 732 int i; 733 734 for_each_queue(i) { 735 fp = &edev->fp_array[i]; 736 737 kfree(fp->sb_info); 738 kfree(fp->rxq); 739 kfree(fp->xdp_tx); 740 kfree(fp->txq); 741 } 742 kfree(edev->fp_array); 743 } 744 745 edev->num_queues = 0; 746 edev->fp_num_tx = 0; 747 edev->fp_num_rx = 0; 748 } 749 750 static int qede_alloc_fp_array(struct qede_dev *edev) 751 { 752 u8 fp_combined, fp_rx = edev->fp_num_rx; 753 struct qede_fastpath *fp; 754 int i; 755 756 edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev), 757 sizeof(*edev->fp_array), GFP_KERNEL); 758 if (!edev->fp_array) { 759 DP_NOTICE(edev, "fp array allocation failed\n"); 760 goto err; 761 } 762 763 fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx; 764 765 /* Allocate the FP elements for Rx queues followed by combined and then 766 * the Tx. This ordering should be maintained so that the respective 767 * queues (Rx or Tx) will be together in the fastpath array and the 768 * associated ids will be sequential. 
769 */ 770 for_each_queue(i) { 771 fp = &edev->fp_array[i]; 772 773 fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL); 774 if (!fp->sb_info) { 775 DP_NOTICE(edev, "sb info struct allocation failed\n"); 776 goto err; 777 } 778 779 if (fp_rx) { 780 fp->type = QEDE_FASTPATH_RX; 781 fp_rx--; 782 } else if (fp_combined) { 783 fp->type = QEDE_FASTPATH_COMBINED; 784 fp_combined--; 785 } else { 786 fp->type = QEDE_FASTPATH_TX; 787 } 788 789 if (fp->type & QEDE_FASTPATH_TX) { 790 fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL); 791 if (!fp->txq) 792 goto err; 793 } 794 795 if (fp->type & QEDE_FASTPATH_RX) { 796 fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL); 797 if (!fp->rxq) 798 goto err; 799 800 if (edev->xdp_prog) { 801 fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx), 802 GFP_KERNEL); 803 if (!fp->xdp_tx) 804 goto err; 805 fp->type |= QEDE_FASTPATH_XDP; 806 } 807 } 808 } 809 810 return 0; 811 err: 812 qede_free_fp_array(edev); 813 return -ENOMEM; 814 } 815 816 static void qede_sp_task(struct work_struct *work) 817 { 818 struct qede_dev *edev = container_of(work, struct qede_dev, 819 sp_task.work); 820 821 __qede_lock(edev); 822 823 if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) 824 if (edev->state == QEDE_STATE_OPEN) 825 qede_config_rx_mode(edev->ndev); 826 827 #ifdef CONFIG_RFS_ACCEL 828 if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) { 829 if (edev->state == QEDE_STATE_OPEN) 830 qede_process_arfs_filters(edev, false); 831 } 832 #endif 833 __qede_unlock(edev); 834 } 835 836 static void qede_update_pf_params(struct qed_dev *cdev) 837 { 838 struct qed_pf_params pf_params; 839 840 /* 64 rx + 64 tx + 64 XDP */ 841 memset(&pf_params, 0, sizeof(struct qed_pf_params)); 842 pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3; 843 #ifdef CONFIG_RFS_ACCEL 844 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; 845 #endif 846 qed_ops->common->update_pf_params(cdev, &pf_params); 847 } 848 849 enum qede_probe_mode { 850 QEDE_PROBE_NORMAL, 851 }; 852 853 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, 854 bool is_vf, enum qede_probe_mode mode) 855 { 856 struct qed_probe_params probe_params; 857 struct qed_slowpath_params sp_params; 858 struct qed_dev_eth_info dev_info; 859 struct qede_dev *edev; 860 struct qed_dev *cdev; 861 int rc; 862 863 if (unlikely(dp_level & QED_LEVEL_INFO)) 864 pr_notice("Starting qede probe\n"); 865 866 memset(&probe_params, 0, sizeof(probe_params)); 867 probe_params.protocol = QED_PROTOCOL_ETH; 868 probe_params.dp_module = dp_module; 869 probe_params.dp_level = dp_level; 870 probe_params.is_vf = is_vf; 871 cdev = qed_ops->common->probe(pdev, &probe_params); 872 if (!cdev) { 873 rc = -ENODEV; 874 goto err0; 875 } 876 877 qede_update_pf_params(cdev); 878 879 /* Start the Slowpath-process */ 880 memset(&sp_params, 0, sizeof(sp_params)); 881 sp_params.int_mode = QED_INT_MODE_MSIX; 882 sp_params.drv_major = QEDE_MAJOR_VERSION; 883 sp_params.drv_minor = QEDE_MINOR_VERSION; 884 sp_params.drv_rev = QEDE_REVISION_VERSION; 885 sp_params.drv_eng = QEDE_ENGINEERING_VERSION; 886 strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); 887 rc = qed_ops->common->slowpath_start(cdev, &sp_params); 888 if (rc) { 889 pr_notice("Cannot start slowpath\n"); 890 goto err1; 891 } 892 893 /* Learn information crucial for qede to progress */ 894 rc = qed_ops->fill_dev_info(cdev, &dev_info); 895 if (rc) 896 goto err2; 897 898 edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, 899 dp_level); 900 if (!edev) { 901 rc = -ENOMEM; 
		goto err2;
	}

	if (is_vf)
		edev->flags |= QEDE_FLAG_IS_VF;

	qede_init_ndev(edev);

	rc = qede_roce_dev_add(edev);
	if (rc)
		goto err3;

	/* Prepare the lock prior to the registration of the netdev,
	 * as once it's registered we might reach flows requiring it
	 * [it's even possible to reach a flow needing it directly
	 * from there, although it's unlikely].
	 */
	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);
	rc = register_netdev(edev->ndev);
	if (rc) {
		DP_NOTICE(edev, "Cannot register net-device\n");
		goto err4;
	}

	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev, true);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	DP_INFO(edev, "Ending successfully qede probe\n");

	return 0;

err4:
	qede_roce_dev_remove(edev);
err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_dev *cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	unregister_netdev(ndev);
	cancel_delayed_work_sync(&edev->sp_task);

	qede_ptp_disable(edev);

	qede_roce_dev_remove(edev);

	edev->ops->common->set_power_state(cdev, PCI_D0);

	pci_set_drvdata(pdev, NULL);

	/* Release edev's reference to XDP's bpf if one exists */
	if (edev->xdp_prog)
		bpf_prog_put(edev->xdp_prog);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;
	qed_ops->common->remove(cdev);

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
1019 */ 1020 free_netdev(ndev); 1021 1022 dev_info(&pdev->dev, "Ending qede_remove successfully\n"); 1023 } 1024 1025 static void qede_remove(struct pci_dev *pdev) 1026 { 1027 __qede_remove(pdev, QEDE_REMOVE_NORMAL); 1028 } 1029 1030 static void qede_shutdown(struct pci_dev *pdev) 1031 { 1032 __qede_remove(pdev, QEDE_REMOVE_NORMAL); 1033 } 1034 1035 /* ------------------------------------------------------------------------- 1036 * START OF LOAD / UNLOAD 1037 * ------------------------------------------------------------------------- 1038 */ 1039 1040 static int qede_set_num_queues(struct qede_dev *edev) 1041 { 1042 int rc; 1043 u16 rss_num; 1044 1045 /* Setup queues according to possible resources*/ 1046 if (edev->req_queues) 1047 rss_num = edev->req_queues; 1048 else 1049 rss_num = netif_get_num_default_rss_queues() * 1050 edev->dev_info.common.num_hwfns; 1051 1052 rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num); 1053 1054 rc = edev->ops->common->set_fp_int(edev->cdev, rss_num); 1055 if (rc > 0) { 1056 /* Managed to request interrupts for our queues */ 1057 edev->num_queues = rc; 1058 DP_INFO(edev, "Managed %d [of %d] RSS queues\n", 1059 QEDE_QUEUE_CNT(edev), rss_num); 1060 rc = 0; 1061 } 1062 1063 edev->fp_num_tx = edev->req_num_tx; 1064 edev->fp_num_rx = edev->req_num_rx; 1065 1066 return rc; 1067 } 1068 1069 static void qede_free_mem_sb(struct qede_dev *edev, 1070 struct qed_sb_info *sb_info) 1071 { 1072 if (sb_info->sb_virt) 1073 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt), 1074 (void *)sb_info->sb_virt, sb_info->sb_phys); 1075 } 1076 1077 /* This function allocates fast-path status block memory */ 1078 static int qede_alloc_mem_sb(struct qede_dev *edev, 1079 struct qed_sb_info *sb_info, u16 sb_id) 1080 { 1081 struct status_block *sb_virt; 1082 dma_addr_t sb_phys; 1083 int rc; 1084 1085 sb_virt = dma_alloc_coherent(&edev->pdev->dev, 1086 sizeof(*sb_virt), &sb_phys, GFP_KERNEL); 1087 if (!sb_virt) { 1088 DP_ERR(edev, "Status block allocation failed\n"); 1089 return -ENOMEM; 1090 } 1091 1092 rc = edev->ops->common->sb_init(edev->cdev, sb_info, 1093 sb_virt, sb_phys, sb_id, 1094 QED_SB_TYPE_L2_QUEUE); 1095 if (rc) { 1096 DP_ERR(edev, "Status block initialization failed\n"); 1097 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt), 1098 sb_virt, sb_phys); 1099 return rc; 1100 } 1101 1102 return 0; 1103 } 1104 1105 static void qede_free_rx_buffers(struct qede_dev *edev, 1106 struct qede_rx_queue *rxq) 1107 { 1108 u16 i; 1109 1110 for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) { 1111 struct sw_rx_data *rx_buf; 1112 struct page *data; 1113 1114 rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX]; 1115 data = rx_buf->data; 1116 1117 dma_unmap_page(&edev->pdev->dev, 1118 rx_buf->mapping, PAGE_SIZE, rxq->data_direction); 1119 1120 rx_buf->data = NULL; 1121 __free_page(data); 1122 } 1123 } 1124 1125 static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) 1126 { 1127 int i; 1128 1129 if (edev->gro_disable) 1130 return; 1131 1132 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 1133 struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; 1134 struct sw_rx_data *replace_buf = &tpa_info->buffer; 1135 1136 if (replace_buf->data) { 1137 dma_unmap_page(&edev->pdev->dev, 1138 replace_buf->mapping, 1139 PAGE_SIZE, DMA_FROM_DEVICE); 1140 __free_page(replace_buf->data); 1141 } 1142 } 1143 } 1144 1145 static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) 1146 { 1147 qede_free_sge_mem(edev, rxq); 1148 1149 /* Free rx buffers */ 1150 
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	dma_addr_t mapping;
	int i;

	/* Don't perform FW aggregations in case of XDP */
	if (edev->xdp_prog)
		edev->gro_disable = 1;

	if (edev->gro_disable)
		return 0;

	if (edev->ndev->mtu > PAGE_SIZE) {
		edev->gro_disable = 1;
		return 0;
	}

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->buffer;

		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
		if (unlikely(!replace_buf->data)) {
			DP_NOTICE(edev,
				  "Failed to allocate TPA skb pool [replacement buffer]\n");
			goto err;
		}

		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
				       PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
			DP_NOTICE(edev,
				  "Failed to map TPA replacement buffer\n");
			goto err;
		}

		replace_buf->mapping = mapping;
		tpa_info->buffer.page_offset = 0;
		tpa_info->buffer_mapping = mapping;
		tpa_info->state = QEDE_AGG_STATE_NONE;
	}

	return 0;
err:
	qede_free_sge_mem(edev, rxq);
	edev->gro_disable = 1;
	return -ENOMEM;
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;

	/* Segment size to split a page into multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
1225 */ 1226 if (!edev->xdp_prog) 1227 rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size); 1228 else 1229 rxq->rx_buf_seg_size = PAGE_SIZE; 1230 1231 /* Allocate the parallel driver ring for Rx buffers */ 1232 size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE; 1233 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); 1234 if (!rxq->sw_rx_ring) { 1235 DP_ERR(edev, "Rx buffers ring allocation failed\n"); 1236 rc = -ENOMEM; 1237 goto err; 1238 } 1239 1240 /* Allocate FW Rx ring */ 1241 rc = edev->ops->common->chain_alloc(edev->cdev, 1242 QED_CHAIN_USE_TO_CONSUME_PRODUCE, 1243 QED_CHAIN_MODE_NEXT_PTR, 1244 QED_CHAIN_CNT_TYPE_U16, 1245 RX_RING_SIZE, 1246 sizeof(struct eth_rx_bd), 1247 &rxq->rx_bd_ring); 1248 1249 if (rc) 1250 goto err; 1251 1252 /* Allocate FW completion ring */ 1253 rc = edev->ops->common->chain_alloc(edev->cdev, 1254 QED_CHAIN_USE_TO_CONSUME, 1255 QED_CHAIN_MODE_PBL, 1256 QED_CHAIN_CNT_TYPE_U16, 1257 RX_RING_SIZE, 1258 sizeof(union eth_rx_cqe), 1259 &rxq->rx_comp_ring); 1260 if (rc) 1261 goto err; 1262 1263 /* Allocate buffers for the Rx ring */ 1264 rxq->filled_buffers = 0; 1265 for (i = 0; i < rxq->num_rx_buffers; i++) { 1266 rc = qede_alloc_rx_buffer(rxq, false); 1267 if (rc) { 1268 DP_ERR(edev, 1269 "Rx buffers allocation failed at index %d\n", i); 1270 goto err; 1271 } 1272 } 1273 1274 rc = qede_alloc_sge_mem(edev, rxq); 1275 err: 1276 return rc; 1277 } 1278 1279 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) 1280 { 1281 /* Free the parallel SW ring */ 1282 if (txq->is_xdp) 1283 kfree(txq->sw_tx_ring.xdp); 1284 else 1285 kfree(txq->sw_tx_ring.skbs); 1286 1287 /* Free the real RQ ring used by FW */ 1288 edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl); 1289 } 1290 1291 /* This function allocates all memory needed per Tx queue */ 1292 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) 1293 { 1294 union eth_tx_bd_types *p_virt; 1295 int size, rc; 1296 1297 txq->num_tx_buffers = edev->q_num_tx_buffers; 1298 1299 /* Allocate the parallel driver ring for Tx buffers */ 1300 if (txq->is_xdp) { 1301 size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE; 1302 txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL); 1303 if (!txq->sw_tx_ring.xdp) 1304 goto err; 1305 } else { 1306 size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE; 1307 txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL); 1308 if (!txq->sw_tx_ring.skbs) 1309 goto err; 1310 } 1311 1312 rc = edev->ops->common->chain_alloc(edev->cdev, 1313 QED_CHAIN_USE_TO_CONSUME_PRODUCE, 1314 QED_CHAIN_MODE_PBL, 1315 QED_CHAIN_CNT_TYPE_U16, 1316 TX_RING_SIZE, 1317 sizeof(*p_virt), &txq->tx_pbl); 1318 if (rc) 1319 goto err; 1320 1321 return 0; 1322 1323 err: 1324 qede_free_mem_txq(edev, txq); 1325 return -ENOMEM; 1326 } 1327 1328 /* This function frees all memory of a single fp */ 1329 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) 1330 { 1331 qede_free_mem_sb(edev, fp->sb_info); 1332 1333 if (fp->type & QEDE_FASTPATH_RX) 1334 qede_free_mem_rxq(edev, fp->rxq); 1335 1336 if (fp->type & QEDE_FASTPATH_XDP) 1337 qede_free_mem_txq(edev, fp->xdp_tx); 1338 1339 if (fp->type & QEDE_FASTPATH_TX) 1340 qede_free_mem_txq(edev, fp->txq); 1341 } 1342 1343 /* This function allocates all memory needed for a single fp (i.e. an entity 1344 * which contains status block, one rx queue and/or multiple per-TC tx queues. 
1345 */ 1346 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) 1347 { 1348 int rc = 0; 1349 1350 rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id); 1351 if (rc) 1352 goto out; 1353 1354 if (fp->type & QEDE_FASTPATH_RX) { 1355 rc = qede_alloc_mem_rxq(edev, fp->rxq); 1356 if (rc) 1357 goto out; 1358 } 1359 1360 if (fp->type & QEDE_FASTPATH_XDP) { 1361 rc = qede_alloc_mem_txq(edev, fp->xdp_tx); 1362 if (rc) 1363 goto out; 1364 } 1365 1366 if (fp->type & QEDE_FASTPATH_TX) { 1367 rc = qede_alloc_mem_txq(edev, fp->txq); 1368 if (rc) 1369 goto out; 1370 } 1371 1372 out: 1373 return rc; 1374 } 1375 1376 static void qede_free_mem_load(struct qede_dev *edev) 1377 { 1378 int i; 1379 1380 for_each_queue(i) { 1381 struct qede_fastpath *fp = &edev->fp_array[i]; 1382 1383 qede_free_mem_fp(edev, fp); 1384 } 1385 } 1386 1387 /* This function allocates all qede memory at NIC load. */ 1388 static int qede_alloc_mem_load(struct qede_dev *edev) 1389 { 1390 int rc = 0, queue_id; 1391 1392 for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) { 1393 struct qede_fastpath *fp = &edev->fp_array[queue_id]; 1394 1395 rc = qede_alloc_mem_fp(edev, fp); 1396 if (rc) { 1397 DP_ERR(edev, 1398 "Failed to allocate memory for fastpath - rss id = %d\n", 1399 queue_id); 1400 qede_free_mem_load(edev); 1401 return rc; 1402 } 1403 } 1404 1405 return 0; 1406 } 1407 1408 /* This function inits fp content and resets the SB, RXQ and TXQ structures */ 1409 static void qede_init_fp(struct qede_dev *edev) 1410 { 1411 int queue_id, rxq_index = 0, txq_index = 0; 1412 struct qede_fastpath *fp; 1413 1414 for_each_queue(queue_id) { 1415 fp = &edev->fp_array[queue_id]; 1416 1417 fp->edev = edev; 1418 fp->id = queue_id; 1419 1420 if (fp->type & QEDE_FASTPATH_XDP) { 1421 fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev, 1422 rxq_index); 1423 fp->xdp_tx->is_xdp = 1; 1424 } 1425 1426 if (fp->type & QEDE_FASTPATH_RX) { 1427 fp->rxq->rxq_id = rxq_index++; 1428 1429 /* Determine how to map buffers for this queue */ 1430 if (fp->type & QEDE_FASTPATH_XDP) 1431 fp->rxq->data_direction = DMA_BIDIRECTIONAL; 1432 else 1433 fp->rxq->data_direction = DMA_FROM_DEVICE; 1434 fp->rxq->dev = &edev->pdev->dev; 1435 } 1436 1437 if (fp->type & QEDE_FASTPATH_TX) { 1438 fp->txq->index = txq_index++; 1439 if (edev->dev_info.is_legacy) 1440 fp->txq->is_legacy = 1; 1441 fp->txq->dev = &edev->pdev->dev; 1442 } 1443 1444 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", 1445 edev->ndev->name, queue_id); 1446 } 1447 1448 edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO); 1449 } 1450 1451 static int qede_set_real_num_queues(struct qede_dev *edev) 1452 { 1453 int rc = 0; 1454 1455 rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev)); 1456 if (rc) { 1457 DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); 1458 return rc; 1459 } 1460 1461 rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev)); 1462 if (rc) { 1463 DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); 1464 return rc; 1465 } 1466 1467 return 0; 1468 } 1469 1470 static void qede_napi_disable_remove(struct qede_dev *edev) 1471 { 1472 int i; 1473 1474 for_each_queue(i) { 1475 napi_disable(&edev->fp_array[i].napi); 1476 1477 netif_napi_del(&edev->fp_array[i].napi); 1478 } 1479 } 1480 1481 static void qede_napi_add_enable(struct qede_dev *edev) 1482 { 1483 int i; 1484 1485 /* Add NAPI objects */ 1486 for_each_queue(i) { 1487 netif_napi_add(edev->ndev, &edev->fp_array[i].napi, 1488 qede_poll, NAPI_POLL_WEIGHT); 1489 
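/* NAPI contexts are enabled right after being added; the fastpath IRQs are
 * only requested later, in qede_setup_irqs().
 */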
napi_enable(&edev->fp_array[i].napi); 1490 } 1491 } 1492 1493 static void qede_sync_free_irqs(struct qede_dev *edev) 1494 { 1495 int i; 1496 1497 for (i = 0; i < edev->int_info.used_cnt; i++) { 1498 if (edev->int_info.msix_cnt) { 1499 synchronize_irq(edev->int_info.msix[i].vector); 1500 free_irq(edev->int_info.msix[i].vector, 1501 &edev->fp_array[i]); 1502 } else { 1503 edev->ops->common->simd_handler_clean(edev->cdev, i); 1504 } 1505 } 1506 1507 edev->int_info.used_cnt = 0; 1508 } 1509 1510 static int qede_req_msix_irqs(struct qede_dev *edev) 1511 { 1512 int i, rc; 1513 1514 /* Sanitize number of interrupts == number of prepared RSS queues */ 1515 if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) { 1516 DP_ERR(edev, 1517 "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", 1518 QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt); 1519 return -EINVAL; 1520 } 1521 1522 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { 1523 #ifdef CONFIG_RFS_ACCEL 1524 struct qede_fastpath *fp = &edev->fp_array[i]; 1525 1526 if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) { 1527 rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap, 1528 edev->int_info.msix[i].vector); 1529 if (rc) { 1530 DP_ERR(edev, "Failed to add CPU rmap\n"); 1531 qede_free_arfs(edev); 1532 } 1533 } 1534 #endif 1535 rc = request_irq(edev->int_info.msix[i].vector, 1536 qede_msix_fp_int, 0, edev->fp_array[i].name, 1537 &edev->fp_array[i]); 1538 if (rc) { 1539 DP_ERR(edev, "Request fp %d irq failed\n", i); 1540 qede_sync_free_irqs(edev); 1541 return rc; 1542 } 1543 DP_VERBOSE(edev, NETIF_MSG_INTR, 1544 "Requested fp irq for %s [entry %d]. Cookie is at %p\n", 1545 edev->fp_array[i].name, i, 1546 &edev->fp_array[i]); 1547 edev->int_info.used_cnt++; 1548 } 1549 1550 return 0; 1551 } 1552 1553 static void qede_simd_fp_handler(void *cookie) 1554 { 1555 struct qede_fastpath *fp = (struct qede_fastpath *)cookie; 1556 1557 napi_schedule_irqoff(&fp->napi); 1558 } 1559 1560 static int qede_setup_irqs(struct qede_dev *edev) 1561 { 1562 int i, rc = 0; 1563 1564 /* Learn Interrupt configuration */ 1565 rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info); 1566 if (rc) 1567 return rc; 1568 1569 if (edev->int_info.msix_cnt) { 1570 rc = qede_req_msix_irqs(edev); 1571 if (rc) 1572 return rc; 1573 edev->ndev->irq = edev->int_info.msix[0].vector; 1574 } else { 1575 const struct qed_common_ops *ops; 1576 1577 /* qed should learn receive the RSS ids and callbacks */ 1578 ops = edev->ops->common; 1579 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) 1580 ops->simd_handler_config(edev->cdev, 1581 &edev->fp_array[i], i, 1582 qede_simd_fp_handler); 1583 edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev); 1584 } 1585 return 0; 1586 } 1587 1588 static int qede_drain_txq(struct qede_dev *edev, 1589 struct qede_tx_queue *txq, bool allow_drain) 1590 { 1591 int rc, cnt = 1000; 1592 1593 while (txq->sw_tx_cons != txq->sw_tx_prod) { 1594 if (!cnt) { 1595 if (allow_drain) { 1596 DP_NOTICE(edev, 1597 "Tx queue[%d] is stuck, requesting MCP to drain\n", 1598 txq->index); 1599 rc = edev->ops->common->drain(edev->cdev); 1600 if (rc) 1601 return rc; 1602 return qede_drain_txq(edev, txq, false); 1603 } 1604 DP_NOTICE(edev, 1605 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n", 1606 txq->index, txq->sw_tx_prod, 1607 txq->sw_tx_cons); 1608 return -ENODEV; 1609 } 1610 cnt--; 1611 usleep_range(1000, 2000); 1612 barrier(); 1613 } 1614 1615 /* FW finished processing, wait for HW to transmit all tx packets */ 1616 usleep_range(1000, 2000); 1617 1618 return 0; 1619 } 1620 1621 
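/* Ask qed to stop a single Tx queue; used for both the regular and the
 * XDP forwarding Tx queues during teardown.
 */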
static int qede_stop_txq(struct qede_dev *edev, 1622 struct qede_tx_queue *txq, int rss_id) 1623 { 1624 return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle); 1625 } 1626 1627 static int qede_stop_queues(struct qede_dev *edev) 1628 { 1629 struct qed_update_vport_params *vport_update_params; 1630 struct qed_dev *cdev = edev->cdev; 1631 struct qede_fastpath *fp; 1632 int rc, i; 1633 1634 /* Disable the vport */ 1635 vport_update_params = vzalloc(sizeof(*vport_update_params)); 1636 if (!vport_update_params) 1637 return -ENOMEM; 1638 1639 vport_update_params->vport_id = 0; 1640 vport_update_params->update_vport_active_flg = 1; 1641 vport_update_params->vport_active_flg = 0; 1642 vport_update_params->update_rss_flg = 0; 1643 1644 rc = edev->ops->vport_update(cdev, vport_update_params); 1645 vfree(vport_update_params); 1646 1647 if (rc) { 1648 DP_ERR(edev, "Failed to update vport\n"); 1649 return rc; 1650 } 1651 1652 /* Flush Tx queues. If needed, request drain from MCP */ 1653 for_each_queue(i) { 1654 fp = &edev->fp_array[i]; 1655 1656 if (fp->type & QEDE_FASTPATH_TX) { 1657 rc = qede_drain_txq(edev, fp->txq, true); 1658 if (rc) 1659 return rc; 1660 } 1661 1662 if (fp->type & QEDE_FASTPATH_XDP) { 1663 rc = qede_drain_txq(edev, fp->xdp_tx, true); 1664 if (rc) 1665 return rc; 1666 } 1667 } 1668 1669 /* Stop all Queues in reverse order */ 1670 for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) { 1671 fp = &edev->fp_array[i]; 1672 1673 /* Stop the Tx Queue(s) */ 1674 if (fp->type & QEDE_FASTPATH_TX) { 1675 rc = qede_stop_txq(edev, fp->txq, i); 1676 if (rc) 1677 return rc; 1678 } 1679 1680 /* Stop the Rx Queue */ 1681 if (fp->type & QEDE_FASTPATH_RX) { 1682 rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle); 1683 if (rc) { 1684 DP_ERR(edev, "Failed to stop RXQ #%d\n", i); 1685 return rc; 1686 } 1687 } 1688 1689 /* Stop the XDP forwarding queue */ 1690 if (fp->type & QEDE_FASTPATH_XDP) { 1691 rc = qede_stop_txq(edev, fp->xdp_tx, i); 1692 if (rc) 1693 return rc; 1694 1695 bpf_prog_put(fp->rxq->xdp_prog); 1696 } 1697 } 1698 1699 /* Stop the vport */ 1700 rc = edev->ops->vport_stop(cdev, 0); 1701 if (rc) 1702 DP_ERR(edev, "Failed to stop VPORT\n"); 1703 1704 return rc; 1705 } 1706 1707 static int qede_start_txq(struct qede_dev *edev, 1708 struct qede_fastpath *fp, 1709 struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx) 1710 { 1711 dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl); 1712 u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl); 1713 struct qed_queue_start_common_params params; 1714 struct qed_txq_start_ret_params ret_params; 1715 int rc; 1716 1717 memset(¶ms, 0, sizeof(params)); 1718 memset(&ret_params, 0, sizeof(ret_params)); 1719 1720 /* Let the XDP queue share the queue-zone with one of the regular txq. 1721 * We don't really care about its coalescing. 
1722 */ 1723 if (txq->is_xdp) 1724 params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq); 1725 else 1726 params.queue_id = txq->index; 1727 1728 params.sb = fp->sb_info->igu_sb_id; 1729 params.sb_idx = sb_idx; 1730 1731 rc = edev->ops->q_tx_start(edev->cdev, rss_id, ¶ms, phys_table, 1732 page_cnt, &ret_params); 1733 if (rc) { 1734 DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc); 1735 return rc; 1736 } 1737 1738 txq->doorbell_addr = ret_params.p_doorbell; 1739 txq->handle = ret_params.p_handle; 1740 1741 /* Determine the FW consumer address associated */ 1742 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx]; 1743 1744 /* Prepare the doorbell parameters */ 1745 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM); 1746 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); 1747 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL, 1748 DQ_XCM_ETH_TX_BD_PROD_CMD); 1749 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 1750 1751 return rc; 1752 } 1753 1754 static int qede_start_queues(struct qede_dev *edev, bool clear_stats) 1755 { 1756 int vlan_removal_en = 1; 1757 struct qed_dev *cdev = edev->cdev; 1758 struct qed_dev_info *qed_info = &edev->dev_info.common; 1759 struct qed_update_vport_params *vport_update_params; 1760 struct qed_queue_start_common_params q_params; 1761 struct qed_start_vport_params start = {0}; 1762 int rc, i; 1763 1764 if (!edev->num_queues) { 1765 DP_ERR(edev, 1766 "Cannot update V-VPORT as active as there are no Rx queues\n"); 1767 return -EINVAL; 1768 } 1769 1770 vport_update_params = vzalloc(sizeof(*vport_update_params)); 1771 if (!vport_update_params) 1772 return -ENOMEM; 1773 1774 start.handle_ptp_pkts = !!(edev->ptp); 1775 start.gro_enable = !edev->gro_disable; 1776 start.mtu = edev->ndev->mtu; 1777 start.vport_id = 0; 1778 start.drop_ttl0 = true; 1779 start.remove_inner_vlan = vlan_removal_en; 1780 start.clear_stats = clear_stats; 1781 1782 rc = edev->ops->vport_start(cdev, &start); 1783 1784 if (rc) { 1785 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 1786 goto out; 1787 } 1788 1789 DP_VERBOSE(edev, NETIF_MSG_IFUP, 1790 "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 1791 start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); 1792 1793 for_each_queue(i) { 1794 struct qede_fastpath *fp = &edev->fp_array[i]; 1795 dma_addr_t p_phys_table; 1796 u32 page_cnt; 1797 1798 if (fp->type & QEDE_FASTPATH_RX) { 1799 struct qed_rxq_start_ret_params ret_params; 1800 struct qede_rx_queue *rxq = fp->rxq; 1801 __le16 *val; 1802 1803 memset(&ret_params, 0, sizeof(ret_params)); 1804 memset(&q_params, 0, sizeof(q_params)); 1805 q_params.queue_id = rxq->rxq_id; 1806 q_params.vport_id = 0; 1807 q_params.sb = fp->sb_info->igu_sb_id; 1808 q_params.sb_idx = RX_PI; 1809 1810 p_phys_table = 1811 qed_chain_get_pbl_phys(&rxq->rx_comp_ring); 1812 page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring); 1813 1814 rc = edev->ops->q_rx_start(cdev, i, &q_params, 1815 rxq->rx_buf_size, 1816 rxq->rx_bd_ring.p_phys_addr, 1817 p_phys_table, 1818 page_cnt, &ret_params); 1819 if (rc) { 1820 DP_ERR(edev, "Start RXQ #%d failed %d\n", i, 1821 rc); 1822 goto out; 1823 } 1824 1825 /* Use the return parameters */ 1826 rxq->hw_rxq_prod_addr = ret_params.p_prod; 1827 rxq->handle = ret_params.p_handle; 1828 1829 val = &fp->sb_info->sb_virt->pi_array[RX_PI]; 1830 rxq->hw_cons_ptr = val; 1831 1832 qede_update_rx_prod(edev, rxq); 1833 } 1834 1835 if (fp->type & QEDE_FASTPATH_XDP) { 1836 rc = qede_start_txq(edev, fp, fp->xdp_tx, i, 
XDP_PI); 1837 if (rc) 1838 goto out; 1839 1840 fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); 1841 if (IS_ERR(fp->rxq->xdp_prog)) { 1842 rc = PTR_ERR(fp->rxq->xdp_prog); 1843 fp->rxq->xdp_prog = NULL; 1844 goto out; 1845 } 1846 } 1847 1848 if (fp->type & QEDE_FASTPATH_TX) { 1849 rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0)); 1850 if (rc) 1851 goto out; 1852 } 1853 } 1854 1855 /* Prepare and send the vport enable */ 1856 vport_update_params->vport_id = start.vport_id; 1857 vport_update_params->update_vport_active_flg = 1; 1858 vport_update_params->vport_active_flg = 1; 1859 1860 if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && 1861 qed_info->tx_switching) { 1862 vport_update_params->update_tx_switching_flg = 1; 1863 vport_update_params->tx_switching_flg = 1; 1864 } 1865 1866 qede_fill_rss_params(edev, &vport_update_params->rss_params, 1867 &vport_update_params->update_rss_flg); 1868 1869 rc = edev->ops->vport_update(cdev, vport_update_params); 1870 if (rc) 1871 DP_ERR(edev, "Update V-PORT failed %d\n", rc); 1872 1873 out: 1874 vfree(vport_update_params); 1875 return rc; 1876 } 1877 1878 enum qede_unload_mode { 1879 QEDE_UNLOAD_NORMAL, 1880 }; 1881 1882 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, 1883 bool is_locked) 1884 { 1885 struct qed_link_params link_params; 1886 int rc; 1887 1888 DP_INFO(edev, "Starting qede unload\n"); 1889 1890 if (!is_locked) 1891 __qede_lock(edev); 1892 1893 qede_roce_dev_event_close(edev); 1894 edev->state = QEDE_STATE_CLOSED; 1895 1896 /* Close OS Tx */ 1897 netif_tx_disable(edev->ndev); 1898 netif_carrier_off(edev->ndev); 1899 1900 /* Reset the link */ 1901 memset(&link_params, 0, sizeof(link_params)); 1902 link_params.link_up = false; 1903 edev->ops->common->set_link(edev->cdev, &link_params); 1904 rc = qede_stop_queues(edev); 1905 if (rc) { 1906 qede_sync_free_irqs(edev); 1907 goto out; 1908 } 1909 1910 DP_INFO(edev, "Stopped Queues\n"); 1911 1912 qede_vlan_mark_nonconfigured(edev); 1913 edev->ops->fastpath_stop(edev->cdev); 1914 #ifdef CONFIG_RFS_ACCEL 1915 if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { 1916 qede_poll_for_freeing_arfs_filters(edev); 1917 qede_free_arfs(edev); 1918 } 1919 #endif 1920 /* Release the interrupts */ 1921 qede_sync_free_irqs(edev); 1922 edev->ops->common->set_fp_int(edev->cdev, 0); 1923 1924 qede_napi_disable_remove(edev); 1925 1926 qede_free_mem_load(edev); 1927 qede_free_fp_array(edev); 1928 1929 out: 1930 if (!is_locked) 1931 __qede_unlock(edev); 1932 DP_INFO(edev, "Ending qede unload\n"); 1933 } 1934 1935 enum qede_load_mode { 1936 QEDE_LOAD_NORMAL, 1937 QEDE_LOAD_RELOAD, 1938 }; 1939 1940 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, 1941 bool is_locked) 1942 { 1943 struct qed_link_params link_params; 1944 int rc; 1945 1946 DP_INFO(edev, "Starting qede load\n"); 1947 1948 if (!is_locked) 1949 __qede_lock(edev); 1950 1951 rc = qede_set_num_queues(edev); 1952 if (rc) 1953 goto out; 1954 1955 rc = qede_alloc_fp_array(edev); 1956 if (rc) 1957 goto out; 1958 1959 qede_init_fp(edev); 1960 1961 rc = qede_alloc_mem_load(edev); 1962 if (rc) 1963 goto err1; 1964 DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n", 1965 QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev)); 1966 1967 rc = qede_set_real_num_queues(edev); 1968 if (rc) 1969 goto err2; 1970 1971 #ifdef CONFIG_RFS_ACCEL 1972 if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { 1973 rc = qede_alloc_arfs(edev); 1974 if (rc) 1975 DP_NOTICE(edev, "aRFS memory allocation failed\n"); 1976 } 
1977 #endif 1978 qede_napi_add_enable(edev); 1979 DP_INFO(edev, "Napi added and enabled\n"); 1980 1981 rc = qede_setup_irqs(edev); 1982 if (rc) 1983 goto err3; 1984 DP_INFO(edev, "Setup IRQs succeeded\n"); 1985 1986 rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD); 1987 if (rc) 1988 goto err4; 1989 DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); 1990 1991 /* Add primary mac and set Rx filters */ 1992 ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr); 1993 1994 /* Program un-configured VLANs */ 1995 qede_configure_vlan_filters(edev); 1996 1997 /* Ask for link-up using current configuration */ 1998 memset(&link_params, 0, sizeof(link_params)); 1999 link_params.link_up = true; 2000 edev->ops->common->set_link(edev->cdev, &link_params); 2001 2002 qede_roce_dev_event_open(edev); 2003 2004 edev->state = QEDE_STATE_OPEN; 2005 2006 DP_INFO(edev, "Ending successfully qede load\n"); 2007 2008 goto out; 2009 err4: 2010 qede_sync_free_irqs(edev); 2011 memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); 2012 err3: 2013 qede_napi_disable_remove(edev); 2014 err2: 2015 qede_free_mem_load(edev); 2016 err1: 2017 edev->ops->common->set_fp_int(edev->cdev, 0); 2018 qede_free_fp_array(edev); 2019 edev->num_queues = 0; 2020 edev->fp_num_tx = 0; 2021 edev->fp_num_rx = 0; 2022 out: 2023 if (!is_locked) 2024 __qede_unlock(edev); 2025 2026 return rc; 2027 } 2028 2029 /* 'func' should be able to run between unload and reload assuming interface 2030 * is actually running, or afterwards in case it's currently DOWN. 2031 */ 2032 void qede_reload(struct qede_dev *edev, 2033 struct qede_reload_args *args, bool is_locked) 2034 { 2035 if (!is_locked) 2036 __qede_lock(edev); 2037 2038 /* Since qede_lock is held, internal state wouldn't change even 2039 * if netdev state would start transitioning. Check whether current 2040 * internal configuration indicates device is up, then reload. 
2041 */ 2042 if (edev->state == QEDE_STATE_OPEN) { 2043 qede_unload(edev, QEDE_UNLOAD_NORMAL, true); 2044 if (args) 2045 args->func(edev, args); 2046 qede_load(edev, QEDE_LOAD_RELOAD, true); 2047 2048 /* Since no one is going to do it for us, re-configure */ 2049 qede_config_rx_mode(edev->ndev); 2050 } else if (args) { 2051 args->func(edev, args); 2052 } 2053 2054 if (!is_locked) 2055 __qede_unlock(edev); 2056 } 2057 2058 /* called with rtnl_lock */ 2059 static int qede_open(struct net_device *ndev) 2060 { 2061 struct qede_dev *edev = netdev_priv(ndev); 2062 int rc; 2063 2064 netif_carrier_off(ndev); 2065 2066 edev->ops->common->set_power_state(edev->cdev, PCI_D0); 2067 2068 rc = qede_load(edev, QEDE_LOAD_NORMAL, false); 2069 if (rc) 2070 return rc; 2071 2072 udp_tunnel_get_rx_info(ndev); 2073 2074 edev->ops->common->update_drv_state(edev->cdev, true); 2075 2076 return 0; 2077 } 2078 2079 static int qede_close(struct net_device *ndev) 2080 { 2081 struct qede_dev *edev = netdev_priv(ndev); 2082 2083 qede_unload(edev, QEDE_UNLOAD_NORMAL, false); 2084 2085 edev->ops->common->update_drv_state(edev->cdev, false); 2086 2087 return 0; 2088 } 2089 2090 static void qede_link_update(void *dev, struct qed_link_output *link) 2091 { 2092 struct qede_dev *edev = dev; 2093 2094 if (!netif_running(edev->ndev)) { 2095 DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n"); 2096 return; 2097 } 2098 2099 if (link->link_up) { 2100 if (!netif_carrier_ok(edev->ndev)) { 2101 DP_NOTICE(edev, "Link is up\n"); 2102 netif_tx_start_all_queues(edev->ndev); 2103 netif_carrier_on(edev->ndev); 2104 } 2105 } else { 2106 if (netif_carrier_ok(edev->ndev)) { 2107 DP_NOTICE(edev, "Link is down\n"); 2108 netif_tx_disable(edev->ndev); 2109 netif_carrier_off(edev->ndev); 2110 } 2111 } 2112 } 2113