1 /* QLogic qede NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <linux/module.h> 33 #include <linux/pci.h> 34 #include <linux/version.h> 35 #include <linux/device.h> 36 #include <linux/netdevice.h> 37 #include <linux/etherdevice.h> 38 #include <linux/skbuff.h> 39 #include <linux/errno.h> 40 #include <linux/list.h> 41 #include <linux/string.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/interrupt.h> 44 #include <asm/byteorder.h> 45 #include <asm/param.h> 46 #include <linux/io.h> 47 #include <linux/netdev_features.h> 48 #include <linux/udp.h> 49 #include <linux/tcp.h> 50 #include <net/udp_tunnel.h> 51 #include <linux/ip.h> 52 #include <net/ipv6.h> 53 #include <net/tcp.h> 54 #include <linux/if_ether.h> 55 #include <linux/if_vlan.h> 56 #include <linux/pkt_sched.h> 57 #include <linux/ethtool.h> 58 #include <linux/in.h> 59 #include <linux/random.h> 60 #include <net/ip6_checksum.h> 61 #include <linux/bitops.h> 62 #include <linux/vmalloc.h> 63 #include "qede.h" 64 #include "qede_ptp.h" 65 66 static char version[] = 67 "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; 68 69 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver"); 70 MODULE_LICENSE("GPL"); 71 MODULE_VERSION(DRV_MODULE_VERSION); 72 73 static uint debug; 74 module_param(debug, uint, 0); 75 MODULE_PARM_DESC(debug, " Default debug msglevel"); 76 77 static const struct qed_eth_ops *qed_ops; 78 79 #define CHIP_NUM_57980S_40 0x1634 80 #define CHIP_NUM_57980S_10 0x1666 81 #define CHIP_NUM_57980S_MF 0x1636 82 #define CHIP_NUM_57980S_100 0x1644 83 #define CHIP_NUM_57980S_50 0x1654 84 #define CHIP_NUM_57980S_25 0x1656 85 #define CHIP_NUM_57980S_IOV 0x1664 86 #define CHIP_NUM_AH 0x8070 87 #define CHIP_NUM_AH_IOV 0x8090 88 89 #ifndef PCI_DEVICE_ID_NX2_57980E 90 #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 91 #define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10 92 #define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF 93 #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100 94 #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50 95 #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 96 #define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV 97 #define PCI_DEVICE_ID_AH CHIP_NUM_AH 98 #define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV 99 100 #endif 101 102 enum qede_pci_private { 103 QEDE_PRIVATE_PF, 104 QEDE_PRIVATE_VF 105 }; 106 107 static const struct pci_device_id qede_pci_tbl[] = { 108 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF}, 109 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF}, 110 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF}, 111 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, 112 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, 113 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, 114 #ifdef CONFIG_QED_SRIOV 115 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, 116 #endif 117 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF}, 118 #ifdef CONFIG_QED_SRIOV 119 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF}, 120 #endif 121 { 0 } 122 }; 123 124 MODULE_DEVICE_TABLE(pci, qede_pci_tbl); 125 126 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); 127 128 #define TX_TIMEOUT (5 * HZ) 129 130 /* Utilize last protocol index for XDP */ 131 #define XDP_PI 11 132 133 static void qede_remove(struct pci_dev *pdev); 134 static void qede_shutdown(struct pci_dev *pdev); 135 static void qede_link_update(void *dev, struct qed_link_output *link); 136 137 /* The qede lock is used to protect driver state change and driver flows that 138 * are not reentrant. 139 */ 140 void __qede_lock(struct qede_dev *edev) 141 { 142 mutex_lock(&edev->qede_lock); 143 } 144 145 void __qede_unlock(struct qede_dev *edev) 146 { 147 mutex_unlock(&edev->qede_lock); 148 } 149 150 #ifdef CONFIG_QED_SRIOV 151 static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, 152 __be16 vlan_proto) 153 { 154 struct qede_dev *edev = netdev_priv(ndev); 155 156 if (vlan > 4095) { 157 DP_NOTICE(edev, "Illegal vlan value %d\n", vlan); 158 return -EINVAL; 159 } 160 161 if (vlan_proto != htons(ETH_P_8021Q)) 162 return -EPROTONOSUPPORT; 163 164 DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n", 165 vlan, vf); 166 167 return edev->ops->iov->set_vlan(edev->cdev, vlan, vf); 168 } 169 170 static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac) 171 { 172 struct qede_dev *edev = netdev_priv(ndev); 173 174 DP_VERBOSE(edev, QED_MSG_IOV, 175 "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n", 176 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx); 177 178 if (!is_valid_ether_addr(mac)) { 179 DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n"); 180 return -EINVAL; 181 } 182 183 return edev->ops->iov->set_mac(edev->cdev, mac, vfidx); 184 } 185 186 static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) 187 { 188 struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); 189 struct qed_dev_info *qed_info = &edev->dev_info.common; 190 struct qed_update_vport_params *vport_params; 191 int rc; 192 193 vport_params = vzalloc(sizeof(*vport_params)); 194 if (!vport_params) 195 return -ENOMEM; 196 DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param); 197 198 rc = edev->ops->iov->configure(edev->cdev, num_vfs_param); 199 200 /* Enable/Disable Tx switching for PF */ 201 if ((rc == num_vfs_param) && netif_running(edev->ndev) && 202 qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { 203 vport_params->vport_id = 0; 204 vport_params->update_tx_switching_flg = 1; 205 vport_params->tx_switching_flg = num_vfs_param ? 1 : 0; 206 edev->ops->vport_update(edev->cdev, vport_params); 207 } 208 209 vfree(vport_params); 210 return rc; 211 } 212 #endif 213 214 static struct pci_driver qede_pci_driver = { 215 .name = "qede", 216 .id_table = qede_pci_tbl, 217 .probe = qede_probe, 218 .remove = qede_remove, 219 .shutdown = qede_shutdown, 220 #ifdef CONFIG_QED_SRIOV 221 .sriov_configure = qede_sriov_configure, 222 #endif 223 }; 224 225 static struct qed_eth_cb_ops qede_ll_ops = { 226 { 227 #ifdef CONFIG_RFS_ACCEL 228 .arfs_filter_op = qede_arfs_filter_op, 229 #endif 230 .link_update = qede_link_update, 231 }, 232 .force_mac = qede_force_mac, 233 .ports_update = qede_udp_ports_update, 234 }; 235 236 static int qede_netdev_event(struct notifier_block *this, unsigned long event, 237 void *ptr) 238 { 239 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 240 struct ethtool_drvinfo drvinfo; 241 struct qede_dev *edev; 242 243 if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR) 244 goto done; 245 246 /* Check whether this is a qede device */ 247 if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo) 248 goto done; 249 250 memset(&drvinfo, 0, sizeof(drvinfo)); 251 ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo); 252 if (strcmp(drvinfo.driver, "qede")) 253 goto done; 254 edev = netdev_priv(ndev); 255 256 switch (event) { 257 case NETDEV_CHANGENAME: 258 /* Notify qed of the name change */ 259 if (!edev->ops || !edev->ops->common) 260 goto done; 261 edev->ops->common->set_name(edev->cdev, edev->ndev->name); 262 break; 263 case NETDEV_CHANGEADDR: 264 edev = netdev_priv(ndev); 265 qede_rdma_event_changeaddr(edev); 266 break; 267 } 268 269 done: 270 return NOTIFY_DONE; 271 } 272 273 static struct notifier_block qede_netdev_notifier = { 274 .notifier_call = qede_netdev_event, 275 }; 276 277 static 278 int __init qede_init(void) 279 { 280 int ret; 281 282 pr_info("qede_init: %s\n", version); 283 284 qed_ops = qed_get_eth_ops(); 285 if (!qed_ops) { 286 pr_notice("Failed to get qed ethtool operations\n"); 287 return -EINVAL; 288 } 289 290 /* Must register notifier before pci ops, since we might miss 291 * interface rename after pci probe and netdev registeration. 292 */ 293 ret = register_netdevice_notifier(&qede_netdev_notifier); 294 if (ret) { 295 pr_notice("Failed to register netdevice_notifier\n"); 296 qed_put_eth_ops(); 297 return -EINVAL; 298 } 299 300 ret = pci_register_driver(&qede_pci_driver); 301 if (ret) { 302 pr_notice("Failed to register driver\n"); 303 unregister_netdevice_notifier(&qede_netdev_notifier); 304 qed_put_eth_ops(); 305 return -EINVAL; 306 } 307 308 return 0; 309 } 310 311 static void __exit qede_cleanup(void) 312 { 313 if (debug & QED_LOG_INFO_MASK) 314 pr_info("qede_cleanup called\n"); 315 316 unregister_netdevice_notifier(&qede_netdev_notifier); 317 pci_unregister_driver(&qede_pci_driver); 318 qed_put_eth_ops(); 319 } 320 321 module_init(qede_init); 322 module_exit(qede_cleanup); 323 324 static int qede_open(struct net_device *ndev); 325 static int qede_close(struct net_device *ndev); 326 327 void qede_fill_by_demand_stats(struct qede_dev *edev) 328 { 329 struct qede_stats_common *p_common = &edev->stats.common; 330 struct qed_eth_stats stats; 331 332 edev->ops->get_vport_stats(edev->cdev, &stats); 333 334 p_common->no_buff_discards = stats.common.no_buff_discards; 335 p_common->packet_too_big_discard = stats.common.packet_too_big_discard; 336 p_common->ttl0_discard = stats.common.ttl0_discard; 337 p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes; 338 p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes; 339 p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes; 340 p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts; 341 p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts; 342 p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts; 343 p_common->mftag_filter_discards = stats.common.mftag_filter_discards; 344 p_common->mac_filter_discards = stats.common.mac_filter_discards; 345 346 p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes; 347 p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes; 348 p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes; 349 p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts; 350 p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts; 351 p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts; 352 p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts; 353 p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts; 354 p_common->coalesced_events = stats.common.tpa_coalesced_events; 355 p_common->coalesced_aborts_num = stats.common.tpa_aborts_num; 356 p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts; 357 p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes; 358 359 p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets; 360 p_common->rx_65_to_127_byte_packets = 361 stats.common.rx_65_to_127_byte_packets; 362 p_common->rx_128_to_255_byte_packets = 363 stats.common.rx_128_to_255_byte_packets; 364 p_common->rx_256_to_511_byte_packets = 365 stats.common.rx_256_to_511_byte_packets; 366 p_common->rx_512_to_1023_byte_packets = 367 stats.common.rx_512_to_1023_byte_packets; 368 p_common->rx_1024_to_1518_byte_packets = 369 stats.common.rx_1024_to_1518_byte_packets; 370 p_common->rx_crc_errors = stats.common.rx_crc_errors; 371 p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames; 372 p_common->rx_pause_frames = stats.common.rx_pause_frames; 373 p_common->rx_pfc_frames = stats.common.rx_pfc_frames; 374 p_common->rx_align_errors = stats.common.rx_align_errors; 375 p_common->rx_carrier_errors = stats.common.rx_carrier_errors; 376 p_common->rx_oversize_packets = stats.common.rx_oversize_packets; 377 p_common->rx_jabbers = stats.common.rx_jabbers; 378 p_common->rx_undersize_packets = stats.common.rx_undersize_packets; 379 p_common->rx_fragments = stats.common.rx_fragments; 380 p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets; 381 p_common->tx_65_to_127_byte_packets = 382 stats.common.tx_65_to_127_byte_packets; 383 p_common->tx_128_to_255_byte_packets = 384 stats.common.tx_128_to_255_byte_packets; 385 p_common->tx_256_to_511_byte_packets = 386 stats.common.tx_256_to_511_byte_packets; 387 p_common->tx_512_to_1023_byte_packets = 388 stats.common.tx_512_to_1023_byte_packets; 389 p_common->tx_1024_to_1518_byte_packets = 390 stats.common.tx_1024_to_1518_byte_packets; 391 p_common->tx_pause_frames = stats.common.tx_pause_frames; 392 p_common->tx_pfc_frames = stats.common.tx_pfc_frames; 393 p_common->brb_truncates = stats.common.brb_truncates; 394 p_common->brb_discards = stats.common.brb_discards; 395 p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; 396 397 if (QEDE_IS_BB(edev)) { 398 struct qede_stats_bb *p_bb = &edev->stats.bb; 399 400 p_bb->rx_1519_to_1522_byte_packets = 401 stats.bb.rx_1519_to_1522_byte_packets; 402 p_bb->rx_1519_to_2047_byte_packets = 403 stats.bb.rx_1519_to_2047_byte_packets; 404 p_bb->rx_2048_to_4095_byte_packets = 405 stats.bb.rx_2048_to_4095_byte_packets; 406 p_bb->rx_4096_to_9216_byte_packets = 407 stats.bb.rx_4096_to_9216_byte_packets; 408 p_bb->rx_9217_to_16383_byte_packets = 409 stats.bb.rx_9217_to_16383_byte_packets; 410 p_bb->tx_1519_to_2047_byte_packets = 411 stats.bb.tx_1519_to_2047_byte_packets; 412 p_bb->tx_2048_to_4095_byte_packets = 413 stats.bb.tx_2048_to_4095_byte_packets; 414 p_bb->tx_4096_to_9216_byte_packets = 415 stats.bb.tx_4096_to_9216_byte_packets; 416 p_bb->tx_9217_to_16383_byte_packets = 417 stats.bb.tx_9217_to_16383_byte_packets; 418 p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count; 419 p_bb->tx_total_collisions = stats.bb.tx_total_collisions; 420 } else { 421 struct qede_stats_ah *p_ah = &edev->stats.ah; 422 423 p_ah->rx_1519_to_max_byte_packets = 424 stats.ah.rx_1519_to_max_byte_packets; 425 p_ah->tx_1519_to_max_byte_packets = 426 stats.ah.tx_1519_to_max_byte_packets; 427 } 428 } 429 430 static void qede_get_stats64(struct net_device *dev, 431 struct rtnl_link_stats64 *stats) 432 { 433 struct qede_dev *edev = netdev_priv(dev); 434 struct qede_stats_common *p_common; 435 436 qede_fill_by_demand_stats(edev); 437 p_common = &edev->stats.common; 438 439 stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + 440 p_common->rx_bcast_pkts; 441 stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + 442 p_common->tx_bcast_pkts; 443 444 stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + 445 p_common->rx_bcast_bytes; 446 stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + 447 p_common->tx_bcast_bytes; 448 449 stats->tx_errors = p_common->tx_err_drop_pkts; 450 stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts; 451 452 stats->rx_fifo_errors = p_common->no_buff_discards; 453 454 if (QEDE_IS_BB(edev)) 455 stats->collisions = edev->stats.bb.tx_total_collisions; 456 stats->rx_crc_errors = p_common->rx_crc_errors; 457 stats->rx_frame_errors = p_common->rx_align_errors; 458 } 459 460 #ifdef CONFIG_QED_SRIOV 461 static int qede_get_vf_config(struct net_device *dev, int vfidx, 462 struct ifla_vf_info *ivi) 463 { 464 struct qede_dev *edev = netdev_priv(dev); 465 466 if (!edev->ops) 467 return -EINVAL; 468 469 return edev->ops->iov->get_config(edev->cdev, vfidx, ivi); 470 } 471 472 static int qede_set_vf_rate(struct net_device *dev, int vfidx, 473 int min_tx_rate, int max_tx_rate) 474 { 475 struct qede_dev *edev = netdev_priv(dev); 476 477 return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate, 478 max_tx_rate); 479 } 480 481 static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val) 482 { 483 struct qede_dev *edev = netdev_priv(dev); 484 485 if (!edev->ops) 486 return -EINVAL; 487 488 return edev->ops->iov->set_spoof(edev->cdev, vfidx, val); 489 } 490 491 static int qede_set_vf_link_state(struct net_device *dev, int vfidx, 492 int link_state) 493 { 494 struct qede_dev *edev = netdev_priv(dev); 495 496 if (!edev->ops) 497 return -EINVAL; 498 499 return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state); 500 } 501 502 static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting) 503 { 504 struct qede_dev *edev = netdev_priv(dev); 505 506 if (!edev->ops) 507 return -EINVAL; 508 509 return edev->ops->iov->set_trust(edev->cdev, vfidx, setting); 510 } 511 #endif 512 513 static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 514 { 515 struct qede_dev *edev = netdev_priv(dev); 516 517 if (!netif_running(dev)) 518 return -EAGAIN; 519 520 switch (cmd) { 521 case SIOCSHWTSTAMP: 522 return qede_ptp_hw_ts(edev, ifr); 523 default: 524 DP_VERBOSE(edev, QED_MSG_DEBUG, 525 "default IOCTL cmd 0x%x\n", cmd); 526 return -EOPNOTSUPP; 527 } 528 529 return 0; 530 } 531 532 static const struct net_device_ops qede_netdev_ops = { 533 .ndo_open = qede_open, 534 .ndo_stop = qede_close, 535 .ndo_start_xmit = qede_start_xmit, 536 .ndo_set_rx_mode = qede_set_rx_mode, 537 .ndo_set_mac_address = qede_set_mac_addr, 538 .ndo_validate_addr = eth_validate_addr, 539 .ndo_change_mtu = qede_change_mtu, 540 .ndo_do_ioctl = qede_ioctl, 541 #ifdef CONFIG_QED_SRIOV 542 .ndo_set_vf_mac = qede_set_vf_mac, 543 .ndo_set_vf_vlan = qede_set_vf_vlan, 544 .ndo_set_vf_trust = qede_set_vf_trust, 545 #endif 546 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, 547 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, 548 .ndo_set_features = qede_set_features, 549 .ndo_get_stats64 = qede_get_stats64, 550 #ifdef CONFIG_QED_SRIOV 551 .ndo_set_vf_link_state = qede_set_vf_link_state, 552 .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, 553 .ndo_get_vf_config = qede_get_vf_config, 554 .ndo_set_vf_rate = qede_set_vf_rate, 555 #endif 556 .ndo_udp_tunnel_add = qede_udp_tunnel_add, 557 .ndo_udp_tunnel_del = qede_udp_tunnel_del, 558 .ndo_features_check = qede_features_check, 559 .ndo_xdp = qede_xdp, 560 #ifdef CONFIG_RFS_ACCEL 561 .ndo_rx_flow_steer = qede_rx_flow_steer, 562 #endif 563 }; 564 565 static const struct net_device_ops qede_netdev_vf_ops = { 566 .ndo_open = qede_open, 567 .ndo_stop = qede_close, 568 .ndo_start_xmit = qede_start_xmit, 569 .ndo_set_rx_mode = qede_set_rx_mode, 570 .ndo_set_mac_address = qede_set_mac_addr, 571 .ndo_validate_addr = eth_validate_addr, 572 .ndo_change_mtu = qede_change_mtu, 573 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, 574 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, 575 .ndo_set_features = qede_set_features, 576 .ndo_get_stats64 = qede_get_stats64, 577 .ndo_udp_tunnel_add = qede_udp_tunnel_add, 578 .ndo_udp_tunnel_del = qede_udp_tunnel_del, 579 .ndo_features_check = qede_features_check, 580 }; 581 582 static const struct net_device_ops qede_netdev_vf_xdp_ops = { 583 .ndo_open = qede_open, 584 .ndo_stop = qede_close, 585 .ndo_start_xmit = qede_start_xmit, 586 .ndo_set_rx_mode = qede_set_rx_mode, 587 .ndo_set_mac_address = qede_set_mac_addr, 588 .ndo_validate_addr = eth_validate_addr, 589 .ndo_change_mtu = qede_change_mtu, 590 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, 591 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, 592 .ndo_set_features = qede_set_features, 593 .ndo_get_stats64 = qede_get_stats64, 594 .ndo_udp_tunnel_add = qede_udp_tunnel_add, 595 .ndo_udp_tunnel_del = qede_udp_tunnel_del, 596 .ndo_features_check = qede_features_check, 597 .ndo_xdp = qede_xdp, 598 }; 599 600 /* ------------------------------------------------------------------------- 601 * START OF PROBE / REMOVE 602 * ------------------------------------------------------------------------- 603 */ 604 605 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, 606 struct pci_dev *pdev, 607 struct qed_dev_eth_info *info, 608 u32 dp_module, u8 dp_level) 609 { 610 struct net_device *ndev; 611 struct qede_dev *edev; 612 613 ndev = alloc_etherdev_mqs(sizeof(*edev), 614 info->num_queues, info->num_queues); 615 if (!ndev) { 616 pr_err("etherdev allocation failed\n"); 617 return NULL; 618 } 619 620 edev = netdev_priv(ndev); 621 edev->ndev = ndev; 622 edev->cdev = cdev; 623 edev->pdev = pdev; 624 edev->dp_module = dp_module; 625 edev->dp_level = dp_level; 626 edev->ops = qed_ops; 627 edev->q_num_rx_buffers = NUM_RX_BDS_DEF; 628 edev->q_num_tx_buffers = NUM_TX_BDS_DEF; 629 630 DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", 631 info->num_queues, info->num_queues); 632 633 SET_NETDEV_DEV(ndev, &pdev->dev); 634 635 memset(&edev->stats, 0, sizeof(edev->stats)); 636 memcpy(&edev->dev_info, info, sizeof(*info)); 637 638 /* As ethtool doesn't have the ability to show WoL behavior as 639 * 'default', if device supports it declare it's enabled. 640 */ 641 if (edev->dev_info.common.wol_support) 642 edev->wol_enabled = true; 643 644 INIT_LIST_HEAD(&edev->vlan_list); 645 646 return edev; 647 } 648 649 static void qede_init_ndev(struct qede_dev *edev) 650 { 651 struct net_device *ndev = edev->ndev; 652 struct pci_dev *pdev = edev->pdev; 653 bool udp_tunnel_enable = false; 654 netdev_features_t hw_features; 655 656 pci_set_drvdata(pdev, ndev); 657 658 ndev->mem_start = edev->dev_info.common.pci_mem_start; 659 ndev->base_addr = ndev->mem_start; 660 ndev->mem_end = edev->dev_info.common.pci_mem_end; 661 ndev->irq = edev->dev_info.common.pci_irq; 662 663 ndev->watchdog_timeo = TX_TIMEOUT; 664 665 if (IS_VF(edev)) { 666 if (edev->dev_info.xdp_supported) 667 ndev->netdev_ops = &qede_netdev_vf_xdp_ops; 668 else 669 ndev->netdev_ops = &qede_netdev_vf_ops; 670 } else { 671 ndev->netdev_ops = &qede_netdev_ops; 672 } 673 674 qede_set_ethtool_ops(ndev); 675 676 ndev->priv_flags |= IFF_UNICAST_FLT; 677 678 /* user-changeble features */ 679 hw_features = NETIF_F_GRO | NETIF_F_SG | 680 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 681 NETIF_F_TSO | NETIF_F_TSO6; 682 683 if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) 684 hw_features |= NETIF_F_NTUPLE; 685 686 if (edev->dev_info.common.vxlan_enable || 687 edev->dev_info.common.geneve_enable) 688 udp_tunnel_enable = true; 689 690 if (udp_tunnel_enable || edev->dev_info.common.gre_enable) { 691 hw_features |= NETIF_F_TSO_ECN; 692 ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 693 NETIF_F_SG | NETIF_F_TSO | 694 NETIF_F_TSO_ECN | NETIF_F_TSO6 | 695 NETIF_F_RXCSUM; 696 } 697 698 if (udp_tunnel_enable) { 699 hw_features |= (NETIF_F_GSO_UDP_TUNNEL | 700 NETIF_F_GSO_UDP_TUNNEL_CSUM); 701 ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL | 702 NETIF_F_GSO_UDP_TUNNEL_CSUM); 703 } 704 705 if (edev->dev_info.common.gre_enable) { 706 hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM); 707 ndev->hw_enc_features |= (NETIF_F_GSO_GRE | 708 NETIF_F_GSO_GRE_CSUM); 709 } 710 711 ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | 712 NETIF_F_HIGHDMA; 713 ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | 714 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA | 715 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX; 716 717 ndev->hw_features = hw_features; 718 719 /* MTU range: 46 - 9600 */ 720 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 721 ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE; 722 723 /* Set network device HW mac */ 724 ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac); 725 726 ndev->mtu = edev->dev_info.common.mtu; 727 } 728 729 /* This function converts from 32b param to two params of level and module 730 * Input 32b decoding: 731 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the 732 * 'happy' flow, e.g. memory allocation failed. 733 * b30 - enable all INFO prints. INFO prints are for major steps in the flow 734 * and provide important parameters. 735 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that 736 * module. VERBOSE prints are for tracking the specific flow in low level. 737 * 738 * Notice that the level should be that of the lowest required logs. 739 */ 740 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level) 741 { 742 *p_dp_level = QED_LEVEL_NOTICE; 743 *p_dp_module = 0; 744 745 if (debug & QED_LOG_VERBOSE_MASK) { 746 *p_dp_level = QED_LEVEL_VERBOSE; 747 *p_dp_module = (debug & 0x3FFFFFFF); 748 } else if (debug & QED_LOG_INFO_MASK) { 749 *p_dp_level = QED_LEVEL_INFO; 750 } else if (debug & QED_LOG_NOTICE_MASK) { 751 *p_dp_level = QED_LEVEL_NOTICE; 752 } 753 } 754 755 static void qede_free_fp_array(struct qede_dev *edev) 756 { 757 if (edev->fp_array) { 758 struct qede_fastpath *fp; 759 int i; 760 761 for_each_queue(i) { 762 fp = &edev->fp_array[i]; 763 764 kfree(fp->sb_info); 765 kfree(fp->rxq); 766 kfree(fp->xdp_tx); 767 kfree(fp->txq); 768 } 769 kfree(edev->fp_array); 770 } 771 772 edev->num_queues = 0; 773 edev->fp_num_tx = 0; 774 edev->fp_num_rx = 0; 775 } 776 777 static int qede_alloc_fp_array(struct qede_dev *edev) 778 { 779 u8 fp_combined, fp_rx = edev->fp_num_rx; 780 struct qede_fastpath *fp; 781 int i; 782 783 edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev), 784 sizeof(*edev->fp_array), GFP_KERNEL); 785 if (!edev->fp_array) { 786 DP_NOTICE(edev, "fp array allocation failed\n"); 787 goto err; 788 } 789 790 fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx; 791 792 /* Allocate the FP elements for Rx queues followed by combined and then 793 * the Tx. This ordering should be maintained so that the respective 794 * queues (Rx or Tx) will be together in the fastpath array and the 795 * associated ids will be sequential. 796 */ 797 for_each_queue(i) { 798 fp = &edev->fp_array[i]; 799 800 fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL); 801 if (!fp->sb_info) { 802 DP_NOTICE(edev, "sb info struct allocation failed\n"); 803 goto err; 804 } 805 806 if (fp_rx) { 807 fp->type = QEDE_FASTPATH_RX; 808 fp_rx--; 809 } else if (fp_combined) { 810 fp->type = QEDE_FASTPATH_COMBINED; 811 fp_combined--; 812 } else { 813 fp->type = QEDE_FASTPATH_TX; 814 } 815 816 if (fp->type & QEDE_FASTPATH_TX) { 817 fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL); 818 if (!fp->txq) 819 goto err; 820 } 821 822 if (fp->type & QEDE_FASTPATH_RX) { 823 fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL); 824 if (!fp->rxq) 825 goto err; 826 827 if (edev->xdp_prog) { 828 fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx), 829 GFP_KERNEL); 830 if (!fp->xdp_tx) 831 goto err; 832 fp->type |= QEDE_FASTPATH_XDP; 833 } 834 } 835 } 836 837 return 0; 838 err: 839 qede_free_fp_array(edev); 840 return -ENOMEM; 841 } 842 843 static void qede_sp_task(struct work_struct *work) 844 { 845 struct qede_dev *edev = container_of(work, struct qede_dev, 846 sp_task.work); 847 848 __qede_lock(edev); 849 850 if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) 851 if (edev->state == QEDE_STATE_OPEN) 852 qede_config_rx_mode(edev->ndev); 853 854 #ifdef CONFIG_RFS_ACCEL 855 if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) { 856 if (edev->state == QEDE_STATE_OPEN) 857 qede_process_arfs_filters(edev, false); 858 } 859 #endif 860 __qede_unlock(edev); 861 } 862 863 static void qede_update_pf_params(struct qed_dev *cdev) 864 { 865 struct qed_pf_params pf_params; 866 867 /* 64 rx + 64 tx + 64 XDP */ 868 memset(&pf_params, 0, sizeof(struct qed_pf_params)); 869 pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3; 870 871 /* Same for VFs - make sure they'll have sufficient connections 872 * to support XDP Tx queues. 873 */ 874 pf_params.eth_pf_params.num_vf_cons = 48; 875 876 #ifdef CONFIG_RFS_ACCEL 877 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; 878 #endif 879 qed_ops->common->update_pf_params(cdev, &pf_params); 880 } 881 882 #define QEDE_FW_VER_STR_SIZE 80 883 884 static void qede_log_probe(struct qede_dev *edev) 885 { 886 struct qed_dev_info *p_dev_info = &edev->dev_info.common; 887 u8 buf[QEDE_FW_VER_STR_SIZE]; 888 size_t left_size; 889 890 snprintf(buf, QEDE_FW_VER_STR_SIZE, 891 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d", 892 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev, 893 p_dev_info->fw_eng, 894 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >> 895 QED_MFW_VERSION_3_OFFSET, 896 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >> 897 QED_MFW_VERSION_2_OFFSET, 898 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >> 899 QED_MFW_VERSION_1_OFFSET, 900 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >> 901 QED_MFW_VERSION_0_OFFSET); 902 903 left_size = QEDE_FW_VER_STR_SIZE - strlen(buf); 904 if (p_dev_info->mbi_version && left_size) 905 snprintf(buf + strlen(buf), left_size, 906 " [MBI %d.%d.%d]", 907 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >> 908 QED_MBI_VERSION_2_OFFSET, 909 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >> 910 QED_MBI_VERSION_1_OFFSET, 911 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >> 912 QED_MBI_VERSION_0_OFFSET); 913 914 pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number, 915 PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn), 916 buf, edev->ndev->name); 917 } 918 919 enum qede_probe_mode { 920 QEDE_PROBE_NORMAL, 921 }; 922 923 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, 924 bool is_vf, enum qede_probe_mode mode) 925 { 926 struct qed_probe_params probe_params; 927 struct qed_slowpath_params sp_params; 928 struct qed_dev_eth_info dev_info; 929 struct qede_dev *edev; 930 struct qed_dev *cdev; 931 int rc; 932 933 if (unlikely(dp_level & QED_LEVEL_INFO)) 934 pr_notice("Starting qede probe\n"); 935 936 memset(&probe_params, 0, sizeof(probe_params)); 937 probe_params.protocol = QED_PROTOCOL_ETH; 938 probe_params.dp_module = dp_module; 939 probe_params.dp_level = dp_level; 940 probe_params.is_vf = is_vf; 941 cdev = qed_ops->common->probe(pdev, &probe_params); 942 if (!cdev) { 943 rc = -ENODEV; 944 goto err0; 945 } 946 947 qede_update_pf_params(cdev); 948 949 /* Start the Slowpath-process */ 950 memset(&sp_params, 0, sizeof(sp_params)); 951 sp_params.int_mode = QED_INT_MODE_MSIX; 952 sp_params.drv_major = QEDE_MAJOR_VERSION; 953 sp_params.drv_minor = QEDE_MINOR_VERSION; 954 sp_params.drv_rev = QEDE_REVISION_VERSION; 955 sp_params.drv_eng = QEDE_ENGINEERING_VERSION; 956 strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); 957 rc = qed_ops->common->slowpath_start(cdev, &sp_params); 958 if (rc) { 959 pr_notice("Cannot start slowpath\n"); 960 goto err1; 961 } 962 963 /* Learn information crucial for qede to progress */ 964 rc = qed_ops->fill_dev_info(cdev, &dev_info); 965 if (rc) 966 goto err2; 967 968 edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, 969 dp_level); 970 if (!edev) { 971 rc = -ENOMEM; 972 goto err2; 973 } 974 975 if (is_vf) 976 edev->flags |= QEDE_FLAG_IS_VF; 977 978 qede_init_ndev(edev); 979 980 rc = qede_rdma_dev_add(edev); 981 if (rc) 982 goto err3; 983 984 /* Prepare the lock prior to the registeration of the netdev, 985 * as once it's registered we might reach flows requiring it 986 * [it's even possible to reach a flow needing it directly 987 * from there, although it's unlikely]. 988 */ 989 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); 990 mutex_init(&edev->qede_lock); 991 rc = register_netdev(edev->ndev); 992 if (rc) { 993 DP_NOTICE(edev, "Cannot register net-device\n"); 994 goto err4; 995 } 996 997 edev->ops->common->set_name(cdev, edev->ndev->name); 998 999 /* PTP not supported on VFs */ 1000 if (!is_vf) 1001 qede_ptp_enable(edev, true); 1002 1003 edev->ops->register_ops(cdev, &qede_ll_ops, edev); 1004 1005 #ifdef CONFIG_DCB 1006 if (!IS_VF(edev)) 1007 qede_set_dcbnl_ops(edev->ndev); 1008 #endif 1009 1010 edev->rx_copybreak = QEDE_RX_HDR_SIZE; 1011 1012 qede_log_probe(edev); 1013 return 0; 1014 1015 err4: 1016 qede_rdma_dev_remove(edev); 1017 err3: 1018 free_netdev(edev->ndev); 1019 err2: 1020 qed_ops->common->slowpath_stop(cdev); 1021 err1: 1022 qed_ops->common->remove(cdev); 1023 err0: 1024 return rc; 1025 } 1026 1027 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1028 { 1029 bool is_vf = false; 1030 u32 dp_module = 0; 1031 u8 dp_level = 0; 1032 1033 switch ((enum qede_pci_private)id->driver_data) { 1034 case QEDE_PRIVATE_VF: 1035 if (debug & QED_LOG_VERBOSE_MASK) 1036 dev_err(&pdev->dev, "Probing a VF\n"); 1037 is_vf = true; 1038 break; 1039 default: 1040 if (debug & QED_LOG_VERBOSE_MASK) 1041 dev_err(&pdev->dev, "Probing a PF\n"); 1042 } 1043 1044 qede_config_debug(debug, &dp_module, &dp_level); 1045 1046 return __qede_probe(pdev, dp_module, dp_level, is_vf, 1047 QEDE_PROBE_NORMAL); 1048 } 1049 1050 enum qede_remove_mode { 1051 QEDE_REMOVE_NORMAL, 1052 }; 1053 1054 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) 1055 { 1056 struct net_device *ndev = pci_get_drvdata(pdev); 1057 struct qede_dev *edev = netdev_priv(ndev); 1058 struct qed_dev *cdev = edev->cdev; 1059 1060 DP_INFO(edev, "Starting qede_remove\n"); 1061 1062 unregister_netdev(ndev); 1063 cancel_delayed_work_sync(&edev->sp_task); 1064 1065 qede_ptp_disable(edev); 1066 1067 qede_rdma_dev_remove(edev); 1068 1069 edev->ops->common->set_power_state(cdev, PCI_D0); 1070 1071 pci_set_drvdata(pdev, NULL); 1072 1073 /* Release edev's reference to XDP's bpf if such exist */ 1074 if (edev->xdp_prog) 1075 bpf_prog_put(edev->xdp_prog); 1076 1077 /* Use global ops since we've freed edev */ 1078 qed_ops->common->slowpath_stop(cdev); 1079 if (system_state == SYSTEM_POWER_OFF) 1080 return; 1081 qed_ops->common->remove(cdev); 1082 1083 /* Since this can happen out-of-sync with other flows, 1084 * don't release the netdevice until after slowpath stop 1085 * has been called to guarantee various other contexts 1086 * [e.g., QED register callbacks] won't break anything when 1087 * accessing the netdevice. 1088 */ 1089 free_netdev(ndev); 1090 1091 dev_info(&pdev->dev, "Ending qede_remove successfully\n"); 1092 } 1093 1094 static void qede_remove(struct pci_dev *pdev) 1095 { 1096 __qede_remove(pdev, QEDE_REMOVE_NORMAL); 1097 } 1098 1099 static void qede_shutdown(struct pci_dev *pdev) 1100 { 1101 __qede_remove(pdev, QEDE_REMOVE_NORMAL); 1102 } 1103 1104 /* ------------------------------------------------------------------------- 1105 * START OF LOAD / UNLOAD 1106 * ------------------------------------------------------------------------- 1107 */ 1108 1109 static int qede_set_num_queues(struct qede_dev *edev) 1110 { 1111 int rc; 1112 u16 rss_num; 1113 1114 /* Setup queues according to possible resources*/ 1115 if (edev->req_queues) 1116 rss_num = edev->req_queues; 1117 else 1118 rss_num = netif_get_num_default_rss_queues() * 1119 edev->dev_info.common.num_hwfns; 1120 1121 rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num); 1122 1123 rc = edev->ops->common->set_fp_int(edev->cdev, rss_num); 1124 if (rc > 0) { 1125 /* Managed to request interrupts for our queues */ 1126 edev->num_queues = rc; 1127 DP_INFO(edev, "Managed %d [of %d] RSS queues\n", 1128 QEDE_QUEUE_CNT(edev), rss_num); 1129 rc = 0; 1130 } 1131 1132 edev->fp_num_tx = edev->req_num_tx; 1133 edev->fp_num_rx = edev->req_num_rx; 1134 1135 return rc; 1136 } 1137 1138 static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, 1139 u16 sb_id) 1140 { 1141 if (sb_info->sb_virt) { 1142 edev->ops->common->sb_release(edev->cdev, sb_info, sb_id); 1143 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt), 1144 (void *)sb_info->sb_virt, sb_info->sb_phys); 1145 memset(sb_info, 0, sizeof(*sb_info)); 1146 } 1147 } 1148 1149 /* This function allocates fast-path status block memory */ 1150 static int qede_alloc_mem_sb(struct qede_dev *edev, 1151 struct qed_sb_info *sb_info, u16 sb_id) 1152 { 1153 struct status_block *sb_virt; 1154 dma_addr_t sb_phys; 1155 int rc; 1156 1157 sb_virt = dma_alloc_coherent(&edev->pdev->dev, 1158 sizeof(*sb_virt), &sb_phys, GFP_KERNEL); 1159 if (!sb_virt) { 1160 DP_ERR(edev, "Status block allocation failed\n"); 1161 return -ENOMEM; 1162 } 1163 1164 rc = edev->ops->common->sb_init(edev->cdev, sb_info, 1165 sb_virt, sb_phys, sb_id, 1166 QED_SB_TYPE_L2_QUEUE); 1167 if (rc) { 1168 DP_ERR(edev, "Status block initialization failed\n"); 1169 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt), 1170 sb_virt, sb_phys); 1171 return rc; 1172 } 1173 1174 return 0; 1175 } 1176 1177 static void qede_free_rx_buffers(struct qede_dev *edev, 1178 struct qede_rx_queue *rxq) 1179 { 1180 u16 i; 1181 1182 for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) { 1183 struct sw_rx_data *rx_buf; 1184 struct page *data; 1185 1186 rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX]; 1187 data = rx_buf->data; 1188 1189 dma_unmap_page(&edev->pdev->dev, 1190 rx_buf->mapping, PAGE_SIZE, rxq->data_direction); 1191 1192 rx_buf->data = NULL; 1193 __free_page(data); 1194 } 1195 } 1196 1197 static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) 1198 { 1199 int i; 1200 1201 if (edev->gro_disable) 1202 return; 1203 1204 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 1205 struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; 1206 struct sw_rx_data *replace_buf = &tpa_info->buffer; 1207 1208 if (replace_buf->data) { 1209 dma_unmap_page(&edev->pdev->dev, 1210 replace_buf->mapping, 1211 PAGE_SIZE, DMA_FROM_DEVICE); 1212 __free_page(replace_buf->data); 1213 } 1214 } 1215 } 1216 1217 static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) 1218 { 1219 qede_free_sge_mem(edev, rxq); 1220 1221 /* Free rx buffers */ 1222 qede_free_rx_buffers(edev, rxq); 1223 1224 /* Free the parallel SW ring */ 1225 kfree(rxq->sw_rx_ring); 1226 1227 /* Free the real RQ ring used by FW */ 1228 edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring); 1229 edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring); 1230 } 1231 1232 static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) 1233 { 1234 dma_addr_t mapping; 1235 int i; 1236 1237 /* Don't perform FW aggregations in case of XDP */ 1238 if (edev->xdp_prog) 1239 edev->gro_disable = 1; 1240 1241 if (edev->gro_disable) 1242 return 0; 1243 1244 if (edev->ndev->mtu > PAGE_SIZE) { 1245 edev->gro_disable = 1; 1246 return 0; 1247 } 1248 1249 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 1250 struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; 1251 struct sw_rx_data *replace_buf = &tpa_info->buffer; 1252 1253 replace_buf->data = alloc_pages(GFP_ATOMIC, 0); 1254 if (unlikely(!replace_buf->data)) { 1255 DP_NOTICE(edev, 1256 "Failed to allocate TPA skb pool [replacement buffer]\n"); 1257 goto err; 1258 } 1259 1260 mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, 1261 PAGE_SIZE, DMA_FROM_DEVICE); 1262 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { 1263 DP_NOTICE(edev, 1264 "Failed to map TPA replacement buffer\n"); 1265 goto err; 1266 } 1267 1268 replace_buf->mapping = mapping; 1269 tpa_info->buffer.page_offset = 0; 1270 tpa_info->buffer_mapping = mapping; 1271 tpa_info->state = QEDE_AGG_STATE_NONE; 1272 } 1273 1274 return 0; 1275 err: 1276 qede_free_sge_mem(edev, rxq); 1277 edev->gro_disable = 1; 1278 return -ENOMEM; 1279 } 1280 1281 /* This function allocates all memory needed per Rx queue */ 1282 static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) 1283 { 1284 int i, rc, size; 1285 1286 rxq->num_rx_buffers = edev->q_num_rx_buffers; 1287 1288 rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu; 1289 rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0; 1290 1291 /* Make sure that the headroom and payload fit in a single page */ 1292 if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE) 1293 rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom; 1294 1295 /* Segment size to spilt a page in multiple equal parts, 1296 * unless XDP is used in which case we'd use the entire page. 1297 */ 1298 if (!edev->xdp_prog) 1299 rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size); 1300 else 1301 rxq->rx_buf_seg_size = PAGE_SIZE; 1302 1303 /* Allocate the parallel driver ring for Rx buffers */ 1304 size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE; 1305 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); 1306 if (!rxq->sw_rx_ring) { 1307 DP_ERR(edev, "Rx buffers ring allocation failed\n"); 1308 rc = -ENOMEM; 1309 goto err; 1310 } 1311 1312 /* Allocate FW Rx ring */ 1313 rc = edev->ops->common->chain_alloc(edev->cdev, 1314 QED_CHAIN_USE_TO_CONSUME_PRODUCE, 1315 QED_CHAIN_MODE_NEXT_PTR, 1316 QED_CHAIN_CNT_TYPE_U16, 1317 RX_RING_SIZE, 1318 sizeof(struct eth_rx_bd), 1319 &rxq->rx_bd_ring, NULL); 1320 if (rc) 1321 goto err; 1322 1323 /* Allocate FW completion ring */ 1324 rc = edev->ops->common->chain_alloc(edev->cdev, 1325 QED_CHAIN_USE_TO_CONSUME, 1326 QED_CHAIN_MODE_PBL, 1327 QED_CHAIN_CNT_TYPE_U16, 1328 RX_RING_SIZE, 1329 sizeof(union eth_rx_cqe), 1330 &rxq->rx_comp_ring, NULL); 1331 if (rc) 1332 goto err; 1333 1334 /* Allocate buffers for the Rx ring */ 1335 rxq->filled_buffers = 0; 1336 for (i = 0; i < rxq->num_rx_buffers; i++) { 1337 rc = qede_alloc_rx_buffer(rxq, false); 1338 if (rc) { 1339 DP_ERR(edev, 1340 "Rx buffers allocation failed at index %d\n", i); 1341 goto err; 1342 } 1343 } 1344 1345 rc = qede_alloc_sge_mem(edev, rxq); 1346 err: 1347 return rc; 1348 } 1349 1350 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) 1351 { 1352 /* Free the parallel SW ring */ 1353 if (txq->is_xdp) 1354 kfree(txq->sw_tx_ring.xdp); 1355 else 1356 kfree(txq->sw_tx_ring.skbs); 1357 1358 /* Free the real RQ ring used by FW */ 1359 edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl); 1360 } 1361 1362 /* This function allocates all memory needed per Tx queue */ 1363 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) 1364 { 1365 union eth_tx_bd_types *p_virt; 1366 int size, rc; 1367 1368 txq->num_tx_buffers = edev->q_num_tx_buffers; 1369 1370 /* Allocate the parallel driver ring for Tx buffers */ 1371 if (txq->is_xdp) { 1372 size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers; 1373 txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL); 1374 if (!txq->sw_tx_ring.xdp) 1375 goto err; 1376 } else { 1377 size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers; 1378 txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL); 1379 if (!txq->sw_tx_ring.skbs) 1380 goto err; 1381 } 1382 1383 rc = edev->ops->common->chain_alloc(edev->cdev, 1384 QED_CHAIN_USE_TO_CONSUME_PRODUCE, 1385 QED_CHAIN_MODE_PBL, 1386 QED_CHAIN_CNT_TYPE_U16, 1387 txq->num_tx_buffers, 1388 sizeof(*p_virt), 1389 &txq->tx_pbl, NULL); 1390 if (rc) 1391 goto err; 1392 1393 return 0; 1394 1395 err: 1396 qede_free_mem_txq(edev, txq); 1397 return -ENOMEM; 1398 } 1399 1400 /* This function frees all memory of a single fp */ 1401 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) 1402 { 1403 qede_free_mem_sb(edev, fp->sb_info, fp->id); 1404 1405 if (fp->type & QEDE_FASTPATH_RX) 1406 qede_free_mem_rxq(edev, fp->rxq); 1407 1408 if (fp->type & QEDE_FASTPATH_XDP) 1409 qede_free_mem_txq(edev, fp->xdp_tx); 1410 1411 if (fp->type & QEDE_FASTPATH_TX) 1412 qede_free_mem_txq(edev, fp->txq); 1413 } 1414 1415 /* This function allocates all memory needed for a single fp (i.e. an entity 1416 * which contains status block, one rx queue and/or multiple per-TC tx queues. 1417 */ 1418 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) 1419 { 1420 int rc = 0; 1421 1422 rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id); 1423 if (rc) 1424 goto out; 1425 1426 if (fp->type & QEDE_FASTPATH_RX) { 1427 rc = qede_alloc_mem_rxq(edev, fp->rxq); 1428 if (rc) 1429 goto out; 1430 } 1431 1432 if (fp->type & QEDE_FASTPATH_XDP) { 1433 rc = qede_alloc_mem_txq(edev, fp->xdp_tx); 1434 if (rc) 1435 goto out; 1436 } 1437 1438 if (fp->type & QEDE_FASTPATH_TX) { 1439 rc = qede_alloc_mem_txq(edev, fp->txq); 1440 if (rc) 1441 goto out; 1442 } 1443 1444 out: 1445 return rc; 1446 } 1447 1448 static void qede_free_mem_load(struct qede_dev *edev) 1449 { 1450 int i; 1451 1452 for_each_queue(i) { 1453 struct qede_fastpath *fp = &edev->fp_array[i]; 1454 1455 qede_free_mem_fp(edev, fp); 1456 } 1457 } 1458 1459 /* This function allocates all qede memory at NIC load. */ 1460 static int qede_alloc_mem_load(struct qede_dev *edev) 1461 { 1462 int rc = 0, queue_id; 1463 1464 for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) { 1465 struct qede_fastpath *fp = &edev->fp_array[queue_id]; 1466 1467 rc = qede_alloc_mem_fp(edev, fp); 1468 if (rc) { 1469 DP_ERR(edev, 1470 "Failed to allocate memory for fastpath - rss id = %d\n", 1471 queue_id); 1472 qede_free_mem_load(edev); 1473 return rc; 1474 } 1475 } 1476 1477 return 0; 1478 } 1479 1480 /* This function inits fp content and resets the SB, RXQ and TXQ structures */ 1481 static void qede_init_fp(struct qede_dev *edev) 1482 { 1483 int queue_id, rxq_index = 0, txq_index = 0; 1484 struct qede_fastpath *fp; 1485 1486 for_each_queue(queue_id) { 1487 fp = &edev->fp_array[queue_id]; 1488 1489 fp->edev = edev; 1490 fp->id = queue_id; 1491 1492 if (fp->type & QEDE_FASTPATH_XDP) { 1493 fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev, 1494 rxq_index); 1495 fp->xdp_tx->is_xdp = 1; 1496 } 1497 1498 if (fp->type & QEDE_FASTPATH_RX) { 1499 fp->rxq->rxq_id = rxq_index++; 1500 1501 /* Determine how to map buffers for this queue */ 1502 if (fp->type & QEDE_FASTPATH_XDP) 1503 fp->rxq->data_direction = DMA_BIDIRECTIONAL; 1504 else 1505 fp->rxq->data_direction = DMA_FROM_DEVICE; 1506 fp->rxq->dev = &edev->pdev->dev; 1507 } 1508 1509 if (fp->type & QEDE_FASTPATH_TX) { 1510 fp->txq->index = txq_index++; 1511 if (edev->dev_info.is_legacy) 1512 fp->txq->is_legacy = 1; 1513 fp->txq->dev = &edev->pdev->dev; 1514 } 1515 1516 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", 1517 edev->ndev->name, queue_id); 1518 } 1519 1520 edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO); 1521 } 1522 1523 static int qede_set_real_num_queues(struct qede_dev *edev) 1524 { 1525 int rc = 0; 1526 1527 rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev)); 1528 if (rc) { 1529 DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); 1530 return rc; 1531 } 1532 1533 rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev)); 1534 if (rc) { 1535 DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); 1536 return rc; 1537 } 1538 1539 return 0; 1540 } 1541 1542 static void qede_napi_disable_remove(struct qede_dev *edev) 1543 { 1544 int i; 1545 1546 for_each_queue(i) { 1547 napi_disable(&edev->fp_array[i].napi); 1548 1549 netif_napi_del(&edev->fp_array[i].napi); 1550 } 1551 } 1552 1553 static void qede_napi_add_enable(struct qede_dev *edev) 1554 { 1555 int i; 1556 1557 /* Add NAPI objects */ 1558 for_each_queue(i) { 1559 netif_napi_add(edev->ndev, &edev->fp_array[i].napi, 1560 qede_poll, NAPI_POLL_WEIGHT); 1561 napi_enable(&edev->fp_array[i].napi); 1562 } 1563 } 1564 1565 static void qede_sync_free_irqs(struct qede_dev *edev) 1566 { 1567 int i; 1568 1569 for (i = 0; i < edev->int_info.used_cnt; i++) { 1570 if (edev->int_info.msix_cnt) { 1571 synchronize_irq(edev->int_info.msix[i].vector); 1572 free_irq(edev->int_info.msix[i].vector, 1573 &edev->fp_array[i]); 1574 } else { 1575 edev->ops->common->simd_handler_clean(edev->cdev, i); 1576 } 1577 } 1578 1579 edev->int_info.used_cnt = 0; 1580 } 1581 1582 static int qede_req_msix_irqs(struct qede_dev *edev) 1583 { 1584 int i, rc; 1585 1586 /* Sanitize number of interrupts == number of prepared RSS queues */ 1587 if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) { 1588 DP_ERR(edev, 1589 "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", 1590 QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt); 1591 return -EINVAL; 1592 } 1593 1594 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { 1595 #ifdef CONFIG_RFS_ACCEL 1596 struct qede_fastpath *fp = &edev->fp_array[i]; 1597 1598 if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) { 1599 rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap, 1600 edev->int_info.msix[i].vector); 1601 if (rc) { 1602 DP_ERR(edev, "Failed to add CPU rmap\n"); 1603 qede_free_arfs(edev); 1604 } 1605 } 1606 #endif 1607 rc = request_irq(edev->int_info.msix[i].vector, 1608 qede_msix_fp_int, 0, edev->fp_array[i].name, 1609 &edev->fp_array[i]); 1610 if (rc) { 1611 DP_ERR(edev, "Request fp %d irq failed\n", i); 1612 qede_sync_free_irqs(edev); 1613 return rc; 1614 } 1615 DP_VERBOSE(edev, NETIF_MSG_INTR, 1616 "Requested fp irq for %s [entry %d]. Cookie is at %p\n", 1617 edev->fp_array[i].name, i, 1618 &edev->fp_array[i]); 1619 edev->int_info.used_cnt++; 1620 } 1621 1622 return 0; 1623 } 1624 1625 static void qede_simd_fp_handler(void *cookie) 1626 { 1627 struct qede_fastpath *fp = (struct qede_fastpath *)cookie; 1628 1629 napi_schedule_irqoff(&fp->napi); 1630 } 1631 1632 static int qede_setup_irqs(struct qede_dev *edev) 1633 { 1634 int i, rc = 0; 1635 1636 /* Learn Interrupt configuration */ 1637 rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info); 1638 if (rc) 1639 return rc; 1640 1641 if (edev->int_info.msix_cnt) { 1642 rc = qede_req_msix_irqs(edev); 1643 if (rc) 1644 return rc; 1645 edev->ndev->irq = edev->int_info.msix[0].vector; 1646 } else { 1647 const struct qed_common_ops *ops; 1648 1649 /* qed should learn receive the RSS ids and callbacks */ 1650 ops = edev->ops->common; 1651 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) 1652 ops->simd_handler_config(edev->cdev, 1653 &edev->fp_array[i], i, 1654 qede_simd_fp_handler); 1655 edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev); 1656 } 1657 return 0; 1658 } 1659 1660 static int qede_drain_txq(struct qede_dev *edev, 1661 struct qede_tx_queue *txq, bool allow_drain) 1662 { 1663 int rc, cnt = 1000; 1664 1665 while (txq->sw_tx_cons != txq->sw_tx_prod) { 1666 if (!cnt) { 1667 if (allow_drain) { 1668 DP_NOTICE(edev, 1669 "Tx queue[%d] is stuck, requesting MCP to drain\n", 1670 txq->index); 1671 rc = edev->ops->common->drain(edev->cdev); 1672 if (rc) 1673 return rc; 1674 return qede_drain_txq(edev, txq, false); 1675 } 1676 DP_NOTICE(edev, 1677 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n", 1678 txq->index, txq->sw_tx_prod, 1679 txq->sw_tx_cons); 1680 return -ENODEV; 1681 } 1682 cnt--; 1683 usleep_range(1000, 2000); 1684 barrier(); 1685 } 1686 1687 /* FW finished processing, wait for HW to transmit all tx packets */ 1688 usleep_range(1000, 2000); 1689 1690 return 0; 1691 } 1692 1693 static int qede_stop_txq(struct qede_dev *edev, 1694 struct qede_tx_queue *txq, int rss_id) 1695 { 1696 return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle); 1697 } 1698 1699 static int qede_stop_queues(struct qede_dev *edev) 1700 { 1701 struct qed_update_vport_params *vport_update_params; 1702 struct qed_dev *cdev = edev->cdev; 1703 struct qede_fastpath *fp; 1704 int rc, i; 1705 1706 /* Disable the vport */ 1707 vport_update_params = vzalloc(sizeof(*vport_update_params)); 1708 if (!vport_update_params) 1709 return -ENOMEM; 1710 1711 vport_update_params->vport_id = 0; 1712 vport_update_params->update_vport_active_flg = 1; 1713 vport_update_params->vport_active_flg = 0; 1714 vport_update_params->update_rss_flg = 0; 1715 1716 rc = edev->ops->vport_update(cdev, vport_update_params); 1717 vfree(vport_update_params); 1718 1719 if (rc) { 1720 DP_ERR(edev, "Failed to update vport\n"); 1721 return rc; 1722 } 1723 1724 /* Flush Tx queues. If needed, request drain from MCP */ 1725 for_each_queue(i) { 1726 fp = &edev->fp_array[i]; 1727 1728 if (fp->type & QEDE_FASTPATH_TX) { 1729 rc = qede_drain_txq(edev, fp->txq, true); 1730 if (rc) 1731 return rc; 1732 } 1733 1734 if (fp->type & QEDE_FASTPATH_XDP) { 1735 rc = qede_drain_txq(edev, fp->xdp_tx, true); 1736 if (rc) 1737 return rc; 1738 } 1739 } 1740 1741 /* Stop all Queues in reverse order */ 1742 for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) { 1743 fp = &edev->fp_array[i]; 1744 1745 /* Stop the Tx Queue(s) */ 1746 if (fp->type & QEDE_FASTPATH_TX) { 1747 rc = qede_stop_txq(edev, fp->txq, i); 1748 if (rc) 1749 return rc; 1750 } 1751 1752 /* Stop the Rx Queue */ 1753 if (fp->type & QEDE_FASTPATH_RX) { 1754 rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle); 1755 if (rc) { 1756 DP_ERR(edev, "Failed to stop RXQ #%d\n", i); 1757 return rc; 1758 } 1759 } 1760 1761 /* Stop the XDP forwarding queue */ 1762 if (fp->type & QEDE_FASTPATH_XDP) { 1763 rc = qede_stop_txq(edev, fp->xdp_tx, i); 1764 if (rc) 1765 return rc; 1766 1767 bpf_prog_put(fp->rxq->xdp_prog); 1768 } 1769 } 1770 1771 /* Stop the vport */ 1772 rc = edev->ops->vport_stop(cdev, 0); 1773 if (rc) 1774 DP_ERR(edev, "Failed to stop VPORT\n"); 1775 1776 return rc; 1777 } 1778 1779 static int qede_start_txq(struct qede_dev *edev, 1780 struct qede_fastpath *fp, 1781 struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx) 1782 { 1783 dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl); 1784 u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl); 1785 struct qed_queue_start_common_params params; 1786 struct qed_txq_start_ret_params ret_params; 1787 int rc; 1788 1789 memset(¶ms, 0, sizeof(params)); 1790 memset(&ret_params, 0, sizeof(ret_params)); 1791 1792 /* Let the XDP queue share the queue-zone with one of the regular txq. 1793 * We don't really care about its coalescing. 1794 */ 1795 if (txq->is_xdp) 1796 params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq); 1797 else 1798 params.queue_id = txq->index; 1799 1800 params.p_sb = fp->sb_info; 1801 params.sb_idx = sb_idx; 1802 1803 rc = edev->ops->q_tx_start(edev->cdev, rss_id, ¶ms, phys_table, 1804 page_cnt, &ret_params); 1805 if (rc) { 1806 DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc); 1807 return rc; 1808 } 1809 1810 txq->doorbell_addr = ret_params.p_doorbell; 1811 txq->handle = ret_params.p_handle; 1812 1813 /* Determine the FW consumer address associated */ 1814 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx]; 1815 1816 /* Prepare the doorbell parameters */ 1817 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM); 1818 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); 1819 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL, 1820 DQ_XCM_ETH_TX_BD_PROD_CMD); 1821 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 1822 1823 return rc; 1824 } 1825 1826 static int qede_start_queues(struct qede_dev *edev, bool clear_stats) 1827 { 1828 int vlan_removal_en = 1; 1829 struct qed_dev *cdev = edev->cdev; 1830 struct qed_dev_info *qed_info = &edev->dev_info.common; 1831 struct qed_update_vport_params *vport_update_params; 1832 struct qed_queue_start_common_params q_params; 1833 struct qed_start_vport_params start = {0}; 1834 int rc, i; 1835 1836 if (!edev->num_queues) { 1837 DP_ERR(edev, 1838 "Cannot update V-VPORT as active as there are no Rx queues\n"); 1839 return -EINVAL; 1840 } 1841 1842 vport_update_params = vzalloc(sizeof(*vport_update_params)); 1843 if (!vport_update_params) 1844 return -ENOMEM; 1845 1846 start.handle_ptp_pkts = !!(edev->ptp); 1847 start.gro_enable = !edev->gro_disable; 1848 start.mtu = edev->ndev->mtu; 1849 start.vport_id = 0; 1850 start.drop_ttl0 = true; 1851 start.remove_inner_vlan = vlan_removal_en; 1852 start.clear_stats = clear_stats; 1853 1854 rc = edev->ops->vport_start(cdev, &start); 1855 1856 if (rc) { 1857 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 1858 goto out; 1859 } 1860 1861 DP_VERBOSE(edev, NETIF_MSG_IFUP, 1862 "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 1863 start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); 1864 1865 for_each_queue(i) { 1866 struct qede_fastpath *fp = &edev->fp_array[i]; 1867 dma_addr_t p_phys_table; 1868 u32 page_cnt; 1869 1870 if (fp->type & QEDE_FASTPATH_RX) { 1871 struct qed_rxq_start_ret_params ret_params; 1872 struct qede_rx_queue *rxq = fp->rxq; 1873 __le16 *val; 1874 1875 memset(&ret_params, 0, sizeof(ret_params)); 1876 memset(&q_params, 0, sizeof(q_params)); 1877 q_params.queue_id = rxq->rxq_id; 1878 q_params.vport_id = 0; 1879 q_params.p_sb = fp->sb_info; 1880 q_params.sb_idx = RX_PI; 1881 1882 p_phys_table = 1883 qed_chain_get_pbl_phys(&rxq->rx_comp_ring); 1884 page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring); 1885 1886 rc = edev->ops->q_rx_start(cdev, i, &q_params, 1887 rxq->rx_buf_size, 1888 rxq->rx_bd_ring.p_phys_addr, 1889 p_phys_table, 1890 page_cnt, &ret_params); 1891 if (rc) { 1892 DP_ERR(edev, "Start RXQ #%d failed %d\n", i, 1893 rc); 1894 goto out; 1895 } 1896 1897 /* Use the return parameters */ 1898 rxq->hw_rxq_prod_addr = ret_params.p_prod; 1899 rxq->handle = ret_params.p_handle; 1900 1901 val = &fp->sb_info->sb_virt->pi_array[RX_PI]; 1902 rxq->hw_cons_ptr = val; 1903 1904 qede_update_rx_prod(edev, rxq); 1905 } 1906 1907 if (fp->type & QEDE_FASTPATH_XDP) { 1908 rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI); 1909 if (rc) 1910 goto out; 1911 1912 fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); 1913 if (IS_ERR(fp->rxq->xdp_prog)) { 1914 rc = PTR_ERR(fp->rxq->xdp_prog); 1915 fp->rxq->xdp_prog = NULL; 1916 goto out; 1917 } 1918 } 1919 1920 if (fp->type & QEDE_FASTPATH_TX) { 1921 rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0)); 1922 if (rc) 1923 goto out; 1924 } 1925 } 1926 1927 /* Prepare and send the vport enable */ 1928 vport_update_params->vport_id = start.vport_id; 1929 vport_update_params->update_vport_active_flg = 1; 1930 vport_update_params->vport_active_flg = 1; 1931 1932 if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && 1933 qed_info->tx_switching) { 1934 vport_update_params->update_tx_switching_flg = 1; 1935 vport_update_params->tx_switching_flg = 1; 1936 } 1937 1938 qede_fill_rss_params(edev, &vport_update_params->rss_params, 1939 &vport_update_params->update_rss_flg); 1940 1941 rc = edev->ops->vport_update(cdev, vport_update_params); 1942 if (rc) 1943 DP_ERR(edev, "Update V-PORT failed %d\n", rc); 1944 1945 out: 1946 vfree(vport_update_params); 1947 return rc; 1948 } 1949 1950 enum qede_unload_mode { 1951 QEDE_UNLOAD_NORMAL, 1952 }; 1953 1954 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, 1955 bool is_locked) 1956 { 1957 struct qed_link_params link_params; 1958 int rc; 1959 1960 DP_INFO(edev, "Starting qede unload\n"); 1961 1962 if (!is_locked) 1963 __qede_lock(edev); 1964 1965 edev->state = QEDE_STATE_CLOSED; 1966 1967 qede_rdma_dev_event_close(edev); 1968 1969 /* Close OS Tx */ 1970 netif_tx_disable(edev->ndev); 1971 netif_carrier_off(edev->ndev); 1972 1973 /* Reset the link */ 1974 memset(&link_params, 0, sizeof(link_params)); 1975 link_params.link_up = false; 1976 edev->ops->common->set_link(edev->cdev, &link_params); 1977 rc = qede_stop_queues(edev); 1978 if (rc) { 1979 qede_sync_free_irqs(edev); 1980 goto out; 1981 } 1982 1983 DP_INFO(edev, "Stopped Queues\n"); 1984 1985 qede_vlan_mark_nonconfigured(edev); 1986 edev->ops->fastpath_stop(edev->cdev); 1987 #ifdef CONFIG_RFS_ACCEL 1988 if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { 1989 qede_poll_for_freeing_arfs_filters(edev); 1990 qede_free_arfs(edev); 1991 } 1992 #endif 1993 /* Release the interrupts */ 1994 qede_sync_free_irqs(edev); 1995 edev->ops->common->set_fp_int(edev->cdev, 0); 1996 1997 qede_napi_disable_remove(edev); 1998 1999 qede_free_mem_load(edev); 2000 qede_free_fp_array(edev); 2001 2002 out: 2003 if (!is_locked) 2004 __qede_unlock(edev); 2005 DP_INFO(edev, "Ending qede unload\n"); 2006 } 2007 2008 enum qede_load_mode { 2009 QEDE_LOAD_NORMAL, 2010 QEDE_LOAD_RELOAD, 2011 }; 2012 2013 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, 2014 bool is_locked) 2015 { 2016 struct qed_link_params link_params; 2017 int rc; 2018 2019 DP_INFO(edev, "Starting qede load\n"); 2020 2021 if (!is_locked) 2022 __qede_lock(edev); 2023 2024 rc = qede_set_num_queues(edev); 2025 if (rc) 2026 goto out; 2027 2028 rc = qede_alloc_fp_array(edev); 2029 if (rc) 2030 goto out; 2031 2032 qede_init_fp(edev); 2033 2034 rc = qede_alloc_mem_load(edev); 2035 if (rc) 2036 goto err1; 2037 DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n", 2038 QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev)); 2039 2040 rc = qede_set_real_num_queues(edev); 2041 if (rc) 2042 goto err2; 2043 2044 #ifdef CONFIG_RFS_ACCEL 2045 if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { 2046 rc = qede_alloc_arfs(edev); 2047 if (rc) 2048 DP_NOTICE(edev, "aRFS memory allocation failed\n"); 2049 } 2050 #endif 2051 qede_napi_add_enable(edev); 2052 DP_INFO(edev, "Napi added and enabled\n"); 2053 2054 rc = qede_setup_irqs(edev); 2055 if (rc) 2056 goto err3; 2057 DP_INFO(edev, "Setup IRQs succeeded\n"); 2058 2059 rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD); 2060 if (rc) 2061 goto err4; 2062 DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); 2063 2064 /* Program un-configured VLANs */ 2065 qede_configure_vlan_filters(edev); 2066 2067 /* Ask for link-up using current configuration */ 2068 memset(&link_params, 0, sizeof(link_params)); 2069 link_params.link_up = true; 2070 edev->ops->common->set_link(edev->cdev, &link_params); 2071 2072 qede_rdma_dev_event_open(edev); 2073 2074 edev->state = QEDE_STATE_OPEN; 2075 2076 DP_INFO(edev, "Ending successfully qede load\n"); 2077 2078 goto out; 2079 err4: 2080 qede_sync_free_irqs(edev); 2081 memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); 2082 err3: 2083 qede_napi_disable_remove(edev); 2084 err2: 2085 qede_free_mem_load(edev); 2086 err1: 2087 edev->ops->common->set_fp_int(edev->cdev, 0); 2088 qede_free_fp_array(edev); 2089 edev->num_queues = 0; 2090 edev->fp_num_tx = 0; 2091 edev->fp_num_rx = 0; 2092 out: 2093 if (!is_locked) 2094 __qede_unlock(edev); 2095 2096 return rc; 2097 } 2098 2099 /* 'func' should be able to run between unload and reload assuming interface 2100 * is actually running, or afterwards in case it's currently DOWN. 2101 */ 2102 void qede_reload(struct qede_dev *edev, 2103 struct qede_reload_args *args, bool is_locked) 2104 { 2105 if (!is_locked) 2106 __qede_lock(edev); 2107 2108 /* Since qede_lock is held, internal state wouldn't change even 2109 * if netdev state would start transitioning. Check whether current 2110 * internal configuration indicates device is up, then reload. 2111 */ 2112 if (edev->state == QEDE_STATE_OPEN) { 2113 qede_unload(edev, QEDE_UNLOAD_NORMAL, true); 2114 if (args) 2115 args->func(edev, args); 2116 qede_load(edev, QEDE_LOAD_RELOAD, true); 2117 2118 /* Since no one is going to do it for us, re-configure */ 2119 qede_config_rx_mode(edev->ndev); 2120 } else if (args) { 2121 args->func(edev, args); 2122 } 2123 2124 if (!is_locked) 2125 __qede_unlock(edev); 2126 } 2127 2128 /* called with rtnl_lock */ 2129 static int qede_open(struct net_device *ndev) 2130 { 2131 struct qede_dev *edev = netdev_priv(ndev); 2132 int rc; 2133 2134 netif_carrier_off(ndev); 2135 2136 edev->ops->common->set_power_state(edev->cdev, PCI_D0); 2137 2138 rc = qede_load(edev, QEDE_LOAD_NORMAL, false); 2139 if (rc) 2140 return rc; 2141 2142 udp_tunnel_get_rx_info(ndev); 2143 2144 edev->ops->common->update_drv_state(edev->cdev, true); 2145 2146 return 0; 2147 } 2148 2149 static int qede_close(struct net_device *ndev) 2150 { 2151 struct qede_dev *edev = netdev_priv(ndev); 2152 2153 qede_unload(edev, QEDE_UNLOAD_NORMAL, false); 2154 2155 edev->ops->common->update_drv_state(edev->cdev, false); 2156 2157 return 0; 2158 } 2159 2160 static void qede_link_update(void *dev, struct qed_link_output *link) 2161 { 2162 struct qede_dev *edev = dev; 2163 2164 if (!netif_running(edev->ndev)) { 2165 DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n"); 2166 return; 2167 } 2168 2169 if (link->link_up) { 2170 if (!netif_carrier_ok(edev->ndev)) { 2171 DP_NOTICE(edev, "Link is up\n"); 2172 netif_tx_start_all_queues(edev->ndev); 2173 netif_carrier_on(edev->ndev); 2174 } 2175 } else { 2176 if (netif_carrier_ok(edev->ndev)) { 2177 DP_NOTICE(edev, "Link is down\n"); 2178 netif_tx_disable(edev->ndev); 2179 netif_carrier_off(edev->ndev); 2180 } 2181 } 2182 } 2183