/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40	0x1634
#define CHIP_NUM_57980S_10	0x1666
#define CHIP_NUM_57980S_MF	0x1636
#define CHIP_NUM_57980S_100	0x1644
#define CHIP_NUM_57980S_50	0x1654
#define CHIP_NUM_57980S_25	0x1656
#define CHIP_NUM_57980S_IOV	0x1664
#define CHIP_NUM_AH		0x8070
#define CHIP_NUM_AH_IOV		0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV

#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV,
		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif
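/* A minimal usage sketch for the SR-IOV hook above: writes to the PCI core's
 * sriov_numvfs sysfs attribute are routed to qede_sriov_configure(). The
 * device address below is hypothetical.
 *
 *   echo 4 > /sys/bus/pci/devices/0000:42:00.0/sriov_numvfs   # spawn 4 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:42:00.0/sriov_numvfs   # tear them down
 */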
static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static int __init qede_init(void)
{
	int ret;

	pr_info("qede_init: %s\n", version);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}
}

static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	DP_NOTICE(edev,
		  "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
		  txq->index, le16_to_cpu(*txq->hw_cons_ptr),
		  qed_chain_get_cons_idx(&txq->tx_pbl),
		  qed_chain_get_prod_idx(&txq->tx_pbl),
		  jiffies);
}

static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_tx_queue *txq;
	int cos;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);

	if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
		return;

	for_each_cos_in_txq(edev, cos) {
		txq = &edev->fp_array[txqueue].txq[cos];

		if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
		    qed_chain_get_prod_idx(&txq->tx_pbl))
			qede_tx_log_print(edev, txq);
	}

	if (IS_VF(edev))
		return;

	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}

static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}

static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_do_ioctl = qede_ioctl,
	.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac = qede_set_vf_mac,
	.ndo_set_vf_vlan = qede_set_vf_vlan,
	.ndo_set_vf_trust = qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state = qede_set_vf_link_state,
	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
	.ndo_get_vf_config = qede_get_vf_config,
	.ndo_set_vf_rate = qede_set_vf_rate,
#endif
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
	.ndo_setup_tc = qede_setup_tc_offload,
};

static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', declare it enabled if the device supports it.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
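/* A worked example of the decoding described above (illustrative values;
 * the authoritative mask and module-bit definitions live in the qed headers):
 *
 *   debug = 0x80000000  ->  b31 set: NOTICE level, module bitmap 0
 *   debug = 0x40000000  ->  b30 set: INFO level,   module bitmap 0
 *   debug = 0x00000003  ->  module bits 0-1 set: VERBOSE level,
 *                           *p_dp_module = 0x3, i.e. VERBOSE prints of
 *                           those two modules only
 */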
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}

static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kcalloc(edev->dev_info.num_tc,
					  sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}
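/* An illustrative layout for the ordering rule enforced by
 * qede_alloc_fp_array(), assuming fp_num_rx = 2, fp_num_tx = 2 and six
 * queues in total (hence fp_combined = 2):
 *
 *   fp_array[0..1]  QEDE_FASTPATH_RX        (Rx-only)
 *   fp_array[2..3]  QEDE_FASTPATH_COMBINED  (Rx + Tx)
 *   fp_array[4..5]  QEDE_FASTPATH_TX        (Tx-only)
 */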
/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}

/* This version of the lock should be used when acquiring the RTNL lock is also
 * needed in addition to the internal qede lock.
 */
static void qede_lock(struct qede_dev *edev)
{
	rtnl_lock();
	__qede_lock(edev);
}

static void qede_unlock(struct qede_dev *edev)
{
	__qede_unlock(edev);
	rtnl_unlock();
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	/* The locking scheme depends on the specific flag:
	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
	 * ensure that ongoing flows are ended and new ones are not started.
	 * In other cases - only the internal qede lock should be acquired.
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		qede_lock(edev);
		qede_recovery_handler(edev);
		qede_unlock(edev);
	}

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
		qede_generic_hw_err_handler(edev);
	__qede_unlock(edev);

	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		edev->ops->common->recovery_process(edev->cdev);
	}
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;
	u16 num_cons;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));

	/* 1 rx + 1 xdp + max tx cos */
	num_cons = QED_MIN_L2_CONS;

	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	u8 buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}
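/* The banner emitted by qede_log_probe() ends up looking roughly like the
 * line below; every version number here is made up for illustration:
 *
 *   qede 42:00.00: Storm FW 8.37.7.0, Management FW 8.52.9.0 [MBI 15.10.6] [eth0]
 */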
enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
	QEDE_PROBE_RECOVERY,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	sp_params.drv_major = QEDE_MAJOR_VERSION;
	sp_params.drv_minor = QEDE_MINOR_VERSION;
	sp_params.drv_rev = QEDE_REVISION_VERSION;
	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	if (mode != QEDE_PROBE_RECOVERY) {
		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
					   dp_level);
		if (!edev) {
			rc = -ENOMEM;
			goto err2;
		}
	} else {
		struct net_device *ndev = pci_get_drvdata(pdev);

		edev = netdev_priv(ndev);
		edev->cdev = cdev;
		memset(&edev->stats, 0, sizeof(edev->stats));
		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
	}

	if (is_vf)
		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
	if (rc)
		goto err3;

	if (mode != QEDE_PROBE_RECOVERY) {
		/* Prepare the lock prior to the registration of the netdev,
		 * as once it's registered we might reach flows requiring it
		 * [it's even possible to reach a flow needing it directly
		 * from there, although it's unlikely].
		 */
		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
		mutex_init(&edev->qede_lock);

		rc = register_netdev(edev->ndev);
		if (rc) {
			DP_NOTICE(edev, "Cannot register net-device\n");
			goto err4;
		}
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);
	return 0;

err4:
	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
	QEDE_REMOVE_RECOVERY,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev;
	struct qed_dev *cdev;

	if (!ndev) {
		dev_info(&pdev->dev, "Device has already been removed\n");
		return;
	}

	edev = netdev_priv(ndev);
	cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));

	if (mode != QEDE_REMOVE_RECOVERY) {
		unregister_netdev(ndev);

		cancel_delayed_work_sync(&edev->sp_task);

		edev->ops->common->set_power_state(cdev, PCI_D0);

		pci_set_drvdata(pdev, NULL);
	}

	qede_ptp_disable(edev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;
	qed_ops->common->remove(cdev);
	edev->cdev = NULL;

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	if (mode != QEDE_REMOVE_RECOVERY)
		free_netdev(ndev);

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
					      QED_SB_TYPE_L2_QUEUE);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
	int i;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

		tpa_info->state = QEDE_AGG_STATE_NONE;
	}
}
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
	size = rxq->rx_headroom +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - size;

	/* Segment size to split a page into multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog) {
		size = size + rxq->rx_buf_size;
		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
	} else {
		rxq->rx_buf_seg_size = PAGE_SIZE;
		edev->ndev->features &= ~NETIF_F_GRO_HW;
	}

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_NEXT_PTR,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring, NULL);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring, NULL);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
	if (!edev->gro_disable)
		qede_set_tpa_param(rxq);
err:
	return rc;
}
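/* A worked sizing example for qede_alloc_mem_rxq(), assuming PAGE_SIZE = 4096,
 * MTU = 1500 and no XDP program attached: rx_buf_size is NET_IP_ALIGN +
 * ETH_OVERHEAD + 1500 (comfortably under a page); adding the headroom and the
 * skb_shared_info overhead and rounding up to a power of two typically yields
 * rx_buf_seg_size = 2048, i.e. two Rx buffers carved from each page. With XDP
 * attached, every buffer instead occupies a full page.
 */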
static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.xdp);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	union eth_tx_bd_types *p_virt;
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.xdp)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    txq->num_tx_buffers,
					    sizeof(*p_virt),
					    &txq->tx_pbl, NULL);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info, fp->id);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_XDP)
		qede_free_mem_txq(edev, fp->xdp_tx);

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos)
			qede_free_mem_txq(edev, &fp->txq[cos]);
	}
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and/or multiple per-TC tx queues.
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos) {
			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
			if (rc)
				goto out;
		}
	}

out:
	return rc;
}

static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}

static void qede_empty_tx_queue(struct qede_dev *edev,
				struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	int rc, len = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
			   qed_chain_get_prod_idx(&txq->tx_pbl));

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev,
				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
				  txq->index,
				  qed_chain_get_cons_idx(&txq->tx_pbl),
				  qed_chain_get_prod_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
}

static void qede_empty_tx_queues(struct qede_dev *edev)
{
	int i;

	for_each_queue(i)
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_fastpath *fp;

				fp = &edev->fp_array[i];
				qede_empty_tx_queue(edev,
						    &fp->txq[cos]);
			}
		}
}

/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id) < 0);
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_tx_queue *txq = &fp->txq[cos];
				u16 ndev_tx_id;

				txq->cos = cos;
				txq->index = txq_index;
				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
				txq->ndev_txq_id = ndev_tx_id;

				if (edev->dev_info.is_legacy)
					txq->is_legacy = true;
				txq->dev = &edev->pdev->dev;
			}

			txq_index++;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}
}
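/* An illustrative mapping for the Tx indexing established by qede_init_fp(),
 * assuming QEDE_TSS_COUNT(edev) = 4 and two traffic classes, where the ndev
 * queue id is derived per class on top of the per-fastpath index:
 *
 *   fp txq index 0..3, cos 0  ->  ndev txq 0..3
 *   fp txq index 0..3, cos 1  ->  ndev txq 4..7
 *
 * This lines up with the (count, offset) pairs that qede_setup_tc() programs
 * via netdev_set_tc_queue().
 */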
static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev,
					  QEDE_TSS_COUNT(edev) *
					  edev->dev_info.num_tc);
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}

static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}

static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
}

static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
					      edev->int_info.msix[i].vector);
			if (rc) {
				DP_ERR(edev, "Failed to add CPU rmap\n");
				qede_free_arfs(edev);
			}
		}
#endif
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}

static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}

static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}
	return 0;
}

static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}

static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	/* delete doorbell from doorbell recovery mechanism */
	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
					   &txq->tx_db);

	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}

static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

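	/* With the vport now inactive, no new packets reach the rings,
	 * so the queues can be flushed and stopped safely.
	 */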
	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_drain_txq(edev, &fp->txq[cos], true);
				if (rc)
					return rc;
			}
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_stop_txq(edev, &fp->txq[cos], i);
				if (rc)
					return rc;
			}
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

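	/* qed returns the doorbell address and an opaque queue handle in
	 * ret_params; the handle later identifies this queue on stop.
	 */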
	/* Let the XDP queue share the queue-zone with one of the regular txq.
	 * We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.p_sb = fp->sb_info;
	params.sb_idx = sb_idx;
	params.tc = txq->cos;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	/* register doorbell with doorbell recovery mechanism */
	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
						&txq->tx_db, DB_REC_WIDTH_32B,
						DB_REC_KERNEL);

	return rc;
}

static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update vport to active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);

	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qed_rxq_start_ret_params ret_params;
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&ret_params, 0, sizeof(ret_params));
			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.p_sb = fp->sb_info;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, i, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt, &ret_params);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				goto out;
			}

			/* Use the return parameters */
			rxq->hw_rxq_prod_addr = ret_params.p_prod;
			rxq->handle = ret_params.p_handle;

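			/* As with Tx, the RX_PI status block entry holds the
			 * FW consumer for this Rx queue.
			 */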
			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
			if (rc)
				goto out;

			bpf_prog_add(edev->xdp_prog, 1);
			fp->rxq->xdp_prog = edev->xdp_prog;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
						    TX_PI(cos));
				if (rc)
					goto out;
			}
		}
	}

	/* Prepare and send the vport enable */
	vport_update_params->vport_id = start.vport_id;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 1;

	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params->update_tx_switching_flg = 1;
		vport_update_params->tx_switching_flg = 1;
	}

	qede_fill_rss_params(edev, &vport_update_params->rss_params,
			     &vport_update_params->update_rss_flg);

	rc = edev->ops->vport_update(cdev, vport_update_params);
	if (rc)
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
	vfree(vport_update_params);
	return rc;
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
	QEDE_UNLOAD_RECOVERY,
};

static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	if (mode != QEDE_UNLOAD_RECOVERY)
		edev->state = QEDE_STATE_CLOSED;

	qede_rdma_dev_event_close(edev);

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	if (mode != QEDE_UNLOAD_RECOVERY) {
		/* Reset the link */
		memset(&link_params, 0, sizeof(link_params));
		link_params.link_up = false;
		edev->ops->common->set_link(edev->cdev, &link_params);

		rc = qede_stop_queues(edev);
		if (rc) {
			qede_sync_free_irqs(edev);
			goto out;
		}

		DP_INFO(edev, "Stopped Queues\n");
	}

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
		qede_poll_for_freeing_arfs_filters(edev);
		qede_free_arfs(edev);
	}

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	if (mode == QEDE_UNLOAD_RECOVERY)
		qede_empty_tx_queues(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);

	if (mode != QEDE_UNLOAD_RECOVERY)
		DP_NOTICE(edev, "Link is down\n");

	edev->ptp_skip_txts = 0;

	DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
	QEDE_LOAD_RECOVERY,
};

static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	u8 num_tc;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);

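	/* Load sequence: queues -> fastpath memory -> NAPI -> IRQs ->
	 * vport/queue start. The error labels below unwind in reverse.
	 */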
	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
		rc = qede_alloc_arfs(edev);
		if (rc)
			DP_NOTICE(edev, "aRFS memory allocation failed\n");
	}

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	num_tc = netdev_get_num_tc(edev->ndev);
	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
	qede_setup_tc(edev->ndev, num_tc);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	edev->state = QEDE_STATE_OPEN;

	DP_INFO(edev, "Ending qede load successfully\n");

	goto out;
err4:
	qede_sync_free_irqs(edev);
	edev->int_info.msix_cnt = 0;
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}

/* 'func' should be able to run between unload and reload assuming the
 * interface is actually running, or afterwards in case it's currently down.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

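	/* Typical callers wrap a configuration change (e.g. an MTU or
	 * ring-size update) in 'args->func' so it runs while the device
	 * is down.
	 */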
	/* Since qede_lock is held, the internal state wouldn't change even
	 * if the netdev state were to start transitioning. Check whether the
	 * current internal configuration indicates the device is up, then
	 * reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}

/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_get_rx_info(ndev);

	edev->ops->common->update_drv_state(edev->cdev, true);

	return 0;
}

static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

	edev->ops->common->update_drv_state(edev->cdev, false);

	return 0;
}

static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
			qede_rdma_dev_event_open(edev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
			qede_rdma_dev_event_close(edev);
		}
	}
}

static void qede_schedule_recovery_handler(void *dev)
{
	struct qede_dev *edev = dev;

	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev,
			  "Avoid scheduling recovery handling since already in the recovery state\n");
		return;
	}

	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled a recovery handler\n");
}

static void qede_recovery_failed(struct qede_dev *edev)
{
	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");

	netif_device_detach(edev->ndev);

	if (edev->cdev)
		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
}

static void qede_recovery_handler(struct qede_dev *edev)
{
	u32 curr_state = edev->state;
	int rc;

	DP_NOTICE(edev, "Starting a recovery process\n");

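	/* Recovery re-creates the device from scratch: unload (if it was
	 * up), remove, probe anew and finally re-apply the configuration.
	 */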
	/* No need to acquire the qede_lock here; qede_sp_task has already
	 * taken it before calling this function.
	 */
	edev->state = QEDE_STATE_RECOVERY;

	edev->ops->common->recovery_prolog(edev->cdev);

	if (curr_state == QEDE_STATE_OPEN)
		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);

	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);

	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
			  IS_VF(edev), QEDE_PROBE_RECOVERY);
	if (rc) {
		edev->cdev = NULL;
		goto err;
	}

	if (curr_state == QEDE_STATE_OPEN) {
		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
		if (rc)
			goto err;

		qede_config_rx_mode(edev->ndev);
		udp_tunnel_get_rx_info(edev->ndev);
	}

	edev->state = curr_state;

	DP_NOTICE(edev, "Recovery handling is done\n");

	return;

err:
	qede_recovery_failed(edev);
}

static void qede_atomic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Get a call trace of the flow that led to the error */
	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));

	/* Prevent HW attentions from being reasserted */
	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
		edev->ops->common->attn_clr_enable(cdev, true);

	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
}

static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Trigger a recovery process.
	 * This is placed in the sleep-requiring section just to make
	 * sure it is the last one, and that all the other operations
	 * were completed.
	 */
	if (test_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags))
		edev->ops->common->recovery_process(cdev);

	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);

	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
}

static void qede_set_hw_err_flags(struct qede_dev *edev,
				  enum qed_hw_err_type err_type)
{
	unsigned long err_flags = 0;

	switch (err_type) {
	case QED_HW_ERR_DMAE_FAIL:
		set_bit(QEDE_ERR_WARN, &err_flags);
		fallthrough;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_RAMROD_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
		break;

	default:
		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
		break;
	}

	edev->err_flags |= err_flags;
}

static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type)
{
	struct qede_dev *edev = dev;

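	/* Handling is split in two: the atomic (non-sleepable) part runs
	 * right here, while the sleepable part is deferred to sp_task.
	 */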
	/* Fan failure cannot be masked by handling of another HW error or by a
	 * concurrent recovery process.
	 */
	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	     edev->state == QEDE_STATE_RECOVERY) &&
	    err_type != QED_HW_ERR_FAN_FAIL) {
		DP_INFO(edev,
			"Avoid scheduling error handling while another HW error is being handled\n");
		return;
	}

	if (err_type >= QED_HW_ERR_LAST) {
		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
		return;
	}

	qede_set_hw_err_flags(edev, err_type);
	qede_atomic_hw_err_handler(edev);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
}

static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
	if (netif_xmit_stopped(netdev_txq))
		return true;

	return false;
}

static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qede_dev *edev = dev;
	struct netdev_hw_addr *ha;
	int i;

	if (edev->ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (edev->ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
	memset(data->mac[1], 0, ETH_ALEN);
	memset(data->mac[2], 0, ETH_ALEN);
	/* Copy the first two UC macs */
	netif_addr_lock_bh(edev->ndev);
	i = 1;
	netdev_for_each_uc_addr(ha, edev->ndev) {
		ether_addr_copy(data->mac[i++], ha->addr);
		if (i == QED_TLV_MAC_COUNT)
			break;
	}

	netif_addr_unlock_bh(edev->ndev);
}

static void qede_get_eth_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_eth *etlv = data;
	struct qede_dev *edev = dev;
	struct qede_fastpath *fp;
	int i;

	etlv->lso_maxoff_size = 0xFFFF;
	etlv->lso_maxoff_size_set = true;
	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
	etlv->lso_minseg_size_set = true;
	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
	etlv->prom_mode_set = true;
	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
	etlv->tx_descr_size_set = true;
	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
	etlv->rx_descr_size_set = true;
	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
	etlv->iov_offload_set = true;

	/* Fill information regarding queues; should be done under the qede
	 * lock to guarantee those don't change beneath our feet.
	 */
	etlv->txqs_empty = true;
	etlv->rxqs_empty = true;
	etlv->num_txqs_full = 0;
	etlv->num_rxqs_full = 0;

	__qede_lock(edev);
	for_each_queue(i) {
		fp = &edev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_TX) {
			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);

			if (txq->sw_tx_cons != txq->sw_tx_prod)
				etlv->txqs_empty = false;
			if (qede_is_txq_full(edev, txq))
				etlv->num_txqs_full++;
		}
		if (fp->type & QEDE_FASTPATH_RX) {
			if (qede_has_rx_work(fp->rxq))
				etlv->rxqs_empty = false;

			/* This one is a bit tricky; the firmware might stop
			 * placing packets if the ring is not yet full, so
			 * give an approximation.
			 */
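			/* Count the queue as full when the FW producer is
			 * within ~100 BDs of the driver's consumer.
			 */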
			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
			    RX_RING_SIZE - 100)
				etlv->num_rxqs_full++;
		}
	}
	__qede_unlock(edev);

	etlv->txqs_empty_set = true;
	etlv->rxqs_empty_set = true;
	etlv->num_txqs_full_set = true;
	etlv->num_rxqs_full_set = true;
}

/**
 * qede_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev)
		return PCI_ERS_RESULT_NONE;

	DP_NOTICE(edev, "IO error detected [%d]\n", state);

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev, "Device already in the recovery state\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_NONE;
	}

	/* PF handles the recovery of its VFs */
	if (IS_VF(edev)) {
		DP_VERBOSE(edev, QED_MSG_IOV,
			   "VF recovery is handled by its PF\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	set_bit(QEDE_SP_AER, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	__qede_unlock(edev);

	return PCI_ERS_RESULT_CAN_RECOVER;
}