// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include "qede.h"
#include "qede_ptp.h"

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664
#define CHIP_NUM_AH			0x8070
#define CHIP_NUM_AH_IOV			0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV

#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static
int __init qede_init(void)
{
	int ret;

	pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");

	qede_forced_speed_maps_init();

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}
}

static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
	char *p_sb = (char *)fp->sb_info->sb_virt;
	u32 sb_size, i;

	sb_size = sizeof(struct status_block);

	for (i = 0; i < sb_size; i += 8)
		DP_NOTICE(edev,
			  "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
			  p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
			  p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
}

static void
qede_txq_fp_log_metadata(struct qede_dev *edev,
			 struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_chain *p_chain = &txq->tx_pbl;

	/* Dump txq/fp/sb ids etc. other metadata */
	DP_NOTICE(edev,
		  "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
		  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
		  p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);

	/* Dump all the relevant prod/cons indexes */
	DP_NOTICE(edev,
		  "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
		  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
		  qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
}

static void
qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_sb_info_dbg sb_dbg;
	int rc;

	/* sb info */
	qede_fp_sb_dump(edev, fp);

	memset(&sb_dbg, 0, sizeof(sb_dbg));
	rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);

	DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
		  sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);

	/* report to mfw */
	edev->ops->common->mfw_report(edev->cdev,
				      "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
				      txq->index, le16_to_cpu(*txq->hw_cons_ptr),
				      qed_chain_get_cons_idx(&txq->tx_pbl),
				      qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
	if (!rc)
		edev->ops->common->mfw_report(edev->cdev,
					      "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
					      txq->index, fp->sb_info->igu_sb_id,
					      sb_dbg.igu_prod, sb_dbg.igu_cons,
					      sb_dbg.pi[TX_PI(txq->cos)]);
}

static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);

	for_each_queue(i) {
		struct qede_tx_queue *txq;
		struct qede_fastpath *fp;
		int cos;

		fp = &edev->fp_array[i];
		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for_each_cos_in_txq(edev, cos) {
			txq = &fp->txq[cos];

			/* Dump basic metadata for all queues */
			qede_txq_fp_log_metadata(edev, fp, txq);

			if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
			    qed_chain_get_prod_idx(&txq->tx_pbl))
				qede_tx_log_print(edev, fp, txq);
		}
	}

	if (IS_VF(edev))
		return;

	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}

static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}

static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_eth_ioctl		= qede_ioctl,
	.ndo_tx_timeout		= qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac		= qede_set_vf_mac,
	.ndo_set_vf_vlan	= qede_set_vf_vlan,
	.ndo_set_vf_trust	= qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state	= qede_set_vf_link_state,
	.ndo_set_vf_spoofchk	= qede_set_vf_spoofchk,
	.ndo_get_vf_config	= qede_get_vf_config,
	.ndo_set_vf_rate	= qede_set_vf_rate,
#endif
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= qede_rx_flow_steer,
#endif
	.ndo_xdp_xmit		= qede_xdp_transmit,
	.ndo_setup_tc		= qede_setup_tc_offload,
};

static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
	.ndo_xdp_xmit		= qede_xdp_transmit,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', if device supports it declare it's enabled.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (edev->dev_info.common.b_arfs_capable)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);

		qede_set_udp_tunnels(edev);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}

static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	if (!edev->coal_entry) {
		edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev),
					   sizeof(*edev->coal_entry),
					   GFP_KERNEL);
		if (!edev->coal_entry) {
			DP_ERR(edev, "coalesce entry allocation failed\n");
			goto err;
		}
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
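	 * For example, with one dedicated Rx queue, two combined queues and
	 * one dedicated Tx queue, the resulting layout is
	 * [RX, COMBINED, COMBINED, TX].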
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kcalloc(edev->dev_info.num_tc,
					  sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}

/* This version of the lock should be used when acquiring the RTNL lock is also
 * needed in addition to the internal qede lock.
 */
static void qede_lock(struct qede_dev *edev)
{
	rtnl_lock();
	__qede_lock(edev);
}

static void qede_unlock(struct qede_dev *edev)
{
	__qede_unlock(edev);
	rtnl_unlock();
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	/* Disable execution of this deferred work once
	 * qede removal is in progress; this stops any future
	 * scheduling of sp_task.
	 */
	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
		return;

	/* The locking scheme depends on the specific flag:
	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
	 * ensure that ongoing flows are ended and new ones are not started.
	 * In other cases - only the internal qede lock should be acquired.
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		qede_lock(edev);
		qede_recovery_handler(edev);
		qede_unlock(edev);
	}

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
		qede_generic_hw_err_handler(edev);
	__qede_unlock(edev);

	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		edev->ops->common->recovery_process(edev->cdev);
	}
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;
	u16 num_cons;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));

	/* 1 rx + 1 xdp + max tx cos */
	num_cons = QED_MIN_L2_CONS;

	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	u8 buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}

enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
	QEDE_PROBE_RECOVERY,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	if (mode != QEDE_PROBE_RECOVERY) {
		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
					   dp_level);
		if (!edev) {
			rc = -ENOMEM;
			goto err2;
		}

		edev->devlink = qed_ops->common->devlink_register(cdev);
		if (IS_ERR(edev->devlink)) {
			DP_NOTICE(edev, "Cannot register devlink\n");
			rc = PTR_ERR(edev->devlink);
			edev->devlink = NULL;
			goto err3;
		}
	} else {
		struct net_device *ndev = pci_get_drvdata(pdev);
		struct qed_devlink *qdl;

		edev = netdev_priv(ndev);
		qdl = devlink_priv(edev->devlink);
		qdl->cdev = cdev;
		edev->cdev = cdev;
		memset(&edev->stats, 0, sizeof(edev->stats));
		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
	}

	if (is_vf)
		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
	if (rc)
		goto err3;

	if (mode != QEDE_PROBE_RECOVERY) {
		/* Prepare the lock prior to the registration of the netdev,
		 * as once it's registered we might reach flows requiring it
		 * [it's even possible to reach a flow needing it directly
		 * from there, although it's unlikely].
		 */
		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
		mutex_init(&edev->qede_lock);

		rc = register_netdev(edev->ndev);
		if (rc) {
			DP_NOTICE(edev, "Cannot register net-device\n");
			goto err4;
		}
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);
	return 0;

err4:
	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
	if (mode != QEDE_PROBE_RECOVERY)
		free_netdev(edev->ndev);
	else
		edev->cdev = NULL;
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
	QEDE_REMOVE_RECOVERY,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev;
	struct qed_dev *cdev;

	if (!ndev) {
		dev_info(&pdev->dev, "Device has already been removed\n");
		return;
	}

	edev = netdev_priv(ndev);
	cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));

	if (mode != QEDE_REMOVE_RECOVERY) {
		set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
		unregister_netdev(ndev);

		cancel_delayed_work_sync(&edev->sp_task);

		edev->ops->common->set_power_state(cdev, PCI_D0);

		pci_set_drvdata(pdev, NULL);
	}

	qede_ptp_disable(edev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;

	if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
		qed_ops->common->devlink_unregister(edev->devlink);
		edev->devlink = NULL;
	}
	qed_ops->common->remove(cdev);
	edev->cdev = NULL;

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	if (mode != QEDE_REMOVE_RECOVERY) {
		kfree(edev->coal_entry);
		free_netdev(ndev);
	}

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
					      QED_SB_TYPE_L2_QUEUE);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
	int i;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

		tpa_info->state = QEDE_AGG_STATE_NONE;
	}
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	struct qed_chain_init_params params = {
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= RX_RING_SIZE,
	};
	struct qed_dev *cdev = edev->cdev;
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
	size = rxq->rx_headroom +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - size;

	/* Segment size to split a page in multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
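	 * (For example, with a standard 1500-byte MTU the buffer plus the
	 * headroom and shared-info overhead typically rounds up to a 2KB
	 * segment, i.e. two Rx buffers per 4KB page.)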
	 */
	if (!edev->xdp_prog) {
		size = size + rxq->rx_buf_size;
		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
	} else {
		rxq->rx_buf_seg_size = PAGE_SIZE;
		edev->ndev->features &= ~NETIF_F_GRO_HW;
	}

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	params.mode = QED_CHAIN_MODE_NEXT_PTR;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
	params.elem_size = sizeof(struct eth_rx_bd);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	params.mode = QED_CHAIN_MODE_PBL;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME;
	params.elem_size = sizeof(union eth_rx_cqe);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
	if (!edev->gro_disable)
		qede_set_tpa_param(rxq);
err:
	return rc;
}

static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.xdp);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= edev->q_num_tx_buffers,
		.elem_size	= sizeof(union eth_tx_bd_types),
	};
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.xdp)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info, fp->id);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_XDP)
		qede_free_mem_txq(edev, fp->xdp_tx);

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos)
			qede_free_mem_txq(edev, &fp->txq[cos]);
	}
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and/or multiple per-TC tx queues).
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos) {
			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
			if (rc)
				goto out;
		}
	}

out:
	return rc;
}

static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}

static void qede_empty_tx_queue(struct qede_dev *edev,
				struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	int rc, len = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
			   qed_chain_get_prod_idx(&txq->tx_pbl));

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev,
				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
				  txq->index,
				  qed_chain_get_cons_idx(&txq->tx_pbl),
				  qed_chain_get_prod_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
}

static void qede_empty_tx_queues(struct qede_dev *edev)
{
	int i;

	for_each_queue(i)
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_fastpath *fp;

				fp = &edev->fp_array[i];
				qede_empty_tx_queue(edev,
						    &fp->txq[cos]);
			}
		}
}

/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;
	bool init_xdp = false;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;

			spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
			init_xdp = true;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id, 0) < 0);

			if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
						       MEM_TYPE_PAGE_ORDER0,
						       NULL)) {
				DP_NOTICE(edev,
					  "Failed to register XDP memory model\n");
			}
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_tx_queue *txq = &fp->txq[cos];
				u16 ndev_tx_id;

				txq->cos = cos;
				txq->index = txq_index;
				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
				txq->ndev_txq_id = ndev_tx_id;

				if (edev->dev_info.is_legacy)
					txq->is_legacy = true;
				txq->dev = &edev->pdev->dev;
			}

			txq_index++;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	if (init_xdp) {
		edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
		DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
	}
}

static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev,
					  QEDE_TSS_COUNT(edev) *
					  edev->dev_info.num_tc);
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}

static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
		napi_enable(&edev->fp_array[i].napi);
	}
}

static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
	edev->int_info.msix_cnt = 0;
}

static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1948 rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1949 edev->int_info.msix[i].vector);
1950 if (rc) {
1951 DP_ERR(edev, "Failed to add CPU rmap\n");
1952 qede_free_arfs(edev);
1953 }
1954 }
1955 #endif
1956 rc = request_irq(edev->int_info.msix[i].vector,
1957 qede_msix_fp_int, 0, edev->fp_array[i].name,
1958 &edev->fp_array[i]);
1959 if (rc) {
1960 DP_ERR(edev, "Request fp %d irq failed\n", i);
1961 #ifdef CONFIG_RFS_ACCEL
1962 if (edev->ndev->rx_cpu_rmap)
1963 free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
1964
1965 edev->ndev->rx_cpu_rmap = NULL;
1966 #endif
1967 qede_sync_free_irqs(edev);
1968 return rc;
1969 }
1970 DP_VERBOSE(edev, NETIF_MSG_INTR,
1971 "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1972 edev->fp_array[i].name, i,
1973 &edev->fp_array[i]);
1974 edev->int_info.used_cnt++;
1975 }
1976
1977 return 0;
1978 }
1979
1980 static void qede_simd_fp_handler(void *cookie)
1981 {
1982 struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1983
1984 napi_schedule_irqoff(&fp->napi);
1985 }
1986
1987 static int qede_setup_irqs(struct qede_dev *edev)
1988 {
1989 int i, rc = 0;
1990
1991 /* Learn Interrupt configuration */
1992 rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1993 if (rc)
1994 return rc;
1995
1996 if (edev->int_info.msix_cnt) {
1997 rc = qede_req_msix_irqs(edev);
1998 if (rc)
1999 return rc;
2000 edev->ndev->irq = edev->int_info.msix[0].vector;
2001 } else {
2002 const struct qed_common_ops *ops;
2003
2004 /* qed should receive the RSS ids and callbacks */
2005 ops = edev->ops->common;
2006 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
2007 ops->simd_handler_config(edev->cdev,
2008 &edev->fp_array[i], i,
2009 qede_simd_fp_handler);
2010 edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
2011 }
2012 return 0;
2013 }
2014
2015 static int qede_drain_txq(struct qede_dev *edev,
2016 struct qede_tx_queue *txq, bool allow_drain)
2017 {
2018 int rc, cnt = 1000;
2019
2020 while (txq->sw_tx_cons != txq->sw_tx_prod) {
2021 if (!cnt) {
2022 if (allow_drain) {
2023 DP_NOTICE(edev,
2024 "Tx queue[%d] is stuck, requesting MCP to drain\n",
2025 txq->index);
2026 rc = edev->ops->common->drain(edev->cdev);
2027 if (rc)
2028 return rc;
2029 return qede_drain_txq(edev, txq, false);
2030 }
2031 DP_NOTICE(edev,
2032 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
2033 txq->index, txq->sw_tx_prod,
2034 txq->sw_tx_cons);
2035 return -ENODEV;
2036 }
2037 cnt--;
2038 usleep_range(1000, 2000);
2039 barrier();
2040 }
2041
2042 /* FW finished processing, wait for HW to transmit all tx packets */
2043 usleep_range(1000, 2000);
2044
2045 return 0;
2046 }
2047
2048 static int qede_stop_txq(struct qede_dev *edev,
2049 struct qede_tx_queue *txq, int rss_id)
2050 {
2051 /* delete doorbell from doorbell recovery mechanism */
2052 edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
2053 &txq->tx_db);
2054
2055 return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
2056 }
2057
2058 static int qede_stop_queues(struct qede_dev *edev)
2059 {
2060 struct qed_update_vport_params *vport_update_params;
2061 struct qed_dev *cdev = edev->cdev;
2062 struct qede_fastpath *fp;
2063 int rc, i;
2064
2065 /* Disable the vport */
2066 vport_update_params = vzalloc(sizeof(*vport_update_params));
2067 if (!vport_update_params)
2068 return -ENOMEM;
2069
2070 vport_update_params->vport_id = 0;
2071 vport_update_params->update_vport_active_flg = 1;
2072 vport_update_params->vport_active_flg = 0;
2073 vport_update_params->update_rss_flg = 0;
2074
2075 rc =
edev->ops->vport_update(cdev, vport_update_params);
2076 vfree(vport_update_params);
2077
2078 if (rc) {
2079 DP_ERR(edev, "Failed to update vport\n");
2080 return rc;
2081 }
2082
2083 /* Flush Tx queues. If needed, request drain from MCP */
2084 for_each_queue(i) {
2085 fp = &edev->fp_array[i];
2086
2087 if (fp->type & QEDE_FASTPATH_TX) {
2088 int cos;
2089
2090 for_each_cos_in_txq(edev, cos) {
2091 rc = qede_drain_txq(edev, &fp->txq[cos], true);
2092 if (rc)
2093 return rc;
2094 }
2095 }
2096
2097 if (fp->type & QEDE_FASTPATH_XDP) {
2098 rc = qede_drain_txq(edev, fp->xdp_tx, true);
2099 if (rc)
2100 return rc;
2101 }
2102 }
2103
2104 /* Stop all Queues in reverse order */
2105 for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
2106 fp = &edev->fp_array[i];
2107
2108 /* Stop the Tx Queue(s) */
2109 if (fp->type & QEDE_FASTPATH_TX) {
2110 int cos;
2111
2112 for_each_cos_in_txq(edev, cos) {
2113 rc = qede_stop_txq(edev, &fp->txq[cos], i);
2114 if (rc)
2115 return rc;
2116 }
2117 }
2118
2119 /* Stop the Rx Queue */
2120 if (fp->type & QEDE_FASTPATH_RX) {
2121 rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
2122 if (rc) {
2123 DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
2124 return rc;
2125 }
2126 }
2127
2128 /* Stop the XDP forwarding queue */
2129 if (fp->type & QEDE_FASTPATH_XDP) {
2130 rc = qede_stop_txq(edev, fp->xdp_tx, i);
2131 if (rc)
2132 return rc;
2133
2134 bpf_prog_put(fp->rxq->xdp_prog);
2135 }
2136 }
2137
2138 /* Stop the vport */
2139 rc = edev->ops->vport_stop(cdev, 0);
2140 if (rc)
2141 DP_ERR(edev, "Failed to stop VPORT\n");
2142
2143 return rc;
2144 }
2145
2146 static int qede_start_txq(struct qede_dev *edev,
2147 struct qede_fastpath *fp,
2148 struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
2149 {
2150 dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
2151 u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
2152 struct qed_queue_start_common_params params;
2153 struct qed_txq_start_ret_params ret_params;
2154 int rc;
2155
2156 memset(&params, 0, sizeof(params));
2157 memset(&ret_params, 0, sizeof(ret_params));
2158
2159 /* Let the XDP queue share the queue-zone with one of the regular txq.
2160 * We don't really care about its coalescing.
2161 */
2162 if (txq->is_xdp)
2163 params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
2164 else
2165 params.queue_id = txq->index;
2166
2167 params.p_sb = fp->sb_info;
2168 params.sb_idx = sb_idx;
2169 params.tc = txq->cos;
2170
2171 rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
2172 page_cnt, &ret_params);
2173 if (rc) {
2174 DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
2175 return rc;
2176 }
2177
2178 txq->doorbell_addr = ret_params.p_doorbell;
2179 txq->handle = ret_params.p_handle;
2180
2181 /* Determine the FW consumer address associated with this queue */
2182 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
2183
2184 /* Prepare the doorbell parameters */
2185 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
2186 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
2187 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
2188 DQ_XCM_ETH_TX_BD_PROD_CMD);
2189 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
2190
2191 /* register doorbell with doorbell recovery mechanism */
2192 rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
2193 &txq->tx_db, DB_REC_WIDTH_32B,
2194 DB_REC_KERNEL);
2195
2196 return rc;
2197 }
2198
2199 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
2200 {
2201 int vlan_removal_en = 1;
2202 struct qed_dev *cdev = edev->cdev;
2203 struct qed_dev_info *qed_info = &edev->dev_info.common;
2204 struct qed_update_vport_params *vport_update_params;
2205 struct qed_queue_start_common_params q_params;
2206 struct qed_start_vport_params start = {0};
2207 int rc, i;
2208
2209 if (!edev->num_queues) {
2210 DP_ERR(edev,
2211 "Cannot update V-PORT as active as there are no Rx queues\n");
2212 return -EINVAL;
2213 }
2214
2215 vport_update_params = vzalloc(sizeof(*vport_update_params));
2216 if (!vport_update_params)
2217 return -ENOMEM;
2218
2219 start.handle_ptp_pkts = !!(edev->ptp);
2220 start.gro_enable = !edev->gro_disable;
2221 start.mtu = edev->ndev->mtu;
2222 start.vport_id = 0;
2223 start.drop_ttl0 = true;
2224 start.remove_inner_vlan = vlan_removal_en;
2225 start.clear_stats = clear_stats;
2226
2227 rc = edev->ops->vport_start(cdev, &start);
2228
2229 if (rc) {
2230 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
2231 goto out;
2232 }
2233
2234 DP_VERBOSE(edev, NETIF_MSG_IFUP,
2235 "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
2236 start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
2237
2238 for_each_queue(i) {
2239 struct qede_fastpath *fp = &edev->fp_array[i];
2240 dma_addr_t p_phys_table;
2241 u32 page_cnt;
2242
2243 if (fp->type & QEDE_FASTPATH_RX) {
2244 struct qed_rxq_start_ret_params ret_params;
2245 struct qede_rx_queue *rxq = fp->rxq;
2246 __le16 *val;
2247
2248 memset(&ret_params, 0, sizeof(ret_params));
2249 memset(&q_params, 0, sizeof(q_params));
2250 q_params.queue_id = rxq->rxq_id;
2251 q_params.vport_id = 0;
2252 q_params.p_sb = fp->sb_info;
2253 q_params.sb_idx = RX_PI;
2254
2255 p_phys_table =
2256 qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
2257 page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
2258
2259 rc = edev->ops->q_rx_start(cdev, i, &q_params,
2260 rxq->rx_buf_size,
2261 rxq->rx_bd_ring.p_phys_addr,
2262 p_phys_table,
2263 page_cnt, &ret_params);
2264 if (rc) {
2265 DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
2266 rc);
2267 goto out;
2268 }
2269
2270 /* Use the return parameters */
2271 rxq->hw_rxq_prod_addr = ret_params.p_prod;
2272 rxq->handle = ret_params.p_handle;
2273
2274 val =
&fp->sb_info->sb_virt->pi_array[RX_PI];
2275 rxq->hw_cons_ptr = val;
2276
2277 qede_update_rx_prod(edev, rxq);
2278 }
2279
2280 if (fp->type & QEDE_FASTPATH_XDP) {
2281 rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
2282 if (rc)
2283 goto out;
2284
2285 bpf_prog_add(edev->xdp_prog, 1);
2286 fp->rxq->xdp_prog = edev->xdp_prog;
2287 }
2288
2289 if (fp->type & QEDE_FASTPATH_TX) {
2290 int cos;
2291
2292 for_each_cos_in_txq(edev, cos) {
2293 rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
2294 TX_PI(cos));
2295 if (rc)
2296 goto out;
2297 }
2298 }
2299 }
2300
2301 /* Prepare and send the vport enable */
2302 vport_update_params->vport_id = start.vport_id;
2303 vport_update_params->update_vport_active_flg = 1;
2304 vport_update_params->vport_active_flg = 1;
2305
2306 if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
2307 qed_info->tx_switching) {
2308 vport_update_params->update_tx_switching_flg = 1;
2309 vport_update_params->tx_switching_flg = 1;
2310 }
2311
2312 qede_fill_rss_params(edev, &vport_update_params->rss_params,
2313 &vport_update_params->update_rss_flg);
2314
2315 rc = edev->ops->vport_update(cdev, vport_update_params);
2316 if (rc)
2317 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
2318
2319 out:
2320 vfree(vport_update_params);
2321 return rc;
2322 }
2323
2324 enum qede_unload_mode {
2325 QEDE_UNLOAD_NORMAL,
2326 QEDE_UNLOAD_RECOVERY,
2327 };
2328
2329 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
2330 bool is_locked)
2331 {
2332 struct qed_link_params link_params;
2333 int rc;
2334
2335 DP_INFO(edev, "Starting qede unload\n");
2336
2337 if (!is_locked)
2338 __qede_lock(edev);
2339
2340 clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2341
2342 if (mode != QEDE_UNLOAD_RECOVERY)
2343 edev->state = QEDE_STATE_CLOSED;
2344
2345 qede_rdma_dev_event_close(edev);
2346
2347 /* Close OS Tx */
2348 netif_tx_disable(edev->ndev);
2349 netif_carrier_off(edev->ndev);
2350
2351 if (mode != QEDE_UNLOAD_RECOVERY) {
2352 /* Reset the link */
2353 memset(&link_params, 0, sizeof(link_params));
2354 link_params.link_up = false;
2355 edev->ops->common->set_link(edev->cdev, &link_params);
2356
2357 rc = qede_stop_queues(edev);
2358 if (rc) {
2359 #ifdef CONFIG_RFS_ACCEL
2360 if (edev->dev_info.common.b_arfs_capable) {
2361 qede_poll_for_freeing_arfs_filters(edev);
2362 if (edev->ndev->rx_cpu_rmap)
2363 free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
2364
2365 edev->ndev->rx_cpu_rmap = NULL;
2366 }
2367 #endif
2368 qede_sync_free_irqs(edev);
2369 goto out;
2370 }
2371
2372 DP_INFO(edev, "Stopped Queues\n");
2373 }
2374
2375 qede_vlan_mark_nonconfigured(edev);
2376 edev->ops->fastpath_stop(edev->cdev);
2377
2378 if (edev->dev_info.common.b_arfs_capable) {
2379 qede_poll_for_freeing_arfs_filters(edev);
2380 qede_free_arfs(edev);
2381 }
2382
2383 /* Release the interrupts */
2384 qede_sync_free_irqs(edev);
2385 edev->ops->common->set_fp_int(edev->cdev, 0);
2386
2387 qede_napi_disable_remove(edev);
2388
2389 if (mode == QEDE_UNLOAD_RECOVERY)
2390 qede_empty_tx_queues(edev);
2391
2392 qede_free_mem_load(edev);
2393 qede_free_fp_array(edev);
2394
2395 out:
2396 if (!is_locked)
2397 __qede_unlock(edev);
2398
2399 if (mode != QEDE_UNLOAD_RECOVERY)
2400 DP_NOTICE(edev, "Link is down\n");
2401
2402 edev->ptp_skip_txts = 0;
2403
2404 DP_INFO(edev, "Ending qede unload\n");
2405 }
2406
2407 enum qede_load_mode {
2408 QEDE_LOAD_NORMAL,
2409 QEDE_LOAD_RELOAD,
2410 QEDE_LOAD_RECOVERY,
2411 };
2412
2413 static int qede_load(struct qede_dev *edev, enum qede_load_mode
mode,
2414 bool is_locked)
2415 {
2416 struct qed_link_params link_params;
2417 struct ethtool_coalesce coal = {};
2418 u8 num_tc;
2419 int rc, i;
2420
2421 DP_INFO(edev, "Starting qede load\n");
2422
2423 if (!is_locked)
2424 __qede_lock(edev);
2425
2426 rc = qede_set_num_queues(edev);
2427 if (rc)
2428 goto out;
2429
2430 rc = qede_alloc_fp_array(edev);
2431 if (rc)
2432 goto out;
2433
2434 qede_init_fp(edev);
2435
2436 rc = qede_alloc_mem_load(edev);
2437 if (rc)
2438 goto err1;
2439 DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
2440 QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
2441
2442 rc = qede_set_real_num_queues(edev);
2443 if (rc)
2444 goto err2;
2445
2446 if (qede_alloc_arfs(edev)) {
2447 edev->ndev->features &= ~NETIF_F_NTUPLE;
2448 edev->dev_info.common.b_arfs_capable = false;
2449 }
2450
2451 qede_napi_add_enable(edev);
2452 DP_INFO(edev, "Napi added and enabled\n");
2453
2454 rc = qede_setup_irqs(edev);
2455 if (rc)
2456 goto err3;
2457 DP_INFO(edev, "Setup IRQs succeeded\n");
2458
2459 rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
2460 if (rc)
2461 goto err4;
2462 DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2463
2464 num_tc = netdev_get_num_tc(edev->ndev);
2465 num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
2466 qede_setup_tc(edev->ndev, num_tc);
2467
2468 /* Program un-configured VLANs */
2469 qede_configure_vlan_filters(edev);
2470
2471 set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2472
2473 /* Ask for link-up using current configuration */
2474 memset(&link_params, 0, sizeof(link_params));
2475 link_params.link_up = true;
2476 edev->ops->common->set_link(edev->cdev, &link_params);
2477
2478 edev->state = QEDE_STATE_OPEN;
2479
2480 coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
2481 coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
2482
2483 for_each_queue(i) {
2484 if (edev->coal_entry[i].isvalid) {
2485 coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
2486 coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
2487 }
2488 __qede_unlock(edev);
2489 qede_set_per_coalesce(edev->ndev, i, &coal);
2490 __qede_lock(edev);
2491 }
2492 DP_INFO(edev, "Ending qede load successfully\n");
2493
2494 goto out;
2495 err4:
2496 qede_sync_free_irqs(edev);
2497 err3:
2498 qede_napi_disable_remove(edev);
2499 err2:
2500 qede_free_mem_load(edev);
2501 err1:
2502 edev->ops->common->set_fp_int(edev->cdev, 0);
2503 qede_free_fp_array(edev);
2504 edev->num_queues = 0;
2505 edev->fp_num_tx = 0;
2506 edev->fp_num_rx = 0;
2507 out:
2508 if (!is_locked)
2509 __qede_unlock(edev);
2510
2511 return rc;
2512 }
2513
2514 /* 'func' should be able to run between unload and reload assuming interface
2515 * is actually running, or afterwards in case it's currently DOWN.
2516 */
2517 void qede_reload(struct qede_dev *edev,
2518 struct qede_reload_args *args, bool is_locked)
2519 {
2520 if (!is_locked)
2521 __qede_lock(edev);
2522
2523 /* Since qede_lock is held, the internal state can't change even
2524 * if the netdev state starts transitioning. Check whether the current
2525 * internal configuration indicates the device is up, then reload.
2526 */
2527 if (edev->state == QEDE_STATE_OPEN) {
2528 qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2529 if (args)
2530 args->func(edev, args);
2531 qede_load(edev, QEDE_LOAD_RELOAD, true);
2532
2533 /* Since no one is going to do it for us, re-configure */
2534 qede_config_rx_mode(edev->ndev);
2535 } else if (args) {
2536 args->func(edev, args);
2537 }
2538
2539 if (!is_locked)
2540 __qede_unlock(edev);
2541 }
2542
2543 /* called with rtnl_lock */
2544 static int qede_open(struct net_device *ndev)
2545 {
2546 struct qede_dev *edev = netdev_priv(ndev);
2547 int rc;
2548
2549 netif_carrier_off(ndev);
2550
2551 edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2552
2553 rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2554 if (rc)
2555 return rc;
2556
2557 udp_tunnel_nic_reset_ntf(ndev);
2558
2559 edev->ops->common->update_drv_state(edev->cdev, true);
2560
2561 return 0;
2562 }
2563
2564 static int qede_close(struct net_device *ndev)
2565 {
2566 struct qede_dev *edev = netdev_priv(ndev);
2567
2568 qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2569
2570 if (edev->cdev)
2571 edev->ops->common->update_drv_state(edev->cdev, false);
2572
2573 return 0;
2574 }
2575
2576 static void qede_link_update(void *dev, struct qed_link_output *link)
2577 {
2578 struct qede_dev *edev = dev;
2579
2580 if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
2581 DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
2582 return;
2583 }
2584
2585 if (link->link_up) {
2586 if (!netif_carrier_ok(edev->ndev)) {
2587 DP_NOTICE(edev, "Link is up\n");
2588 netif_tx_start_all_queues(edev->ndev);
2589 netif_carrier_on(edev->ndev);
2590 qede_rdma_dev_event_open(edev);
2591 }
2592 } else {
2593 if (netif_carrier_ok(edev->ndev)) {
2594 DP_NOTICE(edev, "Link is down\n");
2595 netif_tx_disable(edev->ndev);
2596 netif_carrier_off(edev->ndev);
2597 qede_rdma_dev_event_close(edev);
2598 }
2599 }
2600 }
2601
2602 static void qede_schedule_recovery_handler(void *dev)
2603 {
2604 struct qede_dev *edev = dev;
2605
2606 if (edev->state == QEDE_STATE_RECOVERY) {
2607 DP_NOTICE(edev,
2608 "Avoid scheduling recovery handling since already in recovery state\n");
2609 return;
2610 }
2611
2612 set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
2613 schedule_delayed_work(&edev->sp_task, 0);
2614
2615 DP_INFO(edev, "Scheduled a recovery handler\n");
2616 }
2617
2618 static void qede_recovery_failed(struct qede_dev *edev)
2619 {
2620 netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
2621
2622 netif_device_detach(edev->ndev);
2623
2624 if (edev->cdev)
2625 edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
2626 }
2627
2628 static void qede_recovery_handler(struct qede_dev *edev)
2629 {
2630 u32 curr_state = edev->state;
2631 int rc;
2632
2633 DP_NOTICE(edev, "Starting a recovery process\n");
2634
2635 /* No need to acquire the qede_lock first, since that is done by qede_sp_task
2636 * before calling this function.
2637 */
2638 edev->state = QEDE_STATE_RECOVERY;
2639
2640 edev->ops->common->recovery_prolog(edev->cdev);
2641
2642 if (curr_state == QEDE_STATE_OPEN)
2643 qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
2644
2645 __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
2646
2647 rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
2648 IS_VF(edev), QEDE_PROBE_RECOVERY);
2649 if (rc) {
2650 edev->cdev = NULL;
2651 goto err;
2652 }
2653
2654 if (curr_state == QEDE_STATE_OPEN) {
2655 rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
2656 if (rc)
2657 goto err;
2658
2659 qede_config_rx_mode(edev->ndev);
2660 udp_tunnel_nic_reset_ntf(edev->ndev);
2661 }
2662
2663 edev->state = curr_state;
2664
2665 DP_NOTICE(edev, "Recovery handling is done\n");
2666
2667 return;
2668
2669 err:
2670 qede_recovery_failed(edev);
2671 }
2672
2673 static void qede_atomic_hw_err_handler(struct qede_dev *edev)
2674 {
2675 struct qed_dev *cdev = edev->cdev;
2676
2677 DP_NOTICE(edev,
2678 "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
2679 edev->err_flags);
2680
2681 /* Get a call trace of the flow that led to the error */
2682 WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
2683
2684 /* Prevent HW attentions from being reasserted */
2685 if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
2686 edev->ops->common->attn_clr_enable(cdev, true);
2687
2688 DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
2689 }
2690
2691 static void qede_generic_hw_err_handler(struct qede_dev *edev)
2692 {
2693 DP_NOTICE(edev,
2694 "Generic sleepable HW error handling started - err_flags 0x%lx\n",
2695 edev->err_flags);
2696
2697 if (edev->devlink) {
2698 DP_NOTICE(edev, "Reporting fatal error to devlink\n");
2699 edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
2700 }
2701
2702 clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2703
2704 DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
2705 }
2706
2707 static void qede_set_hw_err_flags(struct qede_dev *edev,
2708 enum qed_hw_err_type err_type)
2709 {
2710 unsigned long err_flags = 0;
2711
2712 switch (err_type) {
2713 case QED_HW_ERR_DMAE_FAIL:
2714 set_bit(QEDE_ERR_WARN, &err_flags);
2715 fallthrough;
2716 case QED_HW_ERR_MFW_RESP_FAIL:
2717 case QED_HW_ERR_HW_ATTN:
2718 case QED_HW_ERR_RAMROD_FAIL:
2719 case QED_HW_ERR_FW_ASSERT:
2720 set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
2721 set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
2722 /* Mark this error as recoverable and start recovery */
2723 set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
2724 break;
2725
2726 default:
2727 DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
2728 break;
2729 }
2730
2731 edev->err_flags |= err_flags;
2732 }
2733
2734 static void qede_schedule_hw_err_handler(void *dev,
2735 enum qed_hw_err_type err_type)
2736 {
2737 struct qede_dev *edev = dev;
2738
2739 /* Fan failure cannot be masked by handling of another HW error or by a
2740 * concurrent recovery process.
2741 */
2742 if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
2743 edev->state == QEDE_STATE_RECOVERY) &&
2744 err_type != QED_HW_ERR_FAN_FAIL) {
2745 DP_INFO(edev,
2746 "Avoid scheduling error handling while another HW error is being handled\n");
2747 return;
2748 }
2749
2750 if (err_type >= QED_HW_ERR_LAST) {
2751 DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
2752 clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2753 return;
2754 }
2755
2756 edev->last_err_type = err_type;
2757 qede_set_hw_err_flags(edev, err_type);
2758 qede_atomic_hw_err_handler(edev);
2759 set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
2760 schedule_delayed_work(&edev->sp_task, 0);
2761
2762 DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
2763 }
2764
2765 static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
2766 {
2767 struct netdev_queue *netdev_txq;
2768
2769 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
2770 if (netif_xmit_stopped(netdev_txq))
2771 return true;
2772
2773 return false;
2774 }
2775
2776 static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
2777 {
2778 struct qede_dev *edev = dev;
2779 struct netdev_hw_addr *ha;
2780 int i;
2781
2782 if (edev->ndev->features & NETIF_F_IP_CSUM)
2783 data->feat_flags |= QED_TLV_IP_CSUM;
2784 if (edev->ndev->features & NETIF_F_TSO)
2785 data->feat_flags |= QED_TLV_LSO;
2786
2787 ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
2788 eth_zero_addr(data->mac[1]);
2789 eth_zero_addr(data->mac[2]);
2790 /* Copy the first two UC macs */
2791 netif_addr_lock_bh(edev->ndev);
2792 i = 1;
2793 netdev_for_each_uc_addr(ha, edev->ndev) {
2794 ether_addr_copy(data->mac[i++], ha->addr);
2795 if (i == QED_TLV_MAC_COUNT)
2796 break;
2797 }
2798
2799 netif_addr_unlock_bh(edev->ndev);
2800 }
2801
2802 static void qede_get_eth_tlv_data(void *dev, void *data)
2803 {
2804 struct qed_mfw_tlv_eth *etlv = data;
2805 struct qede_dev *edev = dev;
2806 struct qede_fastpath *fp;
2807 int i;
2808
2809 etlv->lso_maxoff_size = 0XFFFF;
2810 etlv->lso_maxoff_size_set = true;
2811 etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
2812 etlv->lso_minseg_size_set = true;
2813 etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
2814 etlv->prom_mode_set = true;
2815 etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
2816 etlv->tx_descr_size_set = true;
2817 etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
2818 etlv->rx_descr_size_set = true;
2819 etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
2820 etlv->iov_offload_set = true;
2821
2822 /* Fill information regarding queues; Should be done under the qede
2823 * lock to guarantee those don't change beneath our feet.
2824 */
2825 etlv->txqs_empty = true;
2826 etlv->rxqs_empty = true;
2827 etlv->num_txqs_full = 0;
2828 etlv->num_rxqs_full = 0;
2829
2830 __qede_lock(edev);
2831 for_each_queue(i) {
2832 fp = &edev->fp_array[i];
2833 if (fp->type & QEDE_FASTPATH_TX) {
2834 struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
2835
2836 if (txq->sw_tx_cons != txq->sw_tx_prod)
2837 etlv->txqs_empty = false;
2838 if (qede_is_txq_full(edev, txq))
2839 etlv->num_txqs_full++;
2840 }
2841 if (fp->type & QEDE_FASTPATH_RX) {
2842 if (qede_has_rx_work(fp->rxq))
2843 etlv->rxqs_empty = false;
2844
2845 /* This one is a bit tricky; Firmware might stop
2846 * placing packets if ring is not yet full.
2847 * Give an approximation.
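* (the check below treats an Rx ring as full once the distance between
* *hw_cons_ptr and the driver's completion-ring consumer exceeds
* RX_RING_SIZE - 100, i.e. fewer than ~100 entries remain free)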
2848 */
2849 if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
2850 qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
2851 RX_RING_SIZE - 100)
2852 etlv->num_rxqs_full++;
2853 }
2854 }
2855 __qede_unlock(edev);
2856
2857 etlv->txqs_empty_set = true;
2858 etlv->rxqs_empty_set = true;
2859 etlv->num_txqs_full_set = true;
2860 etlv->num_rxqs_full_set = true;
2861 }
2862
2863 /**
2864 * qede_io_error_detected(): Called when a PCI error is detected
2865 *
2866 * @pdev: Pointer to PCI device
2867 * @state: The current pci connection state
2868 *
2869 * Return: pci_ers_result_t.
2870 *
2871 * This function is called after a PCI bus error affecting
2872 * this device has been detected.
2873 */
2874 static pci_ers_result_t
2875 qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2876 {
2877 struct net_device *dev = pci_get_drvdata(pdev);
2878 struct qede_dev *edev = netdev_priv(dev);
2879
2880 if (!edev)
2881 return PCI_ERS_RESULT_NONE;
2882
2883 DP_NOTICE(edev, "IO error detected [%d]\n", state);
2884
2885 __qede_lock(edev);
2886 if (edev->state == QEDE_STATE_RECOVERY) {
2887 DP_NOTICE(edev, "Device already in the recovery state\n");
2888 __qede_unlock(edev);
2889 return PCI_ERS_RESULT_NONE;
2890 }
2891
2892 /* PF handles the recovery of its VFs */
2893 if (IS_VF(edev)) {
2894 DP_VERBOSE(edev, QED_MSG_IOV,
2895 "VF recovery is handled by its PF\n");
2896 __qede_unlock(edev);
2897 return PCI_ERS_RESULT_RECOVERED;
2898 }
2899
2900 /* Close OS Tx */
2901 netif_tx_disable(edev->ndev);
2902 netif_carrier_off(edev->ndev);
2903
2904 set_bit(QEDE_SP_AER, &edev->sp_flags);
2905 schedule_delayed_work(&edev->sp_task, 0);
2906
2907 __qede_unlock(edev);
2908
2909 return PCI_ERS_RESULT_CAN_RECOVER;
2910 }
2911
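/* Note: the remainder of the AER handling is expected to run asynchronously
 * from the sp_task worker, which acts on the QEDE_SP_AER bit set above.
 */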