/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM57508_NPAR,
	BCM57504_NPAR,
	BCM57502_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_C_VF_HV,
	NETXTREME_E_VF_HV,
	NETXTREME_E_P5_VF,
	NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
140 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" }, 141 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" }, 142 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, 143 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, 144 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, 145 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, 146 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, 147 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, 148 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, 149 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 150 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 151 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 152 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 153 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 154 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 155 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 156 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 157 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 158 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 159 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 160 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 161 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 162 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 163 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 164 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 165 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 166 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 167 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 168 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 169 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 170 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 171 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 172 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 173 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 174 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 175 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 176 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, 177 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, 178 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 179 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, 180 }; 181 182 static const struct pci_device_id bnxt_pci_tbl[] = { 183 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 184 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 185 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 186 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 187 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 
188 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 189 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, 190 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 191 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 192 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 193 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 194 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 195 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 196 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 197 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 198 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 199 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 200 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 201 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 202 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 203 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 204 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 205 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 206 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 207 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 208 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 209 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 210 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 211 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 212 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 213 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 214 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 215 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 216 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 217 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 218 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 219 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 220 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 221 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR }, 222 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 223 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR }, 224 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR }, 225 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 226 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR }, 227 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 228 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 229 #ifdef CONFIG_BNXT_SRIOV 230 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 231 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, 232 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, 233 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 234 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, 235 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 236 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, 237 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, 238 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV }, 239 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, 240 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 241 { 
PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 242 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 243 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 244 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 245 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, 246 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 247 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 248 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, 249 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, 250 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 251 #endif 252 { 0 } 253 }; 254 255 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 256 257 static const u16 bnxt_vf_req_snif[] = { 258 HWRM_FUNC_CFG, 259 HWRM_FUNC_VF_CFG, 260 HWRM_PORT_PHY_QCFG, 261 HWRM_CFA_L2_FILTER_ALLOC, 262 }; 263 264 static const u16 bnxt_async_events_arr[] = { 265 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 266 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, 267 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 268 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 269 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 270 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 271 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, 272 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, 273 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, 274 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, 275 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, 276 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, 277 }; 278 279 static struct workqueue_struct *bnxt_pf_wq; 280 281 static bool bnxt_vf_pciid(enum board_idx idx) 282 { 283 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || 284 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || 285 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF || 286 idx == NETXTREME_E_P5_VF_HV); 287 } 288 289 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) 290 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) 291 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) 292 293 #define BNXT_CP_DB_IRQ_DIS(db) \ 294 writel(DB_CP_IRQ_DIS_FLAGS, db) 295 296 #define BNXT_DB_CQ(db, idx) \ 297 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell) 298 299 #define BNXT_DB_NQ_P5(db, idx) \ 300 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell) 301 302 #define BNXT_DB_CQ_ARM(db, idx) \ 303 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell) 304 305 #define BNXT_DB_NQ_ARM_P5(db, idx) \ 306 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell) 307 308 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 309 { 310 if (bp->flags & BNXT_FLAG_CHIP_P5) 311 BNXT_DB_NQ_P5(db, idx); 312 else 313 BNXT_DB_CQ(db, idx); 314 } 315 316 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 317 { 318 if (bp->flags & BNXT_FLAG_CHIP_P5) 319 BNXT_DB_NQ_ARM_P5(db, idx); 320 else 321 BNXT_DB_CQ_ARM(db, idx); 322 } 323 324 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 325 { 326 if (bp->flags & BNXT_FLAG_CHIP_P5) 327 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx), 328 db->doorbell); 329 else 330 BNXT_DB_CQ(db, idx); 331 } 332 333 const u16 bnxt_lhint_arr[] = { 334 TX_BD_FLAGS_LHINT_512_AND_SMALLER, 335 TX_BD_FLAGS_LHINT_512_TO_1023, 336 TX_BD_FLAGS_LHINT_1024_TO_2047, 337 TX_BD_FLAGS_LHINT_1024_TO_2047, 338 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 339 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 340 
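	/* The remaining entries all map to the same >= 2048-byte hint; the
	 * array is indexed by packet length >> 9 in bnxt_start_xmit().
	 */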
TX_BD_FLAGS_LHINT_2048_AND_LARGER, 341 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 342 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 343 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 344 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 345 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 346 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 347 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 348 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 349 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 350 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 351 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 352 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 353 }; 354 355 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) 356 { 357 struct metadata_dst *md_dst = skb_metadata_dst(skb); 358 359 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 360 return 0; 361 362 return md_dst->u.port_info.port_id; 363 } 364 365 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 366 { 367 struct bnxt *bp = netdev_priv(dev); 368 struct tx_bd *txbd; 369 struct tx_bd_ext *txbd1; 370 struct netdev_queue *txq; 371 int i; 372 dma_addr_t mapping; 373 unsigned int length, pad = 0; 374 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 375 u16 prod, last_frag; 376 struct pci_dev *pdev = bp->pdev; 377 struct bnxt_tx_ring_info *txr; 378 struct bnxt_sw_tx_bd *tx_buf; 379 __le32 lflags = 0; 380 381 i = skb_get_queue_mapping(skb); 382 if (unlikely(i >= bp->tx_nr_rings)) { 383 dev_kfree_skb_any(skb); 384 return NETDEV_TX_OK; 385 } 386 387 txq = netdev_get_tx_queue(dev, i); 388 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 389 prod = txr->tx_prod; 390 391 free_size = bnxt_tx_avail(bp, txr); 392 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 393 netif_tx_stop_queue(txq); 394 return NETDEV_TX_BUSY; 395 } 396 397 length = skb->len; 398 len = skb_headlen(skb); 399 last_frag = skb_shinfo(skb)->nr_frags; 400 401 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 402 403 txbd->tx_bd_opaque = prod; 404 405 tx_buf = &txr->tx_buf_ring[prod]; 406 tx_buf->skb = skb; 407 tx_buf->nr_frags = last_frag; 408 409 vlan_tag_flags = 0; 410 cfa_action = bnxt_xmit_get_cfa_action(skb); 411 if (skb_vlan_tag_present(skb)) { 412 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 413 skb_vlan_tag_get(skb); 414 /* Currently supports 8021Q, 8021AD vlan offloads 415 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 416 */ 417 if (skb->vlan_proto == htons(ETH_P_8021Q)) 418 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 419 } 420 421 if (unlikely(skb->no_fcs)) { 422 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); 423 goto normal_tx; 424 } 425 426 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 427 struct tx_push_buffer *tx_push_buf = txr->tx_push; 428 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 429 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 430 void __iomem *db = txr->tx_db.doorbell; 431 void *pdata = tx_push_buf->data; 432 u64 *end; 433 int j, push_len; 434 435 /* Set COAL_NOW to be ready quickly for the next push */ 436 tx_push->tx_bd_len_flags_type = 437 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 438 TX_BD_TYPE_LONG_TX_BD | 439 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 440 TX_BD_FLAGS_COAL_NOW | 441 TX_BD_FLAGS_PACKET_END | 442 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 443 444 if (skb->ip_summed == CHECKSUM_PARTIAL) 445 tx_push1->tx_bd_hsize_lflags = 446 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 447 else 448 tx_push1->tx_bd_hsize_lflags = 0; 449 450 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 451 tx_push1->tx_bd_cfa_action = 452 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 453 454 end = pdata + length; 455 end = PTR_ALIGN(end, 8) - 1; 
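		/* Zero the 8-byte word covering the tail of the packet data so
		 * any bytes pushed past the end of the packet are zero.
		 */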
456 *end = 0; 457 458 skb_copy_from_linear_data(skb, pdata, len); 459 pdata += len; 460 for (j = 0; j < last_frag; j++) { 461 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 462 void *fptr; 463 464 fptr = skb_frag_address_safe(frag); 465 if (!fptr) 466 goto normal_tx; 467 468 memcpy(pdata, fptr, skb_frag_size(frag)); 469 pdata += skb_frag_size(frag); 470 } 471 472 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 473 txbd->tx_bd_haddr = txr->data_mapping; 474 prod = NEXT_TX(prod); 475 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 476 memcpy(txbd, tx_push1, sizeof(*txbd)); 477 prod = NEXT_TX(prod); 478 tx_push->doorbell = 479 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 480 txr->tx_prod = prod; 481 482 tx_buf->is_push = 1; 483 netdev_tx_sent_queue(txq, skb->len); 484 wmb(); /* Sync is_push and byte queue before pushing data */ 485 486 push_len = (length + sizeof(*tx_push) + 7) / 8; 487 if (push_len > 16) { 488 __iowrite64_copy(db, tx_push_buf, 16); 489 __iowrite32_copy(db + 4, tx_push_buf + 1, 490 (push_len - 16) << 1); 491 } else { 492 __iowrite64_copy(db, tx_push_buf, push_len); 493 } 494 495 goto tx_done; 496 } 497 498 normal_tx: 499 if (length < BNXT_MIN_PKT_SIZE) { 500 pad = BNXT_MIN_PKT_SIZE - length; 501 if (skb_pad(skb, pad)) { 502 /* SKB already freed. */ 503 tx_buf->skb = NULL; 504 return NETDEV_TX_OK; 505 } 506 length = BNXT_MIN_PKT_SIZE; 507 } 508 509 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 510 511 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { 512 dev_kfree_skb_any(skb); 513 tx_buf->skb = NULL; 514 return NETDEV_TX_OK; 515 } 516 517 dma_unmap_addr_set(tx_buf, mapping, mapping); 518 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 519 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 520 521 txbd->tx_bd_haddr = cpu_to_le64(mapping); 522 523 prod = NEXT_TX(prod); 524 txbd1 = (struct tx_bd_ext *) 525 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 526 527 txbd1->tx_bd_hsize_lflags = lflags; 528 if (skb_is_gso(skb)) { 529 u32 hdr_len; 530 531 if (skb->encapsulation) 532 hdr_len = skb_inner_network_offset(skb) + 533 skb_inner_network_header_len(skb) + 534 inner_tcp_hdrlen(skb); 535 else 536 hdr_len = skb_transport_offset(skb) + 537 tcp_hdrlen(skb); 538 539 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | 540 TX_BD_FLAGS_T_IPID | 541 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 542 length = skb_shinfo(skb)->gso_size; 543 txbd1->tx_bd_mss = cpu_to_le32(length); 544 length += hdr_len; 545 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 546 txbd1->tx_bd_hsize_lflags |= 547 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 548 txbd1->tx_bd_mss = 0; 549 } 550 551 length >>= 9; 552 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 553 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 554 skb->len); 555 i = 0; 556 goto tx_dma_error; 557 } 558 flags |= bnxt_lhint_arr[length]; 559 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 560 561 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 562 txbd1->tx_bd_cfa_action = 563 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 564 for (i = 0; i < last_frag; i++) { 565 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 566 567 prod = NEXT_TX(prod); 568 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 569 570 len = skb_frag_size(frag); 571 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 572 DMA_TO_DEVICE); 573 574 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 575 goto tx_dma_error; 576 577 tx_buf = &txr->tx_buf_ring[prod]; 578 
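		/* Record the fragment DMA address so it can be unmapped in
		 * bnxt_tx_int() or in the tx_dma_error path below.
		 */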
dma_unmap_addr_set(tx_buf, mapping, mapping); 579 580 txbd->tx_bd_haddr = cpu_to_le64(mapping); 581 582 flags = len << TX_BD_LEN_SHIFT; 583 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 584 } 585 586 flags &= ~TX_BD_LEN; 587 txbd->tx_bd_len_flags_type = 588 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 589 TX_BD_FLAGS_PACKET_END); 590 591 netdev_tx_sent_queue(txq, skb->len); 592 593 /* Sync BD data before updating doorbell */ 594 wmb(); 595 596 prod = NEXT_TX(prod); 597 txr->tx_prod = prod; 598 599 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) 600 bnxt_db_write(bp, &txr->tx_db, prod); 601 602 tx_done: 603 604 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 605 if (netdev_xmit_more() && !tx_buf->is_push) 606 bnxt_db_write(bp, &txr->tx_db, prod); 607 608 netif_tx_stop_queue(txq); 609 610 /* netif_tx_stop_queue() must be done before checking 611 * tx index in bnxt_tx_avail() below, because in 612 * bnxt_tx_int(), we update tx index before checking for 613 * netif_tx_queue_stopped(). 614 */ 615 smp_mb(); 616 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) 617 netif_tx_wake_queue(txq); 618 } 619 return NETDEV_TX_OK; 620 621 tx_dma_error: 622 last_frag = i; 623 624 /* start back at beginning and unmap skb */ 625 prod = txr->tx_prod; 626 tx_buf = &txr->tx_buf_ring[prod]; 627 tx_buf->skb = NULL; 628 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 629 skb_headlen(skb), PCI_DMA_TODEVICE); 630 prod = NEXT_TX(prod); 631 632 /* unmap remaining mapped pages */ 633 for (i = 0; i < last_frag; i++) { 634 prod = NEXT_TX(prod); 635 tx_buf = &txr->tx_buf_ring[prod]; 636 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 637 skb_frag_size(&skb_shinfo(skb)->frags[i]), 638 PCI_DMA_TODEVICE); 639 } 640 641 dev_kfree_skb_any(skb); 642 return NETDEV_TX_OK; 643 } 644 645 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) 646 { 647 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 648 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 649 u16 cons = txr->tx_cons; 650 struct pci_dev *pdev = bp->pdev; 651 int i; 652 unsigned int tx_bytes = 0; 653 654 for (i = 0; i < nr_pkts; i++) { 655 struct bnxt_sw_tx_bd *tx_buf; 656 struct sk_buff *skb; 657 int j, last; 658 659 tx_buf = &txr->tx_buf_ring[cons]; 660 cons = NEXT_TX(cons); 661 skb = tx_buf->skb; 662 tx_buf->skb = NULL; 663 664 if (tx_buf->is_push) { 665 tx_buf->is_push = 0; 666 goto next_tx_int; 667 } 668 669 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 670 skb_headlen(skb), PCI_DMA_TODEVICE); 671 last = tx_buf->nr_frags; 672 673 for (j = 0; j < last; j++) { 674 cons = NEXT_TX(cons); 675 tx_buf = &txr->tx_buf_ring[cons]; 676 dma_unmap_page( 677 &pdev->dev, 678 dma_unmap_addr(tx_buf, mapping), 679 skb_frag_size(&skb_shinfo(skb)->frags[j]), 680 PCI_DMA_TODEVICE); 681 } 682 683 next_tx_int: 684 cons = NEXT_TX(cons); 685 686 tx_bytes += skb->len; 687 dev_kfree_skb_any(skb); 688 } 689 690 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); 691 txr->tx_cons = cons; 692 693 /* Need to make the tx_cons update visible to bnxt_start_xmit() 694 * before checking for netif_tx_queue_stopped(). Without the 695 * memory barrier, there is a small possibility that bnxt_start_xmit() 696 * will miss it and cause the queue to be stopped forever. 
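	 * This barrier pairs with the smp_mb() in bnxt_start_xmit().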
697 */ 698 smp_mb(); 699 700 if (unlikely(netif_tx_queue_stopped(txq)) && 701 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { 702 __netif_tx_lock(txq, smp_processor_id()); 703 if (netif_tx_queue_stopped(txq) && 704 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 705 txr->dev_state != BNXT_DEV_STATE_CLOSING) 706 netif_tx_wake_queue(txq); 707 __netif_tx_unlock(txq); 708 } 709 } 710 711 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 712 struct bnxt_rx_ring_info *rxr, 713 gfp_t gfp) 714 { 715 struct device *dev = &bp->pdev->dev; 716 struct page *page; 717 718 page = page_pool_dev_alloc_pages(rxr->page_pool); 719 if (!page) 720 return NULL; 721 722 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, 723 DMA_ATTR_WEAK_ORDERING); 724 if (dma_mapping_error(dev, *mapping)) { 725 page_pool_recycle_direct(rxr->page_pool, page); 726 return NULL; 727 } 728 *mapping += bp->rx_dma_offset; 729 return page; 730 } 731 732 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, 733 gfp_t gfp) 734 { 735 u8 *data; 736 struct pci_dev *pdev = bp->pdev; 737 738 data = kmalloc(bp->rx_buf_size, gfp); 739 if (!data) 740 return NULL; 741 742 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 743 bp->rx_buf_use_size, bp->rx_dir, 744 DMA_ATTR_WEAK_ORDERING); 745 746 if (dma_mapping_error(&pdev->dev, *mapping)) { 747 kfree(data); 748 data = NULL; 749 } 750 return data; 751 } 752 753 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 754 u16 prod, gfp_t gfp) 755 { 756 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 757 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; 758 dma_addr_t mapping; 759 760 if (BNXT_RX_PAGE_MODE(bp)) { 761 struct page *page = 762 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); 763 764 if (!page) 765 return -ENOMEM; 766 767 rx_buf->data = page; 768 rx_buf->data_ptr = page_address(page) + bp->rx_offset; 769 } else { 770 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); 771 772 if (!data) 773 return -ENOMEM; 774 775 rx_buf->data = data; 776 rx_buf->data_ptr = data + bp->rx_offset; 777 } 778 rx_buf->mapping = mapping; 779 780 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 781 return 0; 782 } 783 784 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 785 { 786 u16 prod = rxr->rx_prod; 787 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 788 struct rx_bd *cons_bd, *prod_bd; 789 790 prod_rx_buf = &rxr->rx_buf_ring[prod]; 791 cons_rx_buf = &rxr->rx_buf_ring[cons]; 792 793 prod_rx_buf->data = data; 794 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 795 796 prod_rx_buf->mapping = cons_rx_buf->mapping; 797 798 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 799 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 800 801 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 802 } 803 804 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 805 { 806 u16 next, max = rxr->rx_agg_bmap_size; 807 808 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 809 if (next >= max) 810 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 811 return next; 812 } 813 814 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 815 struct bnxt_rx_ring_info *rxr, 816 u16 prod, gfp_t gfp) 817 { 818 struct rx_bd *rxbd = 819 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 820 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 821 struct pci_dev *pdev = bp->pdev; 822 struct page *page; 823 dma_addr_t mapping; 824 u16 sw_prod = rxr->rx_sw_agg_prod; 825 
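	/* When PAGE_SIZE is larger than BNXT_RX_PAGE_SIZE, one page is carved
	 * into multiple BNXT_RX_PAGE_SIZE aggregation buffers below.
	 */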
unsigned int offset = 0; 826 827 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 828 page = rxr->rx_page; 829 if (!page) { 830 page = alloc_page(gfp); 831 if (!page) 832 return -ENOMEM; 833 rxr->rx_page = page; 834 rxr->rx_page_offset = 0; 835 } 836 offset = rxr->rx_page_offset; 837 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; 838 if (rxr->rx_page_offset == PAGE_SIZE) 839 rxr->rx_page = NULL; 840 else 841 get_page(page); 842 } else { 843 page = alloc_page(gfp); 844 if (!page) 845 return -ENOMEM; 846 } 847 848 mapping = dma_map_page_attrs(&pdev->dev, page, offset, 849 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, 850 DMA_ATTR_WEAK_ORDERING); 851 if (dma_mapping_error(&pdev->dev, mapping)) { 852 __free_page(page); 853 return -EIO; 854 } 855 856 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 857 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 858 859 __set_bit(sw_prod, rxr->rx_agg_bmap); 860 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 861 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 862 863 rx_agg_buf->page = page; 864 rx_agg_buf->offset = offset; 865 rx_agg_buf->mapping = mapping; 866 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 867 rxbd->rx_bd_opaque = sw_prod; 868 return 0; 869 } 870 871 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 872 struct bnxt_cp_ring_info *cpr, 873 u16 cp_cons, u16 curr) 874 { 875 struct rx_agg_cmp *agg; 876 877 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 878 agg = (struct rx_agg_cmp *) 879 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 880 return agg; 881 } 882 883 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 884 struct bnxt_rx_ring_info *rxr, 885 u16 agg_id, u16 curr) 886 { 887 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 888 889 return &tpa_info->agg_arr[curr]; 890 } 891 892 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 893 u16 start, u32 agg_bufs, bool tpa) 894 { 895 struct bnxt_napi *bnapi = cpr->bnapi; 896 struct bnxt *bp = bnapi->bp; 897 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 898 u16 prod = rxr->rx_agg_prod; 899 u16 sw_prod = rxr->rx_sw_agg_prod; 900 bool p5_tpa = false; 901 u32 i; 902 903 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 904 p5_tpa = true; 905 906 for (i = 0; i < agg_bufs; i++) { 907 u16 cons; 908 struct rx_agg_cmp *agg; 909 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 910 struct rx_bd *prod_bd; 911 struct page *page; 912 913 if (p5_tpa) 914 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 915 else 916 agg = bnxt_get_agg(bp, cpr, idx, start + i); 917 cons = agg->rx_agg_cmp_opaque; 918 __clear_bit(cons, rxr->rx_agg_bmap); 919 920 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 921 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 922 923 __set_bit(sw_prod, rxr->rx_agg_bmap); 924 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 925 cons_rx_buf = &rxr->rx_agg_ring[cons]; 926 927 /* It is possible for sw_prod to be equal to cons, so 928 * set cons_rx_buf->page to NULL first. 
929 */ 930 page = cons_rx_buf->page; 931 cons_rx_buf->page = NULL; 932 prod_rx_buf->page = page; 933 prod_rx_buf->offset = cons_rx_buf->offset; 934 935 prod_rx_buf->mapping = cons_rx_buf->mapping; 936 937 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 938 939 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 940 prod_bd->rx_bd_opaque = sw_prod; 941 942 prod = NEXT_RX_AGG(prod); 943 sw_prod = NEXT_RX_AGG(sw_prod); 944 } 945 rxr->rx_agg_prod = prod; 946 rxr->rx_sw_agg_prod = sw_prod; 947 } 948 949 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 950 struct bnxt_rx_ring_info *rxr, 951 u16 cons, void *data, u8 *data_ptr, 952 dma_addr_t dma_addr, 953 unsigned int offset_and_len) 954 { 955 unsigned int payload = offset_and_len >> 16; 956 unsigned int len = offset_and_len & 0xffff; 957 skb_frag_t *frag; 958 struct page *page = data; 959 u16 prod = rxr->rx_prod; 960 struct sk_buff *skb; 961 int off, err; 962 963 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 964 if (unlikely(err)) { 965 bnxt_reuse_rx_data(rxr, cons, data); 966 return NULL; 967 } 968 dma_addr -= bp->rx_dma_offset; 969 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, 970 DMA_ATTR_WEAK_ORDERING); 971 page_pool_release_page(rxr->page_pool, page); 972 973 if (unlikely(!payload)) 974 payload = eth_get_headlen(bp->dev, data_ptr, len); 975 976 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 977 if (!skb) { 978 __free_page(page); 979 return NULL; 980 } 981 982 off = (void *)data_ptr - page_address(page); 983 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); 984 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 985 payload + NET_IP_ALIGN); 986 987 frag = &skb_shinfo(skb)->frags[0]; 988 skb_frag_size_sub(frag, payload); 989 skb_frag_off_add(frag, payload); 990 skb->data_len -= payload; 991 skb->tail += payload; 992 993 return skb; 994 } 995 996 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 997 struct bnxt_rx_ring_info *rxr, u16 cons, 998 void *data, u8 *data_ptr, 999 dma_addr_t dma_addr, 1000 unsigned int offset_and_len) 1001 { 1002 u16 prod = rxr->rx_prod; 1003 struct sk_buff *skb; 1004 int err; 1005 1006 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1007 if (unlikely(err)) { 1008 bnxt_reuse_rx_data(rxr, cons, data); 1009 return NULL; 1010 } 1011 1012 skb = build_skb(data, 0); 1013 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 1014 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 1015 if (!skb) { 1016 kfree(data); 1017 return NULL; 1018 } 1019 1020 skb_reserve(skb, bp->rx_offset); 1021 skb_put(skb, offset_and_len & 0xffff); 1022 return skb; 1023 } 1024 1025 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, 1026 struct bnxt_cp_ring_info *cpr, 1027 struct sk_buff *skb, u16 idx, 1028 u32 agg_bufs, bool tpa) 1029 { 1030 struct bnxt_napi *bnapi = cpr->bnapi; 1031 struct pci_dev *pdev = bp->pdev; 1032 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1033 u16 prod = rxr->rx_agg_prod; 1034 bool p5_tpa = false; 1035 u32 i; 1036 1037 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 1038 p5_tpa = true; 1039 1040 for (i = 0; i < agg_bufs; i++) { 1041 u16 cons, frag_len; 1042 struct rx_agg_cmp *agg; 1043 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1044 struct page *page; 1045 dma_addr_t mapping; 1046 1047 if (p5_tpa) 1048 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1049 else 1050 agg = bnxt_get_agg(bp, cpr, idx, i); 1051 cons = agg->rx_agg_cmp_opaque; 1052 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1053 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1054 
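		/* cons and frag_len come from the aggregation completion;
		 * cons selects the sw agg ring entry holding this page.
		 */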
1055 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1056 skb_fill_page_desc(skb, i, cons_rx_buf->page, 1057 cons_rx_buf->offset, frag_len); 1058 __clear_bit(cons, rxr->rx_agg_bmap); 1059 1060 /* It is possible for bnxt_alloc_rx_page() to allocate 1061 * a sw_prod index that equals the cons index, so we 1062 * need to clear the cons entry now. 1063 */ 1064 mapping = cons_rx_buf->mapping; 1065 page = cons_rx_buf->page; 1066 cons_rx_buf->page = NULL; 1067 1068 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1069 struct skb_shared_info *shinfo; 1070 unsigned int nr_frags; 1071 1072 shinfo = skb_shinfo(skb); 1073 nr_frags = --shinfo->nr_frags; 1074 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); 1075 1076 dev_kfree_skb(skb); 1077 1078 cons_rx_buf->page = page; 1079 1080 /* Update prod since possibly some pages have been 1081 * allocated already. 1082 */ 1083 rxr->rx_agg_prod = prod; 1084 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1085 return NULL; 1086 } 1087 1088 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1089 PCI_DMA_FROMDEVICE, 1090 DMA_ATTR_WEAK_ORDERING); 1091 1092 skb->data_len += frag_len; 1093 skb->len += frag_len; 1094 skb->truesize += PAGE_SIZE; 1095 1096 prod = NEXT_RX_AGG(prod); 1097 } 1098 rxr->rx_agg_prod = prod; 1099 return skb; 1100 } 1101 1102 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1103 u8 agg_bufs, u32 *raw_cons) 1104 { 1105 u16 last; 1106 struct rx_agg_cmp *agg; 1107 1108 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1109 last = RING_CMP(*raw_cons); 1110 agg = (struct rx_agg_cmp *) 1111 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1112 return RX_AGG_CMP_VALID(agg, *raw_cons); 1113 } 1114 1115 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1116 unsigned int len, 1117 dma_addr_t mapping) 1118 { 1119 struct bnxt *bp = bnapi->bp; 1120 struct pci_dev *pdev = bp->pdev; 1121 struct sk_buff *skb; 1122 1123 skb = napi_alloc_skb(&bnapi->napi, len); 1124 if (!skb) 1125 return NULL; 1126 1127 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 1128 bp->rx_dir); 1129 1130 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1131 len + NET_IP_ALIGN); 1132 1133 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1134 bp->rx_dir); 1135 1136 skb_put(skb, len); 1137 return skb; 1138 } 1139 1140 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1141 u32 *raw_cons, void *cmp) 1142 { 1143 struct rx_cmp *rxcmp = cmp; 1144 u32 tmp_raw_cons = *raw_cons; 1145 u8 cmp_type, agg_bufs = 0; 1146 1147 cmp_type = RX_CMP_TYPE(rxcmp); 1148 1149 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1150 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1151 RX_CMP_AGG_BUFS) >> 1152 RX_CMP_AGG_BUFS_SHIFT; 1153 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1154 struct rx_tpa_end_cmp *tpa_end = cmp; 1155 1156 if (bp->flags & BNXT_FLAG_CHIP_P5) 1157 return 0; 1158 1159 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1160 } 1161 1162 if (agg_bufs) { 1163 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1164 return -EBUSY; 1165 } 1166 *raw_cons = tmp_raw_cons; 1167 return 0; 1168 } 1169 1170 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) 1171 { 1172 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) 1173 return; 1174 1175 if (BNXT_PF(bp)) 1176 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); 1177 else 1178 schedule_delayed_work(&bp->fw_reset_task, delay); 1179 } 1180 1181 static void bnxt_queue_sp_work(struct bnxt *bp) 
1182 { 1183 if (BNXT_PF(bp)) 1184 queue_work(bnxt_pf_wq, &bp->sp_task); 1185 else 1186 schedule_work(&bp->sp_task); 1187 } 1188 1189 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1190 { 1191 if (!rxr->bnapi->in_reset) { 1192 rxr->bnapi->in_reset = true; 1193 if (bp->flags & BNXT_FLAG_CHIP_P5) 1194 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1195 else 1196 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); 1197 bnxt_queue_sp_work(bp); 1198 } 1199 rxr->rx_next_cons = 0xffff; 1200 } 1201 1202 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1203 { 1204 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1205 u16 idx = agg_id & MAX_TPA_P5_MASK; 1206 1207 if (test_bit(idx, map->agg_idx_bmap)) 1208 idx = find_first_zero_bit(map->agg_idx_bmap, 1209 BNXT_AGG_IDX_BMAP_SIZE); 1210 __set_bit(idx, map->agg_idx_bmap); 1211 map->agg_id_tbl[agg_id] = idx; 1212 return idx; 1213 } 1214 1215 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1216 { 1217 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1218 1219 __clear_bit(idx, map->agg_idx_bmap); 1220 } 1221 1222 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1223 { 1224 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1225 1226 return map->agg_id_tbl[agg_id]; 1227 } 1228 1229 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1230 struct rx_tpa_start_cmp *tpa_start, 1231 struct rx_tpa_start_cmp_ext *tpa_start1) 1232 { 1233 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1234 struct bnxt_tpa_info *tpa_info; 1235 u16 cons, prod, agg_id; 1236 struct rx_bd *prod_bd; 1237 dma_addr_t mapping; 1238 1239 if (bp->flags & BNXT_FLAG_CHIP_P5) { 1240 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1241 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1242 } else { 1243 agg_id = TPA_START_AGG_ID(tpa_start); 1244 } 1245 cons = tpa_start->rx_tpa_start_cmp_opaque; 1246 prod = rxr->rx_prod; 1247 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1248 prod_rx_buf = &rxr->rx_buf_ring[prod]; 1249 tpa_info = &rxr->rx_tpa[agg_id]; 1250 1251 if (unlikely(cons != rxr->rx_next_cons || 1252 TPA_START_ERROR(tpa_start))) { 1253 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1254 cons, rxr->rx_next_cons, 1255 TPA_START_ERROR_CODE(tpa_start1)); 1256 bnxt_sched_reset(bp, rxr); 1257 return; 1258 } 1259 /* Store cfa_code in tpa_info to use in tpa_end 1260 * completion processing. 
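	 * bnxt_tpa_end() passes it to bnxt_get_pkt_dev() to pick the
	 * vf-rep or PF netdev for the aggregated packet.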
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if the vf-rep dev is NULL, the packet must belong to the PF */
	return dev ?
dev : bp->dev; 1522 } 1523 1524 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1525 struct bnxt_cp_ring_info *cpr, 1526 u32 *raw_cons, 1527 struct rx_tpa_end_cmp *tpa_end, 1528 struct rx_tpa_end_cmp_ext *tpa_end1, 1529 u8 *event) 1530 { 1531 struct bnxt_napi *bnapi = cpr->bnapi; 1532 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1533 u8 *data_ptr, agg_bufs; 1534 unsigned int len; 1535 struct bnxt_tpa_info *tpa_info; 1536 dma_addr_t mapping; 1537 struct sk_buff *skb; 1538 u16 idx = 0, agg_id; 1539 void *data; 1540 bool gro; 1541 1542 if (unlikely(bnapi->in_reset)) { 1543 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1544 1545 if (rc < 0) 1546 return ERR_PTR(-EBUSY); 1547 return NULL; 1548 } 1549 1550 if (bp->flags & BNXT_FLAG_CHIP_P5) { 1551 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1552 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1553 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1554 tpa_info = &rxr->rx_tpa[agg_id]; 1555 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1556 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1557 agg_bufs, tpa_info->agg_count); 1558 agg_bufs = tpa_info->agg_count; 1559 } 1560 tpa_info->agg_count = 0; 1561 *event |= BNXT_AGG_EVENT; 1562 bnxt_free_agg_idx(rxr, agg_id); 1563 idx = agg_id; 1564 gro = !!(bp->flags & BNXT_FLAG_GRO); 1565 } else { 1566 agg_id = TPA_END_AGG_ID(tpa_end); 1567 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1568 tpa_info = &rxr->rx_tpa[agg_id]; 1569 idx = RING_CMP(*raw_cons); 1570 if (agg_bufs) { 1571 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1572 return ERR_PTR(-EBUSY); 1573 1574 *event |= BNXT_AGG_EVENT; 1575 idx = NEXT_CMP(idx); 1576 } 1577 gro = !!TPA_END_GRO(tpa_end); 1578 } 1579 data = tpa_info->data; 1580 data_ptr = tpa_info->data_ptr; 1581 prefetch(data_ptr); 1582 len = tpa_info->len; 1583 mapping = tpa_info->mapping; 1584 1585 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1586 bnxt_abort_tpa(cpr, idx, agg_bufs); 1587 if (agg_bufs > MAX_SKB_FRAGS) 1588 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1589 agg_bufs, (int)MAX_SKB_FRAGS); 1590 return NULL; 1591 } 1592 1593 if (len <= bp->rx_copy_thresh) { 1594 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1595 if (!skb) { 1596 bnxt_abort_tpa(cpr, idx, agg_bufs); 1597 return NULL; 1598 } 1599 } else { 1600 u8 *new_data; 1601 dma_addr_t new_mapping; 1602 1603 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); 1604 if (!new_data) { 1605 bnxt_abort_tpa(cpr, idx, agg_bufs); 1606 return NULL; 1607 } 1608 1609 tpa_info->data = new_data; 1610 tpa_info->data_ptr = new_data + bp->rx_offset; 1611 tpa_info->mapping = new_mapping; 1612 1613 skb = build_skb(data, 0); 1614 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1615 bp->rx_buf_use_size, bp->rx_dir, 1616 DMA_ATTR_WEAK_ORDERING); 1617 1618 if (!skb) { 1619 kfree(data); 1620 bnxt_abort_tpa(cpr, idx, agg_bufs); 1621 return NULL; 1622 } 1623 skb_reserve(skb, bp->rx_offset); 1624 skb_put(skb, len); 1625 } 1626 1627 if (agg_bufs) { 1628 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); 1629 if (!skb) { 1630 /* Page reuse already handled by bnxt_rx_pages(). 
*/ 1631 return NULL; 1632 } 1633 } 1634 1635 skb->protocol = 1636 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); 1637 1638 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1639 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1640 1641 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1642 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1643 u16 vlan_proto = tpa_info->metadata >> 1644 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1645 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1646 1647 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1648 } 1649 1650 skb_checksum_none_assert(skb); 1651 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1652 skb->ip_summed = CHECKSUM_UNNECESSARY; 1653 skb->csum_level = 1654 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1655 } 1656 1657 if (gro) 1658 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1659 1660 return skb; 1661 } 1662 1663 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1664 struct rx_agg_cmp *rx_agg) 1665 { 1666 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1667 struct bnxt_tpa_info *tpa_info; 1668 1669 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1670 tpa_info = &rxr->rx_tpa[agg_id]; 1671 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1672 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1673 } 1674 1675 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1676 struct sk_buff *skb) 1677 { 1678 if (skb->dev != bp->dev) { 1679 /* this packet belongs to a vf-rep */ 1680 bnxt_vf_rep_rx(bp, skb); 1681 return; 1682 } 1683 skb_record_rx_queue(skb, bnapi->index); 1684 napi_gro_receive(&bnapi->napi, skb); 1685 } 1686 1687 /* returns the following: 1688 * 1 - 1 packet successfully received 1689 * 0 - successful TPA_START, packet not completed yet 1690 * -EBUSY - completion ring does not have all the agg buffers yet 1691 * -ENOMEM - packet aborted due to out of memory 1692 * -EIO - packet aborted due to hw error indicated in BD 1693 */ 1694 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1695 u32 *raw_cons, u8 *event) 1696 { 1697 struct bnxt_napi *bnapi = cpr->bnapi; 1698 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1699 struct net_device *dev = bp->dev; 1700 struct rx_cmp *rxcmp; 1701 struct rx_cmp_ext *rxcmp1; 1702 u32 tmp_raw_cons = *raw_cons; 1703 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1704 struct bnxt_sw_rx_bd *rx_buf; 1705 unsigned int len; 1706 u8 *data_ptr, agg_bufs, cmp_type; 1707 dma_addr_t dma_addr; 1708 struct sk_buff *skb; 1709 void *data; 1710 int rc = 0; 1711 u32 misc; 1712 1713 rxcmp = (struct rx_cmp *) 1714 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1715 1716 cmp_type = RX_CMP_TYPE(rxcmp); 1717 1718 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 1719 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 1720 goto next_rx_no_prod_no_len; 1721 } 1722 1723 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1724 cp_cons = RING_CMP(tmp_raw_cons); 1725 rxcmp1 = (struct rx_cmp_ext *) 1726 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1727 1728 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1729 return -EBUSY; 1730 1731 prod = rxr->rx_prod; 1732 1733 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { 1734 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, 1735 (struct rx_tpa_start_cmp_ext *)rxcmp1); 1736 1737 *event |= BNXT_RX_EVENT; 1738 goto next_rx_no_prod_no_len; 1739 1740 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1741 skb = bnxt_tpa_end(bp, cpr, 
&tmp_raw_cons, 1742 (struct rx_tpa_end_cmp *)rxcmp, 1743 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1744 1745 if (IS_ERR(skb)) 1746 return -EBUSY; 1747 1748 rc = -ENOMEM; 1749 if (likely(skb)) { 1750 bnxt_deliver_skb(bp, bnapi, skb); 1751 rc = 1; 1752 } 1753 *event |= BNXT_RX_EVENT; 1754 goto next_rx_no_prod_no_len; 1755 } 1756 1757 cons = rxcmp->rx_cmp_opaque; 1758 if (unlikely(cons != rxr->rx_next_cons)) { 1759 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); 1760 1761 /* 0xffff is forced error, don't print it */ 1762 if (rxr->rx_next_cons != 0xffff) 1763 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 1764 cons, rxr->rx_next_cons); 1765 bnxt_sched_reset(bp, rxr); 1766 if (rc1) 1767 return rc1; 1768 goto next_rx_no_prod_no_len; 1769 } 1770 rx_buf = &rxr->rx_buf_ring[cons]; 1771 data = rx_buf->data; 1772 data_ptr = rx_buf->data_ptr; 1773 prefetch(data_ptr); 1774 1775 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1776 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1777 1778 if (agg_bufs) { 1779 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1780 return -EBUSY; 1781 1782 cp_cons = NEXT_CMP(cp_cons); 1783 *event |= BNXT_AGG_EVENT; 1784 } 1785 *event |= BNXT_RX_EVENT; 1786 1787 rx_buf->data = NULL; 1788 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1789 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 1790 1791 bnxt_reuse_rx_data(rxr, cons, data); 1792 if (agg_bufs) 1793 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 1794 false); 1795 1796 rc = -EIO; 1797 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 1798 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; 1799 if (!(bp->flags & BNXT_FLAG_CHIP_P5) && 1800 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 1801 netdev_warn_once(bp->dev, "RX buffer error %x\n", 1802 rx_err); 1803 bnxt_sched_reset(bp, rxr); 1804 } 1805 } 1806 goto next_rx_no_len; 1807 } 1808 1809 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 1810 dma_addr = rx_buf->mapping; 1811 1812 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { 1813 rc = 1; 1814 goto next_rx; 1815 } 1816 1817 if (len <= bp->rx_copy_thresh) { 1818 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 1819 bnxt_reuse_rx_data(rxr, cons, data); 1820 if (!skb) { 1821 if (agg_bufs) 1822 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 1823 agg_bufs, false); 1824 rc = -ENOMEM; 1825 goto next_rx; 1826 } 1827 } else { 1828 u32 payload; 1829 1830 if (rx_buf->data_ptr == data_ptr) 1831 payload = misc & RX_CMP_PAYLOAD_OFFSET; 1832 else 1833 payload = 0; 1834 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 1835 payload | len); 1836 if (!skb) { 1837 rc = -ENOMEM; 1838 goto next_rx; 1839 } 1840 } 1841 1842 if (agg_bufs) { 1843 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); 1844 if (!skb) { 1845 rc = -ENOMEM; 1846 goto next_rx; 1847 } 1848 } 1849 1850 if (RX_CMP_HASH_VALID(rxcmp)) { 1851 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 1852 enum pkt_hash_types type = PKT_HASH_TYPE_L4; 1853 1854 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1855 if (hash_type != 1 && hash_type != 3) 1856 type = PKT_HASH_TYPE_L3; 1857 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 1858 } 1859 1860 cfa_code = RX_CMP_CFA_CODE(rxcmp1); 1861 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); 1862 1863 if ((rxcmp1->rx_cmp_flags2 & 1864 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1865 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1866 u32 meta_data = 
le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1867 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1868 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1869 1870 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1871 } 1872 1873 skb_checksum_none_assert(skb); 1874 if (RX_CMP_L4_CS_OK(rxcmp1)) { 1875 if (dev->features & NETIF_F_RXCSUM) { 1876 skb->ip_summed = CHECKSUM_UNNECESSARY; 1877 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 1878 } 1879 } else { 1880 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1881 if (dev->features & NETIF_F_RXCSUM) 1882 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; 1883 } 1884 } 1885 1886 bnxt_deliver_skb(bp, bnapi, skb); 1887 rc = 1; 1888 1889 next_rx: 1890 cpr->rx_packets += 1; 1891 cpr->rx_bytes += len; 1892 1893 next_rx_no_len: 1894 rxr->rx_prod = NEXT_RX(prod); 1895 rxr->rx_next_cons = NEXT_RX(cons); 1896 1897 next_rx_no_prod_no_len: 1898 *raw_cons = tmp_raw_cons; 1899 1900 return rc; 1901 } 1902 1903 /* In netpoll mode, if we are using a combined completion ring, we need to 1904 * discard the rx packets and recycle the buffers. 1905 */ 1906 static int bnxt_force_rx_discard(struct bnxt *bp, 1907 struct bnxt_cp_ring_info *cpr, 1908 u32 *raw_cons, u8 *event) 1909 { 1910 u32 tmp_raw_cons = *raw_cons; 1911 struct rx_cmp_ext *rxcmp1; 1912 struct rx_cmp *rxcmp; 1913 u16 cp_cons; 1914 u8 cmp_type; 1915 1916 cp_cons = RING_CMP(tmp_raw_cons); 1917 rxcmp = (struct rx_cmp *) 1918 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1919 1920 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1921 cp_cons = RING_CMP(tmp_raw_cons); 1922 rxcmp1 = (struct rx_cmp_ext *) 1923 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1924 1925 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1926 return -EBUSY; 1927 1928 cmp_type = RX_CMP_TYPE(rxcmp); 1929 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1930 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1931 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1932 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1933 struct rx_tpa_end_cmp_ext *tpa_end1; 1934 1935 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 1936 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 1937 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 1938 } 1939 return bnxt_rx_pkt(bp, cpr, raw_cons, event); 1940 } 1941 1942 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 1943 { 1944 struct bnxt_fw_health *fw_health = bp->fw_health; 1945 u32 reg = fw_health->regs[reg_idx]; 1946 u32 reg_type, reg_off, val = 0; 1947 1948 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 1949 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 1950 switch (reg_type) { 1951 case BNXT_FW_HEALTH_REG_TYPE_CFG: 1952 pci_read_config_dword(bp->pdev, reg_off, &val); 1953 break; 1954 case BNXT_FW_HEALTH_REG_TYPE_GRC: 1955 reg_off = fw_health->mapped_regs[reg_idx]; 1956 fallthrough; 1957 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 1958 val = readl(bp->bar0 + reg_off); 1959 break; 1960 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 1961 val = readl(bp->bar1 + reg_off); 1962 break; 1963 } 1964 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 1965 val &= fw_health->fw_reset_inprog_reg_mask; 1966 return val; 1967 } 1968 1969 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 1970 { 1971 int i; 1972 1973 for (i = 0; i < bp->rx_nr_rings; i++) { 1974 u16 grp_idx = bp->rx_ring[i].bnapi->index; 1975 struct bnxt_ring_grp_info *grp_info; 1976 1977 grp_info = &bp->grp_info[grp_idx]; 1978 if (grp_info->agg_fw_ring_id == ring_id) 1979 return grp_idx; 1980 } 1981 return INVALID_HW_RING_ID; 1982 } 1983 1984 #define BNXT_GET_EVENT_PORT(data) \ 1985 ((data) & \ 1986 
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 1987 1988 #define BNXT_EVENT_RING_TYPE(data2) \ 1989 ((data2) & \ 1990 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 1991 1992 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 1993 (BNXT_EVENT_RING_TYPE(data2) == \ 1994 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 1995 1996 static int bnxt_async_event_process(struct bnxt *bp, 1997 struct hwrm_async_event_cmpl *cmpl) 1998 { 1999 u16 event_id = le16_to_cpu(cmpl->event_id); 2000 u32 data1 = le32_to_cpu(cmpl->event_data1); 2001 u32 data2 = le32_to_cpu(cmpl->event_data2); 2002 2003 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 2004 switch (event_id) { 2005 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 2006 struct bnxt_link_info *link_info = &bp->link_info; 2007 2008 if (BNXT_VF(bp)) 2009 goto async_event_process_exit; 2010 2011 /* print unsupported speed warning in forced speed mode only */ 2012 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 2013 (data1 & 0x20000)) { 2014 u16 fw_speed = link_info->force_link_speed; 2015 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 2016 2017 if (speed != SPEED_UNKNOWN) 2018 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 2019 speed); 2020 } 2021 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 2022 } 2023 fallthrough; 2024 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 2025 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 2026 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2027 fallthrough; 2028 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2029 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2030 break; 2031 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2032 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2033 break; 2034 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2035 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2036 2037 if (BNXT_VF(bp)) 2038 break; 2039 2040 if (bp->pf.port_id != port_id) 2041 break; 2042 2043 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2044 break; 2045 } 2046 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2047 if (BNXT_PF(bp)) 2048 goto async_event_process_exit; 2049 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2050 break; 2051 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2052 char *fatal_str = "non-fatal"; 2053 2054 if (!bp->fw_health) 2055 goto async_event_process_exit; 2056 2057 bp->fw_reset_timestamp = jiffies; 2058 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2059 if (!bp->fw_reset_min_dsecs) 2060 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2061 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2062 if (!bp->fw_reset_max_dsecs) 2063 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2064 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2065 fatal_str = "fatal"; 2066 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2067 } 2068 netif_warn(bp, hw, bp->dev, 2069 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", 2070 fatal_str, data1, data2, 2071 bp->fw_reset_min_dsecs * 100, 2072 bp->fw_reset_max_dsecs * 100); 2073 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2074 break; 2075 } 2076 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2077 struct bnxt_fw_health *fw_health = bp->fw_health; 2078 2079 if (!fw_health) 2080 goto async_event_process_exit; 2081 2082 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1); 2083 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2084 if (!fw_health->enabled) { 2085 
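			/* Firmware has error recovery disabled for now: just
			 * log it and skip arming the heartbeat/reset-counter
			 * polling that the enabled path sets up below.
			 */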
netif_info(bp, drv, bp->dev, 2086 "Error recovery info: error recovery[0]\n"); 2087 break; 2088 } 2089 fw_health->tmr_multiplier = 2090 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2091 bp->current_interval * 10); 2092 fw_health->tmr_counter = fw_health->tmr_multiplier; 2093 fw_health->last_fw_heartbeat = 2094 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2095 fw_health->last_fw_reset_cnt = 2096 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2097 netif_info(bp, drv, bp->dev, 2098 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n", 2099 fw_health->master, fw_health->last_fw_reset_cnt, 2100 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG)); 2101 goto async_event_process_exit; 2102 } 2103 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 2104 netif_notice(bp, hw, bp->dev, 2105 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", 2106 data1, data2); 2107 goto async_event_process_exit; 2108 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2109 struct bnxt_rx_ring_info *rxr; 2110 u16 grp_idx; 2111 2112 if (bp->flags & BNXT_FLAG_CHIP_P5) 2113 goto async_event_process_exit; 2114 2115 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2116 BNXT_EVENT_RING_TYPE(data2), data1); 2117 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2118 goto async_event_process_exit; 2119 2120 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2121 if (grp_idx == INVALID_HW_RING_ID) { 2122 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2123 data1); 2124 goto async_event_process_exit; 2125 } 2126 rxr = bp->bnapi[grp_idx]->rx_ring; 2127 bnxt_sched_reset(bp, rxr); 2128 goto async_event_process_exit; 2129 } 2130 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { 2131 struct bnxt_fw_health *fw_health = bp->fw_health; 2132 2133 netif_notice(bp, hw, bp->dev, 2134 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", 2135 data1, data2); 2136 if (fw_health) { 2137 fw_health->echo_req_data1 = data1; 2138 fw_health->echo_req_data2 = data2; 2139 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); 2140 break; 2141 } 2142 goto async_event_process_exit; 2143 } 2144 default: 2145 goto async_event_process_exit; 2146 } 2147 bnxt_queue_sp_work(bp); 2148 async_event_process_exit: 2149 bnxt_ulp_async_events(bp, cmpl); 2150 return 0; 2151 } 2152 2153 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2154 { 2155 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2156 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2157 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2158 (struct hwrm_fwd_req_cmpl *)txcmp; 2159 2160 switch (cmpl_type) { 2161 case CMPL_BASE_TYPE_HWRM_DONE: 2162 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2163 if (seq_id == bp->hwrm_intr_seq_id) 2164 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; 2165 else 2166 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); 2167 break; 2168 2169 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2170 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2171 2172 if ((vf_id < bp->pf.first_vf_id) || 2173 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2174 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2175 vf_id); 2176 return -EINVAL; 2177 } 2178 2179 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2180 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 2181 bnxt_queue_sp_work(bp); 2182 break; 2183 2184 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2185 bnxt_async_event_process(bp, 2186 (struct hwrm_async_event_cmpl *)txcmp); 2187 2188 default: 2189 break; 2190 } 2191 2192 return 0; 2193 } 
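
/* MSI-X interrupt handler: the hard IRQ only counts the event, prefetches
 * the next completion descriptor and schedules NAPI; all descriptor
 * processing is done in the NAPI poll routines that follow.
 */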
2194 2195 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2196 { 2197 struct bnxt_napi *bnapi = dev_instance; 2198 struct bnxt *bp = bnapi->bp; 2199 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2200 u32 cons = RING_CMP(cpr->cp_raw_cons); 2201 2202 cpr->event_ctr++; 2203 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2204 napi_schedule(&bnapi->napi); 2205 return IRQ_HANDLED; 2206 } 2207 2208 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2209 { 2210 u32 raw_cons = cpr->cp_raw_cons; 2211 u16 cons = RING_CMP(raw_cons); 2212 struct tx_cmp *txcmp; 2213 2214 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2215 2216 return TX_CMP_VALID(txcmp, raw_cons); 2217 } 2218 2219 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 2220 { 2221 struct bnxt_napi *bnapi = dev_instance; 2222 struct bnxt *bp = bnapi->bp; 2223 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2224 u32 cons = RING_CMP(cpr->cp_raw_cons); 2225 u32 int_status; 2226 2227 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2228 2229 if (!bnxt_has_work(bp, cpr)) { 2230 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 2231 /* return if erroneous interrupt */ 2232 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 2233 return IRQ_NONE; 2234 } 2235 2236 /* disable ring IRQ */ 2237 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); 2238 2239 /* Return here if interrupt is shared and is disabled. */ 2240 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2241 return IRQ_HANDLED; 2242 2243 napi_schedule(&bnapi->napi); 2244 return IRQ_HANDLED; 2245 } 2246 2247 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2248 int budget) 2249 { 2250 struct bnxt_napi *bnapi = cpr->bnapi; 2251 u32 raw_cons = cpr->cp_raw_cons; 2252 u32 cons; 2253 int tx_pkts = 0; 2254 int rx_pkts = 0; 2255 u8 event = 0; 2256 struct tx_cmp *txcmp; 2257 2258 cpr->has_more_work = 0; 2259 cpr->had_work_done = 1; 2260 while (1) { 2261 int rc; 2262 2263 cons = RING_CMP(raw_cons); 2264 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2265 2266 if (!TX_CMP_VALID(txcmp, raw_cons)) 2267 break; 2268 2269 /* The valid test of the entry must be done first before 2270 * reading any further. 2271 */ 2272 dma_rmb(); 2273 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 2274 tx_pkts++; 2275 /* return full budget so NAPI will complete. */ 2276 if (unlikely(tx_pkts > bp->tx_wake_thresh)) { 2277 rx_pkts = budget; 2278 raw_cons = NEXT_RAW_CMP(raw_cons); 2279 if (budget) 2280 cpr->has_more_work = 1; 2281 break; 2282 } 2283 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2284 if (likely(budget)) 2285 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2286 else 2287 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2288 &event); 2289 if (likely(rc >= 0)) 2290 rx_pkts += rc; 2291 /* Increment rx_pkts when rc is -ENOMEM to count towards 2292 * the NAPI budget. Otherwise, we may potentially loop 2293 * here forever if we consistently cannot allocate 2294 * buffers. 
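			 * -EBUSY means the completion ring does not yet hold
			 * all the aggregation entries for this packet, so stop
			 * and let the next poll resume from the same spot.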
			 */
			else if (rc == -ENOMEM && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts && rx_pkts == budget) {
			cpr->has_more_work = 1;
			break;
		}
	}

	if (event & BNXT_REDIRECT_EVENT)
		xdp_do_flush_map();

	if (event & BNXT_TX_EVENT) {
		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
		u16 prod = txr->tx_prod;

		/* Sync BD data before updating doorbell */
		wmb();

		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
	}

	cpr->cp_raw_cons = raw_cons;
	bnapi->tx_pkts += tx_pkts;
	bnapi->events |= event;
	return rx_pkts;
}

static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	if (bnapi->tx_pkts) {
		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
		bnapi->tx_pkts = 0;
	}

	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		if (bnapi->events & BNXT_AGG_EVENT)
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
	}
	bnapi->events = 0;
}

static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			  int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	int rx_pkts;

	rx_pkts = __bnxt_poll_work(bp, cpr, budget);

	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
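	 * The doorbell written here only acknowledges the new consumer index;
	 * interrupt re-arming is left to bnxt_poll(), which uses
	 * BNXT_DB_CQ_ARM() once napi_complete_done() succeeds.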
2363 */ 2364 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2365 2366 __bnxt_poll_work_done(bp, bnapi); 2367 return rx_pkts; 2368 } 2369 2370 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2371 { 2372 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2373 struct bnxt *bp = bnapi->bp; 2374 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2375 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2376 struct tx_cmp *txcmp; 2377 struct rx_cmp_ext *rxcmp1; 2378 u32 cp_cons, tmp_raw_cons; 2379 u32 raw_cons = cpr->cp_raw_cons; 2380 u32 rx_pkts = 0; 2381 u8 event = 0; 2382 2383 while (1) { 2384 int rc; 2385 2386 cp_cons = RING_CMP(raw_cons); 2387 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2388 2389 if (!TX_CMP_VALID(txcmp, raw_cons)) 2390 break; 2391 2392 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2393 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2394 cp_cons = RING_CMP(tmp_raw_cons); 2395 rxcmp1 = (struct rx_cmp_ext *) 2396 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2397 2398 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2399 break; 2400 2401 /* force an error to recycle the buffer */ 2402 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2403 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2404 2405 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2406 if (likely(rc == -EIO) && budget) 2407 rx_pkts++; 2408 else if (rc == -EBUSY) /* partial completion */ 2409 break; 2410 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2411 CMPL_BASE_TYPE_HWRM_DONE)) { 2412 bnxt_hwrm_handler(bp, txcmp); 2413 } else { 2414 netdev_err(bp->dev, 2415 "Invalid completion received on special ring\n"); 2416 } 2417 raw_cons = NEXT_RAW_CMP(raw_cons); 2418 2419 if (rx_pkts == budget) 2420 break; 2421 } 2422 2423 cpr->cp_raw_cons = raw_cons; 2424 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2425 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2426 2427 if (event & BNXT_AGG_EVENT) 2428 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2429 2430 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2431 napi_complete_done(napi, rx_pkts); 2432 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2433 } 2434 return rx_pkts; 2435 } 2436 2437 static int bnxt_poll(struct napi_struct *napi, int budget) 2438 { 2439 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2440 struct bnxt *bp = bnapi->bp; 2441 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2442 int work_done = 0; 2443 2444 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 2445 napi_complete(napi); 2446 return 0; 2447 } 2448 while (1) { 2449 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 2450 2451 if (work_done >= budget) { 2452 if (!budget) 2453 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2454 break; 2455 } 2456 2457 if (!bnxt_has_work(bp, cpr)) { 2458 if (napi_complete_done(napi, work_done)) 2459 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2460 break; 2461 } 2462 } 2463 if (bp->flags & BNXT_FLAG_DIM) { 2464 struct dim_sample dim_sample = {}; 2465 2466 dim_update_sample(cpr->event_ctr, 2467 cpr->rx_packets, 2468 cpr->rx_bytes, 2469 &dim_sample); 2470 net_dim(&cpr->dim, dim_sample); 2471 } 2472 return work_done; 2473 } 2474 2475 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 2476 { 2477 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2478 int i, work_done = 0; 2479 2480 for (i = 0; i < 2; i++) { 2481 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2482 2483 if (cpr2) { 2484 work_done += __bnxt_poll_work(bp, cpr2, 2485 budget - work_done); 2486 cpr->has_more_work |= 
cpr2->has_more_work; 2487 } 2488 } 2489 return work_done; 2490 } 2491 2492 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 2493 u64 dbr_type) 2494 { 2495 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2496 int i; 2497 2498 for (i = 0; i < 2; i++) { 2499 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2500 struct bnxt_db_info *db; 2501 2502 if (cpr2 && cpr2->had_work_done) { 2503 db = &cpr2->cp_db; 2504 writeq(db->db_key64 | dbr_type | 2505 RING_CMP(cpr2->cp_raw_cons), db->doorbell); 2506 cpr2->had_work_done = 0; 2507 } 2508 } 2509 __bnxt_poll_work_done(bp, bnapi); 2510 } 2511 2512 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 2513 { 2514 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2515 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2516 u32 raw_cons = cpr->cp_raw_cons; 2517 struct bnxt *bp = bnapi->bp; 2518 struct nqe_cn *nqcmp; 2519 int work_done = 0; 2520 u32 cons; 2521 2522 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 2523 napi_complete(napi); 2524 return 0; 2525 } 2526 if (cpr->has_more_work) { 2527 cpr->has_more_work = 0; 2528 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 2529 } 2530 while (1) { 2531 cons = RING_CMP(raw_cons); 2532 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2533 2534 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 2535 if (cpr->has_more_work) 2536 break; 2537 2538 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); 2539 cpr->cp_raw_cons = raw_cons; 2540 if (napi_complete_done(napi, work_done)) 2541 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 2542 cpr->cp_raw_cons); 2543 return work_done; 2544 } 2545 2546 /* The valid test of the entry must be done first before 2547 * reading any further. 2548 */ 2549 dma_rmb(); 2550 2551 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { 2552 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 2553 struct bnxt_cp_ring_info *cpr2; 2554 2555 cpr2 = cpr->cp_ring_arr[idx]; 2556 work_done += __bnxt_poll_work(bp, cpr2, 2557 budget - work_done); 2558 cpr->has_more_work |= cpr2->has_more_work; 2559 } else { 2560 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 2561 } 2562 raw_cons = NEXT_RAW_CMP(raw_cons); 2563 } 2564 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); 2565 if (raw_cons != cpr->cp_raw_cons) { 2566 cpr->cp_raw_cons = raw_cons; 2567 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 2568 } 2569 return work_done; 2570 } 2571 2572 static void bnxt_free_tx_skbs(struct bnxt *bp) 2573 { 2574 int i, max_idx; 2575 struct pci_dev *pdev = bp->pdev; 2576 2577 if (!bp->tx_ring) 2578 return; 2579 2580 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2581 for (i = 0; i < bp->tx_nr_rings; i++) { 2582 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2583 int j; 2584 2585 for (j = 0; j < max_idx;) { 2586 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2587 struct sk_buff *skb; 2588 int k, last; 2589 2590 if (i < bp->tx_nr_rings_xdp && 2591 tx_buf->action == XDP_REDIRECT) { 2592 dma_unmap_single(&pdev->dev, 2593 dma_unmap_addr(tx_buf, mapping), 2594 dma_unmap_len(tx_buf, len), 2595 PCI_DMA_TODEVICE); 2596 xdp_return_frame(tx_buf->xdpf); 2597 tx_buf->action = 0; 2598 tx_buf->xdpf = NULL; 2599 j++; 2600 continue; 2601 } 2602 2603 skb = tx_buf->skb; 2604 if (!skb) { 2605 j++; 2606 continue; 2607 } 2608 2609 tx_buf->skb = NULL; 2610 2611 if (tx_buf->is_push) { 2612 dev_kfree_skb(skb); 2613 j += 2; 2614 continue; 2615 } 2616 2617 dma_unmap_single(&pdev->dev, 2618 dma_unmap_addr(tx_buf, mapping), 2619 skb_headlen(skb), 2620 PCI_DMA_TODEVICE); 2621 2622 last = tx_buf->nr_frags; 
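			/* Advance past the two BDs used by the linear part of
			 * the packet, then unmap one BD per page fragment
			 * before freeing the skb.
			 */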
2623 j += 2; 2624 for (k = 0; k < last; k++, j++) { 2625 int ring_idx = j & bp->tx_ring_mask; 2626 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2627 2628 tx_buf = &txr->tx_buf_ring[ring_idx]; 2629 dma_unmap_page( 2630 &pdev->dev, 2631 dma_unmap_addr(tx_buf, mapping), 2632 skb_frag_size(frag), PCI_DMA_TODEVICE); 2633 } 2634 dev_kfree_skb(skb); 2635 } 2636 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 2637 } 2638 } 2639 2640 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) 2641 { 2642 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 2643 struct pci_dev *pdev = bp->pdev; 2644 struct bnxt_tpa_idx_map *map; 2645 int i, max_idx, max_agg_idx; 2646 2647 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 2648 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 2649 if (!rxr->rx_tpa) 2650 goto skip_rx_tpa_free; 2651 2652 for (i = 0; i < bp->max_tpa; i++) { 2653 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 2654 u8 *data = tpa_info->data; 2655 2656 if (!data) 2657 continue; 2658 2659 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, 2660 bp->rx_buf_use_size, bp->rx_dir, 2661 DMA_ATTR_WEAK_ORDERING); 2662 2663 tpa_info->data = NULL; 2664 2665 kfree(data); 2666 } 2667 2668 skip_rx_tpa_free: 2669 for (i = 0; i < max_idx; i++) { 2670 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 2671 dma_addr_t mapping = rx_buf->mapping; 2672 void *data = rx_buf->data; 2673 2674 if (!data) 2675 continue; 2676 2677 rx_buf->data = NULL; 2678 if (BNXT_RX_PAGE_MODE(bp)) { 2679 mapping -= bp->rx_dma_offset; 2680 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, 2681 bp->rx_dir, 2682 DMA_ATTR_WEAK_ORDERING); 2683 page_pool_recycle_direct(rxr->page_pool, data); 2684 } else { 2685 dma_unmap_single_attrs(&pdev->dev, mapping, 2686 bp->rx_buf_use_size, bp->rx_dir, 2687 DMA_ATTR_WEAK_ORDERING); 2688 kfree(data); 2689 } 2690 } 2691 for (i = 0; i < max_agg_idx; i++) { 2692 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 2693 struct page *page = rx_agg_buf->page; 2694 2695 if (!page) 2696 continue; 2697 2698 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, 2699 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, 2700 DMA_ATTR_WEAK_ORDERING); 2701 2702 rx_agg_buf->page = NULL; 2703 __clear_bit(i, rxr->rx_agg_bmap); 2704 2705 __free_page(page); 2706 } 2707 if (rxr->rx_page) { 2708 __free_page(rxr->rx_page); 2709 rxr->rx_page = NULL; 2710 } 2711 map = rxr->rx_tpa_idx_map; 2712 if (map) 2713 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 2714 } 2715 2716 static void bnxt_free_rx_skbs(struct bnxt *bp) 2717 { 2718 int i; 2719 2720 if (!bp->rx_ring) 2721 return; 2722 2723 for (i = 0; i < bp->rx_nr_rings; i++) 2724 bnxt_free_one_rx_ring_skbs(bp, i); 2725 } 2726 2727 static void bnxt_free_skbs(struct bnxt *bp) 2728 { 2729 bnxt_free_tx_skbs(bp); 2730 bnxt_free_rx_skbs(bp); 2731 } 2732 2733 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len) 2734 { 2735 u8 init_val = mem_init->init_val; 2736 u16 offset = mem_init->offset; 2737 u8 *p2 = p; 2738 int i; 2739 2740 if (!init_val) 2741 return; 2742 if (offset == BNXT_MEM_INVALID_OFFSET) { 2743 memset(p, init_val, len); 2744 return; 2745 } 2746 for (i = 0; i < len; i += mem_init->size) 2747 *(p2 + i + offset) = init_val; 2748 } 2749 2750 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2751 { 2752 struct pci_dev *pdev = bp->pdev; 2753 int i; 2754 2755 for (i = 0; i < rmem->nr_pages; i++) { 2756 if (!rmem->pg_arr[i]) 2757 continue; 2758 2759 dma_free_coherent(&pdev->dev, rmem->page_size, 2760 
rmem->pg_arr[i], rmem->dma_arr[i]); 2761 2762 rmem->pg_arr[i] = NULL; 2763 } 2764 if (rmem->pg_tbl) { 2765 size_t pg_tbl_size = rmem->nr_pages * 8; 2766 2767 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2768 pg_tbl_size = rmem->page_size; 2769 dma_free_coherent(&pdev->dev, pg_tbl_size, 2770 rmem->pg_tbl, rmem->pg_tbl_map); 2771 rmem->pg_tbl = NULL; 2772 } 2773 if (rmem->vmem_size && *rmem->vmem) { 2774 vfree(*rmem->vmem); 2775 *rmem->vmem = NULL; 2776 } 2777 } 2778 2779 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2780 { 2781 struct pci_dev *pdev = bp->pdev; 2782 u64 valid_bit = 0; 2783 int i; 2784 2785 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 2786 valid_bit = PTU_PTE_VALID; 2787 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 2788 size_t pg_tbl_size = rmem->nr_pages * 8; 2789 2790 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2791 pg_tbl_size = rmem->page_size; 2792 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 2793 &rmem->pg_tbl_map, 2794 GFP_KERNEL); 2795 if (!rmem->pg_tbl) 2796 return -ENOMEM; 2797 } 2798 2799 for (i = 0; i < rmem->nr_pages; i++) { 2800 u64 extra_bits = valid_bit; 2801 2802 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2803 rmem->page_size, 2804 &rmem->dma_arr[i], 2805 GFP_KERNEL); 2806 if (!rmem->pg_arr[i]) 2807 return -ENOMEM; 2808 2809 if (rmem->mem_init) 2810 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i], 2811 rmem->page_size); 2812 if (rmem->nr_pages > 1 || rmem->depth > 0) { 2813 if (i == rmem->nr_pages - 2 && 2814 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2815 extra_bits |= PTU_PTE_NEXT_TO_LAST; 2816 else if (i == rmem->nr_pages - 1 && 2817 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2818 extra_bits |= PTU_PTE_LAST; 2819 rmem->pg_tbl[i] = 2820 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 2821 } 2822 } 2823 2824 if (rmem->vmem_size) { 2825 *rmem->vmem = vzalloc(rmem->vmem_size); 2826 if (!(*rmem->vmem)) 2827 return -ENOMEM; 2828 } 2829 return 0; 2830 } 2831 2832 static void bnxt_free_tpa_info(struct bnxt *bp) 2833 { 2834 int i; 2835 2836 for (i = 0; i < bp->rx_nr_rings; i++) { 2837 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2838 2839 kfree(rxr->rx_tpa_idx_map); 2840 rxr->rx_tpa_idx_map = NULL; 2841 if (rxr->rx_tpa) { 2842 kfree(rxr->rx_tpa[0].agg_arr); 2843 rxr->rx_tpa[0].agg_arr = NULL; 2844 } 2845 kfree(rxr->rx_tpa); 2846 rxr->rx_tpa = NULL; 2847 } 2848 } 2849 2850 static int bnxt_alloc_tpa_info(struct bnxt *bp) 2851 { 2852 int i, j, total_aggs = 0; 2853 2854 bp->max_tpa = MAX_TPA; 2855 if (bp->flags & BNXT_FLAG_CHIP_P5) { 2856 if (!bp->max_tpa_v2) 2857 return 0; 2858 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 2859 total_aggs = bp->max_tpa * MAX_SKB_FRAGS; 2860 } 2861 2862 for (i = 0; i < bp->rx_nr_rings; i++) { 2863 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2864 struct rx_agg_cmp *agg; 2865 2866 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 2867 GFP_KERNEL); 2868 if (!rxr->rx_tpa) 2869 return -ENOMEM; 2870 2871 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 2872 continue; 2873 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); 2874 rxr->rx_tpa[0].agg_arr = agg; 2875 if (!agg) 2876 return -ENOMEM; 2877 for (j = 1; j < bp->max_tpa; j++) 2878 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; 2879 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 2880 GFP_KERNEL); 2881 if (!rxr->rx_tpa_idx_map) 2882 return -ENOMEM; 2883 } 2884 return 0; 2885 } 2886 2887 static void bnxt_free_rx_rings(struct bnxt *bp) 2888 { 2889 int i; 
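
	/* Release per-ring TPA state first, then drop any XDP program
	 * reference, unregister the xdp_rxq, destroy the page pool and free
	 * the aggregation bitmap and descriptor memory for each RX ring.
	 */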
2890 2891 if (!bp->rx_ring) 2892 return; 2893 2894 bnxt_free_tpa_info(bp); 2895 for (i = 0; i < bp->rx_nr_rings; i++) { 2896 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2897 struct bnxt_ring_struct *ring; 2898 2899 if (rxr->xdp_prog) 2900 bpf_prog_put(rxr->xdp_prog); 2901 2902 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 2903 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2904 2905 page_pool_destroy(rxr->page_pool); 2906 rxr->page_pool = NULL; 2907 2908 kfree(rxr->rx_agg_bmap); 2909 rxr->rx_agg_bmap = NULL; 2910 2911 ring = &rxr->rx_ring_struct; 2912 bnxt_free_ring(bp, &ring->ring_mem); 2913 2914 ring = &rxr->rx_agg_ring_struct; 2915 bnxt_free_ring(bp, &ring->ring_mem); 2916 } 2917 } 2918 2919 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 2920 struct bnxt_rx_ring_info *rxr) 2921 { 2922 struct page_pool_params pp = { 0 }; 2923 2924 pp.pool_size = bp->rx_ring_size; 2925 pp.nid = dev_to_node(&bp->pdev->dev); 2926 pp.dev = &bp->pdev->dev; 2927 pp.dma_dir = DMA_BIDIRECTIONAL; 2928 2929 rxr->page_pool = page_pool_create(&pp); 2930 if (IS_ERR(rxr->page_pool)) { 2931 int err = PTR_ERR(rxr->page_pool); 2932 2933 rxr->page_pool = NULL; 2934 return err; 2935 } 2936 return 0; 2937 } 2938 2939 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2940 { 2941 int i, rc = 0, agg_rings = 0; 2942 2943 if (!bp->rx_ring) 2944 return -ENOMEM; 2945 2946 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2947 agg_rings = 1; 2948 2949 for (i = 0; i < bp->rx_nr_rings; i++) { 2950 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2951 struct bnxt_ring_struct *ring; 2952 2953 ring = &rxr->rx_ring_struct; 2954 2955 rc = bnxt_alloc_rx_page_pool(bp, rxr); 2956 if (rc) 2957 return rc; 2958 2959 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 2960 if (rc < 0) 2961 return rc; 2962 2963 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 2964 MEM_TYPE_PAGE_POOL, 2965 rxr->page_pool); 2966 if (rc) { 2967 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2968 return rc; 2969 } 2970 2971 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2972 if (rc) 2973 return rc; 2974 2975 ring->grp_idx = i; 2976 if (agg_rings) { 2977 u16 mem_size; 2978 2979 ring = &rxr->rx_agg_ring_struct; 2980 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2981 if (rc) 2982 return rc; 2983 2984 ring->grp_idx = i; 2985 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2986 mem_size = rxr->rx_agg_bmap_size / 8; 2987 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2988 if (!rxr->rx_agg_bmap) 2989 return -ENOMEM; 2990 } 2991 } 2992 if (bp->flags & BNXT_FLAG_TPA) 2993 rc = bnxt_alloc_tpa_info(bp); 2994 return rc; 2995 } 2996 2997 static void bnxt_free_tx_rings(struct bnxt *bp) 2998 { 2999 int i; 3000 struct pci_dev *pdev = bp->pdev; 3001 3002 if (!bp->tx_ring) 3003 return; 3004 3005 for (i = 0; i < bp->tx_nr_rings; i++) { 3006 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3007 struct bnxt_ring_struct *ring; 3008 3009 if (txr->tx_push) { 3010 dma_free_coherent(&pdev->dev, bp->tx_push_size, 3011 txr->tx_push, txr->tx_push_mapping); 3012 txr->tx_push = NULL; 3013 } 3014 3015 ring = &txr->tx_ring_struct; 3016 3017 bnxt_free_ring(bp, &ring->ring_mem); 3018 } 3019 } 3020 3021 static int bnxt_alloc_tx_rings(struct bnxt *bp) 3022 { 3023 int i, j, rc; 3024 struct pci_dev *pdev = bp->pdev; 3025 3026 bp->tx_push_size = 0; 3027 if (bp->tx_push_thresh) { 3028 int push_size; 3029 3030 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 3031 bp->tx_push_thresh); 3032 3033 if (push_size > 256) { 3034 push_size = 0; 3035 bp->tx_push_thresh = 0; 3036 } 3037 3038 bp->tx_push_size = push_size; 3039 } 3040 3041 for (i = 0, 
j = 0; i < bp->tx_nr_rings; i++) { 3042 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3043 struct bnxt_ring_struct *ring; 3044 u8 qidx; 3045 3046 ring = &txr->tx_ring_struct; 3047 3048 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3049 if (rc) 3050 return rc; 3051 3052 ring->grp_idx = txr->bnapi->index; 3053 if (bp->tx_push_size) { 3054 dma_addr_t mapping; 3055 3056 /* One pre-allocated DMA buffer to backup 3057 * TX push operation 3058 */ 3059 txr->tx_push = dma_alloc_coherent(&pdev->dev, 3060 bp->tx_push_size, 3061 &txr->tx_push_mapping, 3062 GFP_KERNEL); 3063 3064 if (!txr->tx_push) 3065 return -ENOMEM; 3066 3067 mapping = txr->tx_push_mapping + 3068 sizeof(struct tx_push_bd); 3069 txr->data_mapping = cpu_to_le64(mapping); 3070 } 3071 qidx = bp->tc_to_qidx[j]; 3072 ring->queue_id = bp->q_info[qidx].queue_id; 3073 if (i < bp->tx_nr_rings_xdp) 3074 continue; 3075 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 3076 j++; 3077 } 3078 return 0; 3079 } 3080 3081 static void bnxt_free_cp_rings(struct bnxt *bp) 3082 { 3083 int i; 3084 3085 if (!bp->bnapi) 3086 return; 3087 3088 for (i = 0; i < bp->cp_nr_rings; i++) { 3089 struct bnxt_napi *bnapi = bp->bnapi[i]; 3090 struct bnxt_cp_ring_info *cpr; 3091 struct bnxt_ring_struct *ring; 3092 int j; 3093 3094 if (!bnapi) 3095 continue; 3096 3097 cpr = &bnapi->cp_ring; 3098 ring = &cpr->cp_ring_struct; 3099 3100 bnxt_free_ring(bp, &ring->ring_mem); 3101 3102 for (j = 0; j < 2; j++) { 3103 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 3104 3105 if (cpr2) { 3106 ring = &cpr2->cp_ring_struct; 3107 bnxt_free_ring(bp, &ring->ring_mem); 3108 kfree(cpr2); 3109 cpr->cp_ring_arr[j] = NULL; 3110 } 3111 } 3112 } 3113 } 3114 3115 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) 3116 { 3117 struct bnxt_ring_mem_info *rmem; 3118 struct bnxt_ring_struct *ring; 3119 struct bnxt_cp_ring_info *cpr; 3120 int rc; 3121 3122 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); 3123 if (!cpr) 3124 return NULL; 3125 3126 ring = &cpr->cp_ring_struct; 3127 rmem = &ring->ring_mem; 3128 rmem->nr_pages = bp->cp_nr_pages; 3129 rmem->page_size = HW_CMPD_RING_SIZE; 3130 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3131 rmem->dma_arr = cpr->cp_desc_mapping; 3132 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3133 rc = bnxt_alloc_ring(bp, rmem); 3134 if (rc) { 3135 bnxt_free_ring(bp, rmem); 3136 kfree(cpr); 3137 cpr = NULL; 3138 } 3139 return cpr; 3140 } 3141 3142 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3143 { 3144 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3145 int i, rc, ulp_base_vec, ulp_msix; 3146 3147 ulp_msix = bnxt_get_ulp_msix_num(bp); 3148 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 3149 for (i = 0; i < bp->cp_nr_rings; i++) { 3150 struct bnxt_napi *bnapi = bp->bnapi[i]; 3151 struct bnxt_cp_ring_info *cpr; 3152 struct bnxt_ring_struct *ring; 3153 3154 if (!bnapi) 3155 continue; 3156 3157 cpr = &bnapi->cp_ring; 3158 cpr->bnapi = bnapi; 3159 ring = &cpr->cp_ring_struct; 3160 3161 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3162 if (rc) 3163 return rc; 3164 3165 if (ulp_msix && i >= ulp_base_vec) 3166 ring->map_idx = i + ulp_msix; 3167 else 3168 ring->map_idx = i; 3169 3170 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 3171 continue; 3172 3173 if (i < bp->rx_nr_rings) { 3174 struct bnxt_cp_ring_info *cpr2 = 3175 bnxt_alloc_cp_sub_ring(bp); 3176 3177 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; 3178 if (!cpr2) 3179 return -ENOMEM; 3180 cpr2->bnapi = bnapi; 3181 } 3182 if ((sh && i < bp->tx_nr_rings) || 3183 (!sh && i >= bp->rx_nr_rings)) { 3184 struct 
bnxt_cp_ring_info *cpr2 = 3185 bnxt_alloc_cp_sub_ring(bp); 3186 3187 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; 3188 if (!cpr2) 3189 return -ENOMEM; 3190 cpr2->bnapi = bnapi; 3191 } 3192 } 3193 return 0; 3194 } 3195 3196 static void bnxt_init_ring_struct(struct bnxt *bp) 3197 { 3198 int i; 3199 3200 for (i = 0; i < bp->cp_nr_rings; i++) { 3201 struct bnxt_napi *bnapi = bp->bnapi[i]; 3202 struct bnxt_ring_mem_info *rmem; 3203 struct bnxt_cp_ring_info *cpr; 3204 struct bnxt_rx_ring_info *rxr; 3205 struct bnxt_tx_ring_info *txr; 3206 struct bnxt_ring_struct *ring; 3207 3208 if (!bnapi) 3209 continue; 3210 3211 cpr = &bnapi->cp_ring; 3212 ring = &cpr->cp_ring_struct; 3213 rmem = &ring->ring_mem; 3214 rmem->nr_pages = bp->cp_nr_pages; 3215 rmem->page_size = HW_CMPD_RING_SIZE; 3216 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3217 rmem->dma_arr = cpr->cp_desc_mapping; 3218 rmem->vmem_size = 0; 3219 3220 rxr = bnapi->rx_ring; 3221 if (!rxr) 3222 goto skip_rx; 3223 3224 ring = &rxr->rx_ring_struct; 3225 rmem = &ring->ring_mem; 3226 rmem->nr_pages = bp->rx_nr_pages; 3227 rmem->page_size = HW_RXBD_RING_SIZE; 3228 rmem->pg_arr = (void **)rxr->rx_desc_ring; 3229 rmem->dma_arr = rxr->rx_desc_mapping; 3230 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 3231 rmem->vmem = (void **)&rxr->rx_buf_ring; 3232 3233 ring = &rxr->rx_agg_ring_struct; 3234 rmem = &ring->ring_mem; 3235 rmem->nr_pages = bp->rx_agg_nr_pages; 3236 rmem->page_size = HW_RXBD_RING_SIZE; 3237 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 3238 rmem->dma_arr = rxr->rx_agg_desc_mapping; 3239 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 3240 rmem->vmem = (void **)&rxr->rx_agg_ring; 3241 3242 skip_rx: 3243 txr = bnapi->tx_ring; 3244 if (!txr) 3245 continue; 3246 3247 ring = &txr->tx_ring_struct; 3248 rmem = &ring->ring_mem; 3249 rmem->nr_pages = bp->tx_nr_pages; 3250 rmem->page_size = HW_RXBD_RING_SIZE; 3251 rmem->pg_arr = (void **)txr->tx_desc_ring; 3252 rmem->dma_arr = txr->tx_desc_mapping; 3253 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 3254 rmem->vmem = (void **)&txr->tx_buf_ring; 3255 } 3256 } 3257 3258 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 3259 { 3260 int i; 3261 u32 prod; 3262 struct rx_bd **rx_buf_ring; 3263 3264 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 3265 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 3266 int j; 3267 struct rx_bd *rxbd; 3268 3269 rxbd = rx_buf_ring[i]; 3270 if (!rxbd) 3271 continue; 3272 3273 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 3274 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 3275 rxbd->rx_bd_opaque = prod; 3276 } 3277 } 3278 } 3279 3280 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) 3281 { 3282 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3283 struct net_device *dev = bp->dev; 3284 u32 prod; 3285 int i; 3286 3287 prod = rxr->rx_prod; 3288 for (i = 0; i < bp->rx_ring_size; i++) { 3289 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 3290 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 3291 ring_nr, i, bp->rx_ring_size); 3292 break; 3293 } 3294 prod = NEXT_RX(prod); 3295 } 3296 rxr->rx_prod = prod; 3297 3298 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 3299 return 0; 3300 3301 prod = rxr->rx_agg_prod; 3302 for (i = 0; i < bp->rx_agg_ring_size; i++) { 3303 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { 3304 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 3305 ring_nr, i, bp->rx_ring_size); 3306 break; 3307 } 3308 prod = NEXT_RX_AGG(prod); 3309 } 
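	/* The fill loop above may stop early if page allocation fails;
	 * record how far the aggregation ring was actually populated.
	 */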
3310 rxr->rx_agg_prod = prod; 3311 3312 if (rxr->rx_tpa) { 3313 dma_addr_t mapping; 3314 u8 *data; 3315 3316 for (i = 0; i < bp->max_tpa; i++) { 3317 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL); 3318 if (!data) 3319 return -ENOMEM; 3320 3321 rxr->rx_tpa[i].data = data; 3322 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 3323 rxr->rx_tpa[i].mapping = mapping; 3324 } 3325 } 3326 return 0; 3327 } 3328 3329 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 3330 { 3331 struct bnxt_rx_ring_info *rxr; 3332 struct bnxt_ring_struct *ring; 3333 u32 type; 3334 3335 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 3336 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 3337 3338 if (NET_IP_ALIGN == 2) 3339 type |= RX_BD_FLAGS_SOP; 3340 3341 rxr = &bp->rx_ring[ring_nr]; 3342 ring = &rxr->rx_ring_struct; 3343 bnxt_init_rxbd_pages(ring, type); 3344 3345 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 3346 bpf_prog_add(bp->xdp_prog, 1); 3347 rxr->xdp_prog = bp->xdp_prog; 3348 } 3349 ring->fw_ring_id = INVALID_HW_RING_ID; 3350 3351 ring = &rxr->rx_agg_ring_struct; 3352 ring->fw_ring_id = INVALID_HW_RING_ID; 3353 3354 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { 3355 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 3356 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 3357 3358 bnxt_init_rxbd_pages(ring, type); 3359 } 3360 3361 return bnxt_alloc_one_rx_ring(bp, ring_nr); 3362 } 3363 3364 static void bnxt_init_cp_rings(struct bnxt *bp) 3365 { 3366 int i, j; 3367 3368 for (i = 0; i < bp->cp_nr_rings; i++) { 3369 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 3370 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3371 3372 ring->fw_ring_id = INVALID_HW_RING_ID; 3373 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3374 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3375 for (j = 0; j < 2; j++) { 3376 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 3377 3378 if (!cpr2) 3379 continue; 3380 3381 ring = &cpr2->cp_ring_struct; 3382 ring->fw_ring_id = INVALID_HW_RING_ID; 3383 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3384 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3385 } 3386 } 3387 } 3388 3389 static int bnxt_init_rx_rings(struct bnxt *bp) 3390 { 3391 int i, rc = 0; 3392 3393 if (BNXT_RX_PAGE_MODE(bp)) { 3394 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 3395 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 3396 } else { 3397 bp->rx_offset = BNXT_RX_OFFSET; 3398 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 3399 } 3400 3401 for (i = 0; i < bp->rx_nr_rings; i++) { 3402 rc = bnxt_init_one_rx_ring(bp, i); 3403 if (rc) 3404 break; 3405 } 3406 3407 return rc; 3408 } 3409 3410 static int bnxt_init_tx_rings(struct bnxt *bp) 3411 { 3412 u16 i; 3413 3414 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 3415 MAX_SKB_FRAGS + 1); 3416 3417 for (i = 0; i < bp->tx_nr_rings; i++) { 3418 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3419 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 3420 3421 ring->fw_ring_id = INVALID_HW_RING_ID; 3422 } 3423 3424 return 0; 3425 } 3426 3427 static void bnxt_free_ring_grps(struct bnxt *bp) 3428 { 3429 kfree(bp->grp_info); 3430 bp->grp_info = NULL; 3431 } 3432 3433 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 3434 { 3435 int i; 3436 3437 if (irq_re_init) { 3438 bp->grp_info = kcalloc(bp->cp_nr_rings, 3439 sizeof(struct bnxt_ring_grp_info), 3440 GFP_KERNEL); 3441 if (!bp->grp_info) 3442 return -ENOMEM; 3443 } 3444 for (i = 0; i < bp->cp_nr_rings; i++) { 3445 if (irq_re_init) 3446 bp->grp_info[i].fw_stats_ctx = 
				INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}

static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		int j;

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}

static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}

void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		return;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	else if (bp->dev->features & NETIF_F_GRO_HW)
		bp->flags |= BNXT_FLAG_GRO;
}

/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
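 *
 * A worked example, assuming 4K pages and the usual 16-byte RX BD so that
 * RX_DESC_CNT works out to 256 descriptors per ring page: a requested
 * rx_ring_size of 600 makes bnxt_calc_nr_ring_pages() return 600 / 256 = 2,
 * incremented to 3 and then rounded up to the next power of two, i.e. 4
 * pages, which yields an rx_ring_mask of 4 * 256 - 1 = 1023.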
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
		u32 jumbo_factor;

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	agg_ring_size = ring_size * agg_factor;

	if (agg_ring_size) {
		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							      RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	max_rx_cmpl = bp->rx_ring_size;
	/* MAX TPA needs to be added because TPA_START completions are
	 * immediately recycled, so the TPA completions are not bound by
	 * the RX ring size.
	 */
	if (bp->flags & BNXT_FLAG_TPA)
		max_rx_cmpl += bp->max_tpa;
	/* RX and TPA completions are 32-byte, all others are 16-byte */
	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size, bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
}

/* Changing allocation mode of RX rings.
 * TODO: Update when extending xdp_rxq_info to support allocation modes.
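 *
 * In page mode (used with XDP) the MTU is capped at BNXT_MAX_PAGE_MODE_MTU,
 * aggregation rings are turned off and RX buffers are mapped
 * DMA_BIDIRECTIONAL so an attached XDP program can modify the frame in
 * place; LRO/GRO_HW are dropped via netdev_update_features().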
3622 */ 3623 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 3624 { 3625 if (page_mode) { 3626 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 3627 return -EOPNOTSUPP; 3628 bp->dev->max_mtu = 3629 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 3630 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 3631 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 3632 bp->rx_dir = DMA_BIDIRECTIONAL; 3633 bp->rx_skb_func = bnxt_rx_page_skb; 3634 /* Disable LRO or GRO_HW */ 3635 netdev_update_features(bp->dev); 3636 } else { 3637 bp->dev->max_mtu = bp->max_mtu; 3638 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 3639 bp->rx_dir = DMA_FROM_DEVICE; 3640 bp->rx_skb_func = bnxt_rx_skb; 3641 } 3642 return 0; 3643 } 3644 3645 static void bnxt_free_vnic_attributes(struct bnxt *bp) 3646 { 3647 int i; 3648 struct bnxt_vnic_info *vnic; 3649 struct pci_dev *pdev = bp->pdev; 3650 3651 if (!bp->vnic_info) 3652 return; 3653 3654 for (i = 0; i < bp->nr_vnics; i++) { 3655 vnic = &bp->vnic_info[i]; 3656 3657 kfree(vnic->fw_grp_ids); 3658 vnic->fw_grp_ids = NULL; 3659 3660 kfree(vnic->uc_list); 3661 vnic->uc_list = NULL; 3662 3663 if (vnic->mc_list) { 3664 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 3665 vnic->mc_list, vnic->mc_list_mapping); 3666 vnic->mc_list = NULL; 3667 } 3668 3669 if (vnic->rss_table) { 3670 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 3671 vnic->rss_table, 3672 vnic->rss_table_dma_addr); 3673 vnic->rss_table = NULL; 3674 } 3675 3676 vnic->rss_hash_key = NULL; 3677 vnic->flags = 0; 3678 } 3679 } 3680 3681 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 3682 { 3683 int i, rc = 0, size; 3684 struct bnxt_vnic_info *vnic; 3685 struct pci_dev *pdev = bp->pdev; 3686 int max_rings; 3687 3688 for (i = 0; i < bp->nr_vnics; i++) { 3689 vnic = &bp->vnic_info[i]; 3690 3691 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 3692 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 3693 3694 if (mem_size > 0) { 3695 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 3696 if (!vnic->uc_list) { 3697 rc = -ENOMEM; 3698 goto out; 3699 } 3700 } 3701 } 3702 3703 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 3704 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 3705 vnic->mc_list = 3706 dma_alloc_coherent(&pdev->dev, 3707 vnic->mc_list_size, 3708 &vnic->mc_list_mapping, 3709 GFP_KERNEL); 3710 if (!vnic->mc_list) { 3711 rc = -ENOMEM; 3712 goto out; 3713 } 3714 } 3715 3716 if (bp->flags & BNXT_FLAG_CHIP_P5) 3717 goto vnic_skip_grps; 3718 3719 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3720 max_rings = bp->rx_nr_rings; 3721 else 3722 max_rings = 1; 3723 3724 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 3725 if (!vnic->fw_grp_ids) { 3726 rc = -ENOMEM; 3727 goto out; 3728 } 3729 vnic_skip_grps: 3730 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 3731 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 3732 continue; 3733 3734 /* Allocate rss table and hash key */ 3735 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 3736 if (bp->flags & BNXT_FLAG_CHIP_P5) 3737 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 3738 3739 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 3740 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 3741 vnic->rss_table_size, 3742 &vnic->rss_table_dma_addr, 3743 GFP_KERNEL); 3744 if (!vnic->rss_table) { 3745 rc = -ENOMEM; 3746 goto out; 3747 } 3748 3749 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 3750 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 3751 } 3752 return 0; 3753 3754 out: 3755 return rc; 3756 } 3757 3758 static void bnxt_free_hwrm_resources(struct bnxt *bp) 3759 { 
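	/* Both HWRM response buffers are one DMA-coherent page each: the
	 * primary response buffer and, if it was allocated, the Kong
	 * channel response buffer.
	 */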
3760 struct pci_dev *pdev = bp->pdev; 3761 3762 if (bp->hwrm_cmd_resp_addr) { 3763 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3764 bp->hwrm_cmd_resp_dma_addr); 3765 bp->hwrm_cmd_resp_addr = NULL; 3766 } 3767 3768 if (bp->hwrm_cmd_kong_resp_addr) { 3769 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3770 bp->hwrm_cmd_kong_resp_addr, 3771 bp->hwrm_cmd_kong_resp_dma_addr); 3772 bp->hwrm_cmd_kong_resp_addr = NULL; 3773 } 3774 } 3775 3776 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) 3777 { 3778 struct pci_dev *pdev = bp->pdev; 3779 3780 if (bp->hwrm_cmd_kong_resp_addr) 3781 return 0; 3782 3783 bp->hwrm_cmd_kong_resp_addr = 3784 dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3785 &bp->hwrm_cmd_kong_resp_dma_addr, 3786 GFP_KERNEL); 3787 if (!bp->hwrm_cmd_kong_resp_addr) 3788 return -ENOMEM; 3789 3790 return 0; 3791 } 3792 3793 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 3794 { 3795 struct pci_dev *pdev = bp->pdev; 3796 3797 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3798 &bp->hwrm_cmd_resp_dma_addr, 3799 GFP_KERNEL); 3800 if (!bp->hwrm_cmd_resp_addr) 3801 return -ENOMEM; 3802 3803 return 0; 3804 } 3805 3806 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) 3807 { 3808 if (bp->hwrm_short_cmd_req_addr) { 3809 struct pci_dev *pdev = bp->pdev; 3810 3811 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3812 bp->hwrm_short_cmd_req_addr, 3813 bp->hwrm_short_cmd_req_dma_addr); 3814 bp->hwrm_short_cmd_req_addr = NULL; 3815 } 3816 } 3817 3818 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) 3819 { 3820 struct pci_dev *pdev = bp->pdev; 3821 3822 if (bp->hwrm_short_cmd_req_addr) 3823 return 0; 3824 3825 bp->hwrm_short_cmd_req_addr = 3826 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3827 &bp->hwrm_short_cmd_req_dma_addr, 3828 GFP_KERNEL); 3829 if (!bp->hwrm_short_cmd_req_addr) 3830 return -ENOMEM; 3831 3832 return 0; 3833 } 3834 3835 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 3836 { 3837 kfree(stats->hw_masks); 3838 stats->hw_masks = NULL; 3839 kfree(stats->sw_stats); 3840 stats->sw_stats = NULL; 3841 if (stats->hw_stats) { 3842 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 3843 stats->hw_stats_map); 3844 stats->hw_stats = NULL; 3845 } 3846 } 3847 3848 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 3849 bool alloc_masks) 3850 { 3851 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 3852 &stats->hw_stats_map, GFP_KERNEL); 3853 if (!stats->hw_stats) 3854 return -ENOMEM; 3855 3856 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 3857 if (!stats->sw_stats) 3858 goto stats_mem_err; 3859 3860 if (alloc_masks) { 3861 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 3862 if (!stats->hw_masks) 3863 goto stats_mem_err; 3864 } 3865 return 0; 3866 3867 stats_mem_err: 3868 bnxt_free_stats_mem(bp, stats); 3869 return -ENOMEM; 3870 } 3871 3872 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 3873 { 3874 int i; 3875 3876 for (i = 0; i < count; i++) 3877 mask_arr[i] = mask; 3878 } 3879 3880 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 3881 { 3882 int i; 3883 3884 for (i = 0; i < count; i++) 3885 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 3886 } 3887 3888 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 3889 struct bnxt_stats_mem *stats) 3890 { 3891 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 3892 struct hwrm_func_qstats_ext_input req = {0}; 3893 __le64 
*hw_masks; 3894 int rc; 3895 3896 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 3897 !(bp->flags & BNXT_FLAG_CHIP_P5)) 3898 return -EOPNOTSUPP; 3899 3900 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); 3901 req.fid = cpu_to_le16(0xffff); 3902 req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 3903 mutex_lock(&bp->hwrm_cmd_lock); 3904 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3905 if (rc) 3906 goto qstat_exit; 3907 3908 hw_masks = &resp->rx_ucast_pkts; 3909 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 3910 3911 qstat_exit: 3912 mutex_unlock(&bp->hwrm_cmd_lock); 3913 return rc; 3914 } 3915 3916 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 3917 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 3918 3919 static void bnxt_init_stats(struct bnxt *bp) 3920 { 3921 struct bnxt_napi *bnapi = bp->bnapi[0]; 3922 struct bnxt_cp_ring_info *cpr; 3923 struct bnxt_stats_mem *stats; 3924 __le64 *rx_stats, *tx_stats; 3925 int rc, rx_count, tx_count; 3926 u64 *rx_masks, *tx_masks; 3927 u64 mask; 3928 u8 flags; 3929 3930 cpr = &bnapi->cp_ring; 3931 stats = &cpr->stats; 3932 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 3933 if (rc) { 3934 if (bp->flags & BNXT_FLAG_CHIP_P5) 3935 mask = (1ULL << 48) - 1; 3936 else 3937 mask = -1ULL; 3938 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 3939 } 3940 if (bp->flags & BNXT_FLAG_PORT_STATS) { 3941 stats = &bp->port_stats; 3942 rx_stats = stats->hw_stats; 3943 rx_masks = stats->hw_masks; 3944 rx_count = sizeof(struct rx_port_stats) / 8; 3945 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 3946 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 3947 tx_count = sizeof(struct tx_port_stats) / 8; 3948 3949 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 3950 rc = bnxt_hwrm_port_qstats(bp, flags); 3951 if (rc) { 3952 mask = (1ULL << 40) - 1; 3953 3954 bnxt_fill_masks(rx_masks, mask, rx_count); 3955 bnxt_fill_masks(tx_masks, mask, tx_count); 3956 } else { 3957 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 3958 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 3959 bnxt_hwrm_port_qstats(bp, 0); 3960 } 3961 } 3962 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 3963 stats = &bp->rx_port_stats_ext; 3964 rx_stats = stats->hw_stats; 3965 rx_masks = stats->hw_masks; 3966 rx_count = sizeof(struct rx_port_stats_ext) / 8; 3967 stats = &bp->tx_port_stats_ext; 3968 tx_stats = stats->hw_stats; 3969 tx_masks = stats->hw_masks; 3970 tx_count = sizeof(struct tx_port_stats_ext) / 8; 3971 3972 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 3973 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 3974 if (rc) { 3975 mask = (1ULL << 40) - 1; 3976 3977 bnxt_fill_masks(rx_masks, mask, rx_count); 3978 if (tx_stats) 3979 bnxt_fill_masks(tx_masks, mask, tx_count); 3980 } else { 3981 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 3982 if (tx_stats) 3983 bnxt_copy_hw_masks(tx_masks, tx_stats, 3984 tx_count); 3985 bnxt_hwrm_port_qstats_ext(bp, 0); 3986 } 3987 } 3988 } 3989 3990 static void bnxt_free_port_stats(struct bnxt *bp) 3991 { 3992 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3993 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 3994 3995 bnxt_free_stats_mem(bp, &bp->port_stats); 3996 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 3997 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 3998 } 3999 4000 static void bnxt_free_ring_stats(struct bnxt *bp) 4001 { 4002 int i; 4003 4004 if (!bp->bnapi) 4005 return; 4006 4007 for (i = 0; i < bp->cp_nr_rings; i++) { 4008 struct bnxt_napi *bnapi = bp->bnapi[i]; 
4009 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4010 4011 bnxt_free_stats_mem(bp, &cpr->stats); 4012 } 4013 } 4014 4015 static int bnxt_alloc_stats(struct bnxt *bp) 4016 { 4017 u32 size, i; 4018 int rc; 4019 4020 size = bp->hw_ring_stats_size; 4021 4022 for (i = 0; i < bp->cp_nr_rings; i++) { 4023 struct bnxt_napi *bnapi = bp->bnapi[i]; 4024 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4025 4026 cpr->stats.len = size; 4027 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 4028 if (rc) 4029 return rc; 4030 4031 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4032 } 4033 4034 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 4035 return 0; 4036 4037 if (bp->port_stats.hw_stats) 4038 goto alloc_ext_stats; 4039 4040 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 4041 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 4042 if (rc) 4043 return rc; 4044 4045 bp->flags |= BNXT_FLAG_PORT_STATS; 4046 4047 alloc_ext_stats: 4048 /* Display extended statistics only if FW supports it */ 4049 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 4050 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 4051 return 0; 4052 4053 if (bp->rx_port_stats_ext.hw_stats) 4054 goto alloc_tx_ext_stats; 4055 4056 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 4057 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 4058 /* Extended stats are optional */ 4059 if (rc) 4060 return 0; 4061 4062 alloc_tx_ext_stats: 4063 if (bp->tx_port_stats_ext.hw_stats) 4064 return 0; 4065 4066 if (bp->hwrm_spec_code >= 0x10902 || 4067 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 4068 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 4069 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 4070 /* Extended stats are optional */ 4071 if (rc) 4072 return 0; 4073 } 4074 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 4075 return 0; 4076 } 4077 4078 static void bnxt_clear_ring_indices(struct bnxt *bp) 4079 { 4080 int i; 4081 4082 if (!bp->bnapi) 4083 return; 4084 4085 for (i = 0; i < bp->cp_nr_rings; i++) { 4086 struct bnxt_napi *bnapi = bp->bnapi[i]; 4087 struct bnxt_cp_ring_info *cpr; 4088 struct bnxt_rx_ring_info *rxr; 4089 struct bnxt_tx_ring_info *txr; 4090 4091 if (!bnapi) 4092 continue; 4093 4094 cpr = &bnapi->cp_ring; 4095 cpr->cp_raw_cons = 0; 4096 4097 txr = bnapi->tx_ring; 4098 if (txr) { 4099 txr->tx_prod = 0; 4100 txr->tx_cons = 0; 4101 } 4102 4103 rxr = bnapi->rx_ring; 4104 if (rxr) { 4105 rxr->rx_prod = 0; 4106 rxr->rx_agg_prod = 0; 4107 rxr->rx_sw_agg_prod = 0; 4108 rxr->rx_next_cons = 0; 4109 } 4110 } 4111 } 4112 4113 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 4114 { 4115 #ifdef CONFIG_RFS_ACCEL 4116 int i; 4117 4118 /* Under rtnl_lock and all our NAPIs have been disabled. It's 4119 * safe to delete the hash table. 
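* The ntuple filter bitmap below is freed only when IRQs are being re-initialized.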
4120 */ 4121 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 4122 struct hlist_head *head; 4123 struct hlist_node *tmp; 4124 struct bnxt_ntuple_filter *fltr; 4125 4126 head = &bp->ntp_fltr_hash_tbl[i]; 4127 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 4128 hlist_del(&fltr->hash); 4129 kfree(fltr); 4130 } 4131 } 4132 if (irq_reinit) { 4133 kfree(bp->ntp_fltr_bmap); 4134 bp->ntp_fltr_bmap = NULL; 4135 } 4136 bp->ntp_fltr_count = 0; 4137 #endif 4138 } 4139 4140 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 4141 { 4142 #ifdef CONFIG_RFS_ACCEL 4143 int i, rc = 0; 4144 4145 if (!(bp->flags & BNXT_FLAG_RFS)) 4146 return 0; 4147 4148 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 4149 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 4150 4151 bp->ntp_fltr_count = 0; 4152 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 4153 sizeof(long), 4154 GFP_KERNEL); 4155 4156 if (!bp->ntp_fltr_bmap) 4157 rc = -ENOMEM; 4158 4159 return rc; 4160 #else 4161 return 0; 4162 #endif 4163 } 4164 4165 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 4166 { 4167 bnxt_free_vnic_attributes(bp); 4168 bnxt_free_tx_rings(bp); 4169 bnxt_free_rx_rings(bp); 4170 bnxt_free_cp_rings(bp); 4171 bnxt_free_ntp_fltrs(bp, irq_re_init); 4172 if (irq_re_init) { 4173 bnxt_free_ring_stats(bp); 4174 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || 4175 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 4176 bnxt_free_port_stats(bp); 4177 bnxt_free_ring_grps(bp); 4178 bnxt_free_vnics(bp); 4179 kfree(bp->tx_ring_map); 4180 bp->tx_ring_map = NULL; 4181 kfree(bp->tx_ring); 4182 bp->tx_ring = NULL; 4183 kfree(bp->rx_ring); 4184 bp->rx_ring = NULL; 4185 kfree(bp->bnapi); 4186 bp->bnapi = NULL; 4187 } else { 4188 bnxt_clear_ring_indices(bp); 4189 } 4190 } 4191 4192 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 4193 { 4194 int i, j, rc, size, arr_size; 4195 void *bnapi; 4196 4197 if (irq_re_init) { 4198 /* Allocate bnapi mem pointer array and mem block for 4199 * all queues 4200 */ 4201 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 4202 bp->cp_nr_rings); 4203 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 4204 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 4205 if (!bnapi) 4206 return -ENOMEM; 4207 4208 bp->bnapi = bnapi; 4209 bnapi += arr_size; 4210 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 4211 bp->bnapi[i] = bnapi; 4212 bp->bnapi[i]->index = i; 4213 bp->bnapi[i]->bp = bp; 4214 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4215 struct bnxt_cp_ring_info *cpr = 4216 &bp->bnapi[i]->cp_ring; 4217 4218 cpr->cp_ring_struct.ring_mem.flags = 4219 BNXT_RMEM_RING_PTE_FLAG; 4220 } 4221 } 4222 4223 bp->rx_ring = kcalloc(bp->rx_nr_rings, 4224 sizeof(struct bnxt_rx_ring_info), 4225 GFP_KERNEL); 4226 if (!bp->rx_ring) 4227 return -ENOMEM; 4228 4229 for (i = 0; i < bp->rx_nr_rings; i++) { 4230 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4231 4232 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4233 rxr->rx_ring_struct.ring_mem.flags = 4234 BNXT_RMEM_RING_PTE_FLAG; 4235 rxr->rx_agg_ring_struct.ring_mem.flags = 4236 BNXT_RMEM_RING_PTE_FLAG; 4237 } 4238 rxr->bnapi = bp->bnapi[i]; 4239 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 4240 } 4241 4242 bp->tx_ring = kcalloc(bp->tx_nr_rings, 4243 sizeof(struct bnxt_tx_ring_info), 4244 GFP_KERNEL); 4245 if (!bp->tx_ring) 4246 return -ENOMEM; 4247 4248 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 4249 GFP_KERNEL); 4250 4251 if (!bp->tx_ring_map) 4252 return -ENOMEM; 4253 4254 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4255 j = 0; 4256 else 
4257 j = bp->rx_nr_rings; 4258 4259 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 4260 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4261 4262 if (bp->flags & BNXT_FLAG_CHIP_P5) 4263 txr->tx_ring_struct.ring_mem.flags = 4264 BNXT_RMEM_RING_PTE_FLAG; 4265 txr->bnapi = bp->bnapi[j]; 4266 bp->bnapi[j]->tx_ring = txr; 4267 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 4268 if (i >= bp->tx_nr_rings_xdp) { 4269 txr->txq_index = i - bp->tx_nr_rings_xdp; 4270 bp->bnapi[j]->tx_int = bnxt_tx_int; 4271 } else { 4272 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 4273 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 4274 } 4275 } 4276 4277 rc = bnxt_alloc_stats(bp); 4278 if (rc) 4279 goto alloc_mem_err; 4280 bnxt_init_stats(bp); 4281 4282 rc = bnxt_alloc_ntp_fltrs(bp); 4283 if (rc) 4284 goto alloc_mem_err; 4285 4286 rc = bnxt_alloc_vnics(bp); 4287 if (rc) 4288 goto alloc_mem_err; 4289 } 4290 4291 bnxt_init_ring_struct(bp); 4292 4293 rc = bnxt_alloc_rx_rings(bp); 4294 if (rc) 4295 goto alloc_mem_err; 4296 4297 rc = bnxt_alloc_tx_rings(bp); 4298 if (rc) 4299 goto alloc_mem_err; 4300 4301 rc = bnxt_alloc_cp_rings(bp); 4302 if (rc) 4303 goto alloc_mem_err; 4304 4305 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 4306 BNXT_VNIC_UCAST_FLAG; 4307 rc = bnxt_alloc_vnic_attributes(bp); 4308 if (rc) 4309 goto alloc_mem_err; 4310 return 0; 4311 4312 alloc_mem_err: 4313 bnxt_free_mem(bp, true); 4314 return rc; 4315 } 4316 4317 static void bnxt_disable_int(struct bnxt *bp) 4318 { 4319 int i; 4320 4321 if (!bp->bnapi) 4322 return; 4323 4324 for (i = 0; i < bp->cp_nr_rings; i++) { 4325 struct bnxt_napi *bnapi = bp->bnapi[i]; 4326 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4327 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4328 4329 if (ring->fw_ring_id != INVALID_HW_RING_ID) 4330 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4331 } 4332 } 4333 4334 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 4335 { 4336 struct bnxt_napi *bnapi = bp->bnapi[n]; 4337 struct bnxt_cp_ring_info *cpr; 4338 4339 cpr = &bnapi->cp_ring; 4340 return cpr->cp_ring_struct.map_idx; 4341 } 4342 4343 static void bnxt_disable_int_sync(struct bnxt *bp) 4344 { 4345 int i; 4346 4347 if (!bp->irq_tbl) 4348 return; 4349 4350 atomic_inc(&bp->intr_sem); 4351 4352 bnxt_disable_int(bp); 4353 for (i = 0; i < bp->cp_nr_rings; i++) { 4354 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 4355 4356 synchronize_irq(bp->irq_tbl[map_idx].vector); 4357 } 4358 } 4359 4360 static void bnxt_enable_int(struct bnxt *bp) 4361 { 4362 int i; 4363 4364 atomic_set(&bp->intr_sem, 0); 4365 for (i = 0; i < bp->cp_nr_rings; i++) { 4366 struct bnxt_napi *bnapi = bp->bnapi[i]; 4367 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4368 4369 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 4370 } 4371 } 4372 4373 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 4374 u16 cmpl_ring, u16 target_id) 4375 { 4376 struct input *req = request; 4377 4378 req->req_type = cpu_to_le16(req_type); 4379 req->cmpl_ring = cpu_to_le16(cmpl_ring); 4380 req->target_id = cpu_to_le16(target_id); 4381 if (bnxt_kong_hwrm_message(bp, req)) 4382 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 4383 else 4384 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 4385 } 4386 4387 static int bnxt_hwrm_to_stderr(u32 hwrm_err) 4388 { 4389 switch (hwrm_err) { 4390 case HWRM_ERR_CODE_SUCCESS: 4391 return 0; 4392 case HWRM_ERR_CODE_RESOURCE_LOCKED: 4393 return -EROFS; 4394 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: 4395 return -EACCES; 
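/* Resource allocation failures are reported as -ENOSPC. */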
4396 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: 4397 return -ENOSPC; 4398 case HWRM_ERR_CODE_INVALID_PARAMS: 4399 case HWRM_ERR_CODE_INVALID_FLAGS: 4400 case HWRM_ERR_CODE_INVALID_ENABLES: 4401 case HWRM_ERR_CODE_UNSUPPORTED_TLV: 4402 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: 4403 return -EINVAL; 4404 case HWRM_ERR_CODE_NO_BUFFER: 4405 return -ENOMEM; 4406 case HWRM_ERR_CODE_HOT_RESET_PROGRESS: 4407 case HWRM_ERR_CODE_BUSY: 4408 return -EAGAIN; 4409 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: 4410 return -EOPNOTSUPP; 4411 default: 4412 return -EIO; 4413 } 4414 } 4415 4416 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 4417 int timeout, bool silent) 4418 { 4419 int i, intr_process, rc, tmo_count; 4420 struct input *req = msg; 4421 u32 *data = msg; 4422 u8 *valid; 4423 u16 cp_ring_id, len = 0; 4424 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4425 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4426 struct hwrm_short_input short_input = {0}; 4427 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4428 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4429 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4430 4431 if (BNXT_NO_FW_ACCESS(bp) && 4432 le16_to_cpu(req->req_type) != HWRM_FUNC_RESET) 4433 return -EBUSY; 4434 4435 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4436 if (msg_len > bp->hwrm_max_ext_req_len || 4437 !bp->hwrm_short_cmd_req_addr) 4438 return -EINVAL; 4439 } 4440 4441 if (bnxt_hwrm_kong_chnl(bp, req)) { 4442 dst = BNXT_HWRM_CHNL_KONG; 4443 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4444 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4445 resp = bp->hwrm_cmd_kong_resp_addr; 4446 } 4447 4448 memset(resp, 0, PAGE_SIZE); 4449 cp_ring_id = le16_to_cpu(req->cmpl_ring); 4450 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; 4451 4452 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); 4453 /* currently supports only one outstanding message */ 4454 if (intr_process) 4455 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 4456 4457 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 4458 msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4459 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 4460 u16 max_msg_len; 4461 4462 /* Set boundary for maximum extended request length for short 4463 * cmd format. If passed up from device use the max supported 4464 * internal req length. 
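* Any bytes beyond msg_len are zeroed below before the request is sent.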
4465 */ 4466 max_msg_len = bp->hwrm_max_ext_req_len; 4467 4468 memcpy(short_cmd_req, req, msg_len); 4469 if (msg_len < max_msg_len) 4470 memset(short_cmd_req + msg_len, 0, 4471 max_msg_len - msg_len); 4472 4473 short_input.req_type = req->req_type; 4474 short_input.signature = 4475 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); 4476 short_input.size = cpu_to_le16(msg_len); 4477 short_input.req_addr = 4478 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); 4479 4480 data = (u32 *)&short_input; 4481 msg_len = sizeof(short_input); 4482 4483 /* Sync memory write before updating doorbell */ 4484 wmb(); 4485 4486 max_req_len = BNXT_HWRM_SHORT_REQ_LEN; 4487 } 4488 4489 /* Write request msg to hwrm channel */ 4490 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); 4491 4492 for (i = msg_len; i < max_req_len; i += 4) 4493 writel(0, bp->bar0 + bar_offset + i); 4494 4495 /* Ring channel doorbell */ 4496 writel(1, bp->bar0 + doorbell_offset); 4497 4498 if (!pci_is_enabled(bp->pdev)) 4499 return -ENODEV; 4500 4501 if (!timeout) 4502 timeout = DFLT_HWRM_CMD_TIMEOUT; 4503 /* Limit timeout to an upper limit */ 4504 timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT); 4505 /* convert timeout to usec */ 4506 timeout *= 1000; 4507 4508 i = 0; 4509 /* Short timeout for the first few iterations: 4510 * number of loops = number of loops for short timeout + 4511 * number of loops for standard timeout. 4512 */ 4513 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4514 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4515 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4516 4517 if (intr_process) { 4518 u16 seq_id = bp->hwrm_intr_seq_id; 4519 4520 /* Wait until hwrm response cmpl interrupt is processed */ 4521 while (bp->hwrm_intr_seq_id != (u16)~seq_id && 4522 i++ < tmo_count) { 4523 /* Abort the wait for completion if the FW health 4524 * check has failed. 4525 */ 4526 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4527 return -EBUSY; 4528 /* on first few passes, just barely sleep */ 4529 if (i < HWRM_SHORT_TIMEOUT_COUNTER) { 4530 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4531 HWRM_SHORT_MAX_TIMEOUT); 4532 } else { 4533 if (HWRM_WAIT_MUST_ABORT(bp, req)) 4534 break; 4535 usleep_range(HWRM_MIN_TIMEOUT, 4536 HWRM_MAX_TIMEOUT); 4537 } 4538 } 4539 4540 if (bp->hwrm_intr_seq_id != (u16)~seq_id) { 4541 if (!silent) 4542 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 4543 le16_to_cpu(req->req_type)); 4544 return -EBUSY; 4545 } 4546 len = le16_to_cpu(resp->resp_len); 4547 valid = ((u8 *)resp) + len - 1; 4548 } else { 4549 int j; 4550 4551 /* Check if response len is updated */ 4552 for (i = 0; i < tmo_count; i++) { 4553 /* Abort the wait for completion if the FW health 4554 * check has failed. 
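* Returning -EBUSY immediately avoids waiting out the full HWRM timeout.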
4555 */ 4556 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4557 return -EBUSY; 4558 len = le16_to_cpu(resp->resp_len); 4559 if (len) 4560 break; 4561 /* on first few passes, just barely sleep */ 4562 if (i < HWRM_SHORT_TIMEOUT_COUNTER) { 4563 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4564 HWRM_SHORT_MAX_TIMEOUT); 4565 } else { 4566 if (HWRM_WAIT_MUST_ABORT(bp, req)) 4567 goto timeout_abort; 4568 usleep_range(HWRM_MIN_TIMEOUT, 4569 HWRM_MAX_TIMEOUT); 4570 } 4571 } 4572 4573 if (i >= tmo_count) { 4574 timeout_abort: 4575 if (!silent) 4576 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 4577 HWRM_TOTAL_TIMEOUT(i), 4578 le16_to_cpu(req->req_type), 4579 le16_to_cpu(req->seq_id), len); 4580 return -EBUSY; 4581 } 4582 4583 /* Last byte of resp contains valid bit */ 4584 valid = ((u8 *)resp) + len - 1; 4585 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4586 /* make sure we read from updated DMA memory */ 4587 dma_rmb(); 4588 if (*valid) 4589 break; 4590 usleep_range(1, 5); 4591 } 4592 4593 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 4594 if (!silent) 4595 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 4596 HWRM_TOTAL_TIMEOUT(i), 4597 le16_to_cpu(req->req_type), 4598 le16_to_cpu(req->seq_id), len, 4599 *valid); 4600 return -EBUSY; 4601 } 4602 } 4603 4604 /* Zero valid bit for compatibility. Valid bit in an older spec 4605 * may become a new field in a newer spec. We must make sure that 4606 * a new field not implemented by old spec will read zero. 4607 */ 4608 *valid = 0; 4609 rc = le16_to_cpu(resp->error_code); 4610 if (rc && !silent) 4611 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 4612 le16_to_cpu(resp->req_type), 4613 le16_to_cpu(resp->seq_id), rc); 4614 return bnxt_hwrm_to_stderr(rc); 4615 } 4616 4617 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4618 { 4619 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 4620 } 4621 4622 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4623 int timeout) 4624 { 4625 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4626 } 4627 4628 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4629 { 4630 int rc; 4631 4632 mutex_lock(&bp->hwrm_cmd_lock); 4633 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 4634 mutex_unlock(&bp->hwrm_cmd_lock); 4635 return rc; 4636 } 4637 4638 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4639 int timeout) 4640 { 4641 int rc; 4642 4643 mutex_lock(&bp->hwrm_cmd_lock); 4644 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4645 mutex_unlock(&bp->hwrm_cmd_lock); 4646 return rc; 4647 } 4648 4649 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 4650 bool async_only) 4651 { 4652 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; 4653 struct hwrm_func_drv_rgtr_input req = {0}; 4654 DECLARE_BITMAP(async_events_bmap, 256); 4655 u32 *events = (u32 *)async_events_bmap; 4656 u32 flags; 4657 int rc, i; 4658 4659 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 4660 4661 req.enables = 4662 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 4663 FUNC_DRV_RGTR_REQ_ENABLES_VER | 4664 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4665 4666 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 4667 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 4668 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 4669 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 4670 if (bp->fw_cap & 
BNXT_FW_CAP_ERROR_RECOVERY) 4671 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 4672 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 4673 req.flags = cpu_to_le32(flags); 4674 req.ver_maj_8b = DRV_VER_MAJ; 4675 req.ver_min_8b = DRV_VER_MIN; 4676 req.ver_upd_8b = DRV_VER_UPD; 4677 req.ver_maj = cpu_to_le16(DRV_VER_MAJ); 4678 req.ver_min = cpu_to_le16(DRV_VER_MIN); 4679 req.ver_upd = cpu_to_le16(DRV_VER_UPD); 4680 4681 if (BNXT_PF(bp)) { 4682 u32 data[8]; 4683 int i; 4684 4685 memset(data, 0, sizeof(data)); 4686 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 4687 u16 cmd = bnxt_vf_req_snif[i]; 4688 unsigned int bit, idx; 4689 4690 idx = cmd / 32; 4691 bit = cmd % 32; 4692 data[idx] |= 1 << bit; 4693 } 4694 4695 for (i = 0; i < 8; i++) 4696 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 4697 4698 req.enables |= 4699 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 4700 } 4701 4702 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 4703 req.flags |= cpu_to_le32( 4704 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 4705 4706 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 4707 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 4708 u16 event_id = bnxt_async_events_arr[i]; 4709 4710 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 4711 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4712 continue; 4713 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 4714 } 4715 if (bmap && bmap_size) { 4716 for (i = 0; i < bmap_size; i++) { 4717 if (test_bit(i, bmap)) 4718 __set_bit(i, async_events_bmap); 4719 } 4720 } 4721 for (i = 0; i < 8; i++) 4722 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 4723 4724 if (async_only) 4725 req.enables = 4726 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4727 4728 mutex_lock(&bp->hwrm_cmd_lock); 4729 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4730 if (!rc) { 4731 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 4732 if (resp->flags & 4733 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 4734 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 4735 } 4736 mutex_unlock(&bp->hwrm_cmd_lock); 4737 return rc; 4738 } 4739 4740 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 4741 { 4742 struct hwrm_func_drv_unrgtr_input req = {0}; 4743 4744 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 4745 return 0; 4746 4747 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 4748 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4749 } 4750 4751 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 4752 { 4753 u32 rc = 0; 4754 struct hwrm_tunnel_dst_port_free_input req = {0}; 4755 4756 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 4757 req.tunnel_type = tunnel_type; 4758 4759 switch (tunnel_type) { 4760 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 4761 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 4762 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 4763 break; 4764 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 4765 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 4766 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 4767 break; 4768 default: 4769 break; 4770 } 4771 4772 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4773 if (rc) 4774 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", 4775 rc); 4776 return rc; 4777 } 4778 4779 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 4780 u8 tunnel_type) 4781 { 4782 u32 rc = 0; 4783 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 4784 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4785 4786 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 4787 4788 req.tunnel_type = tunnel_type; 4789 req.tunnel_dst_port_val = port; 4790 4791 mutex_lock(&bp->hwrm_cmd_lock); 4792 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4793 if (rc) { 4794 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 4795 rc); 4796 goto err_out; 4797 } 4798 4799 switch (tunnel_type) { 4800 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 4801 bp->vxlan_fw_dst_port_id = 4802 le16_to_cpu(resp->tunnel_dst_port_id); 4803 break; 4804 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 4805 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 4806 break; 4807 default: 4808 break; 4809 } 4810 4811 err_out: 4812 mutex_unlock(&bp->hwrm_cmd_lock); 4813 return rc; 4814 } 4815 4816 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 4817 { 4818 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 4819 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4820 4821 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 4822 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4823 4824 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 4825 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 4826 req.mask = cpu_to_le32(vnic->rx_mask); 4827 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4828 } 4829 4830 #ifdef CONFIG_RFS_ACCEL 4831 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 4832 struct bnxt_ntuple_filter *fltr) 4833 { 4834 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 4835 4836 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 4837 req.ntuple_filter_id = fltr->filter_id; 4838 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4839 } 4840 4841 #define BNXT_NTP_FLTR_FLAGS \ 4842 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 4843 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 4844 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 4845 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 4846 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 4847 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 4848 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 4849 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 4850 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 4851 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 4852 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 4853 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 4854 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 4855 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 4856 4857 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 4858 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 4859 4860 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 4861 struct bnxt_ntuple_filter *fltr) 4862 { 4863 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 4864 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 4865 struct flow_keys *keys = &fltr->fkeys; 4866 struct bnxt_vnic_info *vnic; 4867 u32 flags = 0; 4868 int rc = 0; 4869 4870 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 4871 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 4872 4873 
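/* With RFS ring table index support, steer the flow directly to the RX ring; otherwise use the VNIC created for that ring. */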
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 4874 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 4875 req.dst_id = cpu_to_le16(fltr->rxq); 4876 } else { 4877 vnic = &bp->vnic_info[fltr->rxq + 1]; 4878 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 4879 } 4880 req.flags = cpu_to_le32(flags); 4881 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 4882 4883 req.ethertype = htons(ETH_P_IP); 4884 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 4885 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 4886 req.ip_protocol = keys->basic.ip_proto; 4887 4888 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 4889 int i; 4890 4891 req.ethertype = htons(ETH_P_IPV6); 4892 req.ip_addr_type = 4893 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 4894 *(struct in6_addr *)&req.src_ipaddr[0] = 4895 keys->addrs.v6addrs.src; 4896 *(struct in6_addr *)&req.dst_ipaddr[0] = 4897 keys->addrs.v6addrs.dst; 4898 for (i = 0; i < 4; i++) { 4899 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4900 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4901 } 4902 } else { 4903 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 4904 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4905 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 4906 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4907 } 4908 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 4909 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 4910 req.tunnel_type = 4911 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 4912 } 4913 4914 req.src_port = keys->ports.src; 4915 req.src_port_mask = cpu_to_be16(0xffff); 4916 req.dst_port = keys->ports.dst; 4917 req.dst_port_mask = cpu_to_be16(0xffff); 4918 4919 mutex_lock(&bp->hwrm_cmd_lock); 4920 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4921 if (!rc) { 4922 resp = bnxt_get_hwrm_resp_addr(bp, &req); 4923 fltr->filter_id = resp->ntuple_filter_id; 4924 } 4925 mutex_unlock(&bp->hwrm_cmd_lock); 4926 return rc; 4927 } 4928 #endif 4929 4930 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 4931 u8 *mac_addr) 4932 { 4933 u32 rc = 0; 4934 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 4935 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4936 4937 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 4938 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 4939 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 4940 req.flags |= 4941 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 4942 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 4943 req.enables = 4944 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 4945 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 4946 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 4947 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 4948 req.l2_addr_mask[0] = 0xff; 4949 req.l2_addr_mask[1] = 0xff; 4950 req.l2_addr_mask[2] = 0xff; 4951 req.l2_addr_mask[3] = 0xff; 4952 req.l2_addr_mask[4] = 0xff; 4953 req.l2_addr_mask[5] = 0xff; 4954 4955 mutex_lock(&bp->hwrm_cmd_lock); 4956 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4957 if (!rc) 4958 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 4959 resp->l2_filter_id; 4960 mutex_unlock(&bp->hwrm_cmd_lock); 4961 return rc; 4962 } 4963 4964 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 4965 { 4966 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 4967 int rc = 0; 4968 4969 /* Any associated ntuple filters will also be cleared by firmware. 
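* Only the L2 filters need to be freed explicitly in this loop.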
*/ 4970 mutex_lock(&bp->hwrm_cmd_lock); 4971 for (i = 0; i < num_of_vnics; i++) { 4972 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4973 4974 for (j = 0; j < vnic->uc_filter_count; j++) { 4975 struct hwrm_cfa_l2_filter_free_input req = {0}; 4976 4977 bnxt_hwrm_cmd_hdr_init(bp, &req, 4978 HWRM_CFA_L2_FILTER_FREE, -1, -1); 4979 4980 req.l2_filter_id = vnic->fw_l2_filter_id[j]; 4981 4982 rc = _hwrm_send_message(bp, &req, sizeof(req), 4983 HWRM_CMD_TIMEOUT); 4984 } 4985 vnic->uc_filter_count = 0; 4986 } 4987 mutex_unlock(&bp->hwrm_cmd_lock); 4988 4989 return rc; 4990 } 4991 4992 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 4993 { 4994 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4995 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 4996 struct hwrm_vnic_tpa_cfg_input req = {0}; 4997 4998 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 4999 return 0; 5000 5001 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 5002 5003 if (tpa_flags) { 5004 u16 mss = bp->dev->mtu - 40; 5005 u32 nsegs, n, segs = 0, flags; 5006 5007 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 5008 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 5009 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 5010 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 5011 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 5012 if (tpa_flags & BNXT_FLAG_GRO) 5013 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 5014 5015 req.flags = cpu_to_le32(flags); 5016 5017 req.enables = 5018 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 5019 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 5020 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 5021 5022 /* Number of segs are log2 units, and first packet is not 5023 * included as part of this units. 5024 */ 5025 if (mss <= BNXT_RX_PAGE_SIZE) { 5026 n = BNXT_RX_PAGE_SIZE / mss; 5027 nsegs = (MAX_SKB_FRAGS - 1) * n; 5028 } else { 5029 n = mss / BNXT_RX_PAGE_SIZE; 5030 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 5031 n++; 5032 nsegs = (MAX_SKB_FRAGS - n) / n; 5033 } 5034 5035 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5036 segs = MAX_TPA_SEGS_P5; 5037 max_aggs = bp->max_tpa; 5038 } else { 5039 segs = ilog2(nsegs); 5040 } 5041 req.max_agg_segs = cpu_to_le16(segs); 5042 req.max_aggs = cpu_to_le16(max_aggs); 5043 5044 req.min_agg_len = cpu_to_le32(512); 5045 } 5046 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5047 5048 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5049 } 5050 5051 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 5052 { 5053 struct bnxt_ring_grp_info *grp_info; 5054 5055 grp_info = &bp->grp_info[ring->grp_idx]; 5056 return grp_info->cp_fw_ring_id; 5057 } 5058 5059 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 5060 { 5061 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5062 struct bnxt_napi *bnapi = rxr->bnapi; 5063 struct bnxt_cp_ring_info *cpr; 5064 5065 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL]; 5066 return cpr->cp_ring_struct.fw_ring_id; 5067 } else { 5068 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 5069 } 5070 } 5071 5072 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 5073 { 5074 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5075 struct bnxt_napi *bnapi = txr->bnapi; 5076 struct bnxt_cp_ring_info *cpr; 5077 5078 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL]; 5079 return cpr->cp_ring_struct.fw_ring_id; 5080 } else { 5081 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 5082 } 5083 } 5084 5085 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) 5086 { 5087 int entries; 5088 5089 if (bp->flags & BNXT_FLAG_CHIP_P5) 
5090 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 5091 else 5092 entries = HW_HASH_INDEX_SIZE; 5093 5094 bp->rss_indir_tbl_entries = entries; 5095 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), 5096 GFP_KERNEL); 5097 if (!bp->rss_indir_tbl) 5098 return -ENOMEM; 5099 return 0; 5100 } 5101 5102 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) 5103 { 5104 u16 max_rings, max_entries, pad, i; 5105 5106 if (!bp->rx_nr_rings) 5107 return; 5108 5109 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5110 max_rings = bp->rx_nr_rings - 1; 5111 else 5112 max_rings = bp->rx_nr_rings; 5113 5114 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 5115 5116 for (i = 0; i < max_entries; i++) 5117 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 5118 5119 pad = bp->rss_indir_tbl_entries - max_entries; 5120 if (pad) 5121 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); 5122 } 5123 5124 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 5125 { 5126 u16 i, tbl_size, max_ring = 0; 5127 5128 if (!bp->rss_indir_tbl) 5129 return 0; 5130 5131 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5132 for (i = 0; i < tbl_size; i++) 5133 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 5134 return max_ring; 5135 } 5136 5137 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 5138 { 5139 if (bp->flags & BNXT_FLAG_CHIP_P5) 5140 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); 5141 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5142 return 2; 5143 return 1; 5144 } 5145 5146 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 5147 { 5148 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); 5149 u16 i, j; 5150 5151 /* Fill the RSS indirection table with ring group ids */ 5152 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 5153 if (!no_rss) 5154 j = bp->rss_indir_tbl[i]; 5155 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 5156 } 5157 } 5158 5159 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 5160 struct bnxt_vnic_info *vnic) 5161 { 5162 __le16 *ring_tbl = vnic->rss_table; 5163 struct bnxt_rx_ring_info *rxr; 5164 u16 tbl_size, i; 5165 5166 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5167 5168 for (i = 0; i < tbl_size; i++) { 5169 u16 ring_id, j; 5170 5171 j = bp->rss_indir_tbl[i]; 5172 rxr = &bp->rx_ring[j]; 5173 5174 ring_id = rxr->rx_ring_struct.fw_ring_id; 5175 *ring_tbl++ = cpu_to_le16(ring_id); 5176 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5177 *ring_tbl++ = cpu_to_le16(ring_id); 5178 } 5179 } 5180 5181 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 5182 { 5183 if (bp->flags & BNXT_FLAG_CHIP_P5) 5184 __bnxt_fill_hw_rss_tbl_p5(bp, vnic); 5185 else 5186 __bnxt_fill_hw_rss_tbl(bp, vnic); 5187 } 5188 5189 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 5190 { 5191 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5192 struct hwrm_vnic_rss_cfg_input req = {0}; 5193 5194 if ((bp->flags & BNXT_FLAG_CHIP_P5) || 5195 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 5196 return 0; 5197 5198 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 5199 if (set_rss) { 5200 bnxt_fill_hw_rss_tbl(bp, vnic); 5201 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 5202 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 5203 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 5204 req.hash_key_tbl_addr = 5205 cpu_to_le64(vnic->rss_hash_key_dma_addr); 5206 } 5207 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5208 return hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); 5209 } 5210 5211 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) 5212 { 5213 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5214 struct hwrm_vnic_rss_cfg_input req = {0}; 5215 dma_addr_t ring_tbl_map; 5216 u32 i, nr_ctxs; 5217 5218 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 5219 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5220 if (!set_rss) { 5221 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5222 return 0; 5223 } 5224 bnxt_fill_hw_rss_tbl(bp, vnic); 5225 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 5226 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 5227 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 5228 ring_tbl_map = vnic->rss_table_dma_addr; 5229 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 5230 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 5231 int rc; 5232 5233 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 5234 req.ring_table_pair_index = i; 5235 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 5236 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5237 if (rc) 5238 return rc; 5239 } 5240 return 0; 5241 } 5242 5243 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 5244 { 5245 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5246 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 5247 5248 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 5249 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 5250 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 5251 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 5252 req.enables = 5253 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 5254 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 5255 /* thresholds not implemented in firmware yet */ 5256 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 5257 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 5258 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5259 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5260 } 5261 5262 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 5263 u16 ctx_idx) 5264 { 5265 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 5266 5267 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 5268 req.rss_cos_lb_ctx_id = 5269 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 5270 5271 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5272 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 5273 } 5274 5275 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 5276 { 5277 int i, j; 5278 5279 for (i = 0; i < bp->nr_vnics; i++) { 5280 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 5281 5282 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 5283 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 5284 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 5285 } 5286 } 5287 bp->rsscos_nr_ctxs = 0; 5288 } 5289 5290 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 5291 { 5292 int rc; 5293 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 5294 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 5295 bp->hwrm_cmd_resp_addr; 5296 5297 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 5298 -1); 5299 5300 mutex_lock(&bp->hwrm_cmd_lock); 5301 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5302 if (!rc) 5303 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 5304 
le16_to_cpu(resp->rss_cos_lb_ctx_id); 5305 mutex_unlock(&bp->hwrm_cmd_lock); 5306 5307 return rc; 5308 } 5309 5310 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 5311 { 5312 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 5313 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 5314 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 5315 } 5316 5317 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 5318 { 5319 unsigned int ring = 0, grp_idx; 5320 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5321 struct hwrm_vnic_cfg_input req = {0}; 5322 u16 def_vlan = 0; 5323 5324 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 5325 5326 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5327 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5328 5329 req.default_rx_ring_id = 5330 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 5331 req.default_cmpl_ring_id = 5332 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 5333 req.enables = 5334 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 5335 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 5336 goto vnic_mru; 5337 } 5338 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 5339 /* Only RSS support for now TBD: COS & LB */ 5340 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 5341 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5342 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5343 VNIC_CFG_REQ_ENABLES_MRU); 5344 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 5345 req.rss_rule = 5346 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 5347 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5348 VNIC_CFG_REQ_ENABLES_MRU); 5349 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 5350 } else { 5351 req.rss_rule = cpu_to_le16(0xffff); 5352 } 5353 5354 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 5355 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 5356 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 5357 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 5358 } else { 5359 req.cos_rule = cpu_to_le16(0xffff); 5360 } 5361 5362 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 5363 ring = 0; 5364 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 5365 ring = vnic_id - 1; 5366 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 5367 ring = bp->rx_nr_rings - 1; 5368 5369 grp_idx = bp->rx_ring[ring].bnapi->index; 5370 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 5371 req.lb_rule = cpu_to_le16(0xffff); 5372 vnic_mru: 5373 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); 5374 5375 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5376 #ifdef CONFIG_BNXT_SRIOV 5377 if (BNXT_VF(bp)) 5378 def_vlan = bp->vf.vlan; 5379 #endif 5380 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 5381 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 5382 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 5383 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 5384 5385 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5386 } 5387 5388 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 5389 { 5390 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 5391 struct hwrm_vnic_free_input req = {0}; 5392 5393 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 5394 req.vnic_id = 5395 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 5396 5397 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5398 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 5399 } 5400 } 5401 5402 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 
5403 { 5404 u16 i; 5405 5406 for (i = 0; i < bp->nr_vnics; i++) 5407 bnxt_hwrm_vnic_free_one(bp, i); 5408 } 5409 5410 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 5411 unsigned int start_rx_ring_idx, 5412 unsigned int nr_rings) 5413 { 5414 int rc = 0; 5415 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 5416 struct hwrm_vnic_alloc_input req = {0}; 5417 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5418 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5419 5420 if (bp->flags & BNXT_FLAG_CHIP_P5) 5421 goto vnic_no_ring_grps; 5422 5423 /* map ring groups to this vnic */ 5424 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 5425 grp_idx = bp->rx_ring[i].bnapi->index; 5426 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 5427 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 5428 j, nr_rings); 5429 break; 5430 } 5431 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 5432 } 5433 5434 vnic_no_ring_grps: 5435 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 5436 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 5437 if (vnic_id == 0) 5438 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 5439 5440 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 5441 5442 mutex_lock(&bp->hwrm_cmd_lock); 5443 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5444 if (!rc) 5445 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 5446 mutex_unlock(&bp->hwrm_cmd_lock); 5447 return rc; 5448 } 5449 5450 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 5451 { 5452 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5453 struct hwrm_vnic_qcaps_input req = {0}; 5454 int rc; 5455 5456 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 5457 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); 5458 if (bp->hwrm_spec_code < 0x10600) 5459 return 0; 5460 5461 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 5462 mutex_lock(&bp->hwrm_cmd_lock); 5463 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5464 if (!rc) { 5465 u32 flags = le32_to_cpu(resp->flags); 5466 5467 if (!(bp->flags & BNXT_FLAG_CHIP_P5) && 5468 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 5469 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 5470 if (flags & 5471 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 5472 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 5473 5474 /* Older P5 fw before EXT_HW_STATS support did not set 5475 * VLAN_STRIP_CAP properly. 
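* Assume VLAN RX stripping is supported on those older P5 devices.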
5476 */ 5477 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 5478 (BNXT_CHIP_P5_THOR(bp) && 5479 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 5480 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 5481 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 5482 if (bp->max_tpa_v2) { 5483 if (BNXT_CHIP_P5_THOR(bp)) 5484 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 5485 else 5486 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; 5487 } 5488 } 5489 mutex_unlock(&bp->hwrm_cmd_lock); 5490 return rc; 5491 } 5492 5493 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 5494 { 5495 u16 i; 5496 u32 rc = 0; 5497 5498 if (bp->flags & BNXT_FLAG_CHIP_P5) 5499 return 0; 5500 5501 mutex_lock(&bp->hwrm_cmd_lock); 5502 for (i = 0; i < bp->rx_nr_rings; i++) { 5503 struct hwrm_ring_grp_alloc_input req = {0}; 5504 struct hwrm_ring_grp_alloc_output *resp = 5505 bp->hwrm_cmd_resp_addr; 5506 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 5507 5508 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 5509 5510 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 5511 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 5512 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 5513 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 5514 5515 rc = _hwrm_send_message(bp, &req, sizeof(req), 5516 HWRM_CMD_TIMEOUT); 5517 if (rc) 5518 break; 5519 5520 bp->grp_info[grp_idx].fw_grp_id = 5521 le32_to_cpu(resp->ring_group_id); 5522 } 5523 mutex_unlock(&bp->hwrm_cmd_lock); 5524 return rc; 5525 } 5526 5527 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 5528 { 5529 u16 i; 5530 struct hwrm_ring_grp_free_input req = {0}; 5531 5532 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) 5533 return; 5534 5535 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 5536 5537 mutex_lock(&bp->hwrm_cmd_lock); 5538 for (i = 0; i < bp->cp_nr_rings; i++) { 5539 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 5540 continue; 5541 req.ring_group_id = 5542 cpu_to_le32(bp->grp_info[i].fw_grp_id); 5543 5544 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5545 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 5546 } 5547 mutex_unlock(&bp->hwrm_cmd_lock); 5548 } 5549 5550 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 5551 struct bnxt_ring_struct *ring, 5552 u32 ring_type, u32 map_index) 5553 { 5554 int rc = 0, err = 0; 5555 struct hwrm_ring_alloc_input req = {0}; 5556 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5557 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 5558 struct bnxt_ring_grp_info *grp_info; 5559 u16 ring_id; 5560 5561 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 5562 5563 req.enables = 0; 5564 if (rmem->nr_pages > 1) { 5565 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 5566 /* Page size is in log2 units */ 5567 req.page_size = BNXT_PAGE_SHIFT; 5568 req.page_tbl_depth = 1; 5569 } else { 5570 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 5571 } 5572 req.fbo = 0; 5573 /* Association of ring index with doorbell index and MSIX number */ 5574 req.logical_id = cpu_to_le16(map_index); 5575 5576 switch (ring_type) { 5577 case HWRM_RING_ALLOC_TX: { 5578 struct bnxt_tx_ring_info *txr; 5579 5580 txr = container_of(ring, struct bnxt_tx_ring_info, 5581 tx_ring_struct); 5582 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 5583 /* Association of transmit ring with completion ring */ 5584 grp_info = &bp->grp_info[ring->grp_idx]; 5585 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 5586 req.length = 
cpu_to_le32(bp->tx_ring_mask + 1); 5587 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5588 req.queue_id = cpu_to_le16(ring->queue_id); 5589 break; 5590 } 5591 case HWRM_RING_ALLOC_RX: 5592 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5593 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 5594 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5595 u16 flags = 0; 5596 5597 /* Association of rx ring with stats context */ 5598 grp_info = &bp->grp_info[ring->grp_idx]; 5599 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 5600 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5601 req.enables |= cpu_to_le32( 5602 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5603 if (NET_IP_ALIGN == 2) 5604 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 5605 req.flags = cpu_to_le16(flags); 5606 } 5607 break; 5608 case HWRM_RING_ALLOC_AGG: 5609 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5610 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 5611 /* Association of agg ring with rx ring */ 5612 grp_info = &bp->grp_info[ring->grp_idx]; 5613 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 5614 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 5615 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5616 req.enables |= cpu_to_le32( 5617 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 5618 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5619 } else { 5620 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5621 } 5622 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 5623 break; 5624 case HWRM_RING_ALLOC_CMPL: 5625 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 5626 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5627 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5628 /* Association of cp ring with nq */ 5629 grp_info = &bp->grp_info[map_index]; 5630 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 5631 req.cq_handle = cpu_to_le64(ring->handle); 5632 req.enables |= cpu_to_le32( 5633 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 5634 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 5635 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5636 } 5637 break; 5638 case HWRM_RING_ALLOC_NQ: 5639 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 5640 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5641 if (bp->flags & BNXT_FLAG_USING_MSIX) 5642 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5643 break; 5644 default: 5645 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 5646 ring_type); 5647 return -1; 5648 } 5649 5650 mutex_lock(&bp->hwrm_cmd_lock); 5651 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5652 err = le16_to_cpu(resp->error_code); 5653 ring_id = le16_to_cpu(resp->ring_id); 5654 mutex_unlock(&bp->hwrm_cmd_lock); 5655 5656 if (rc || err) { 5657 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. 
rc:%x err:%x\n", 5658 ring_type, rc, err); 5659 return -EIO; 5660 } 5661 ring->fw_ring_id = ring_id; 5662 return rc; 5663 } 5664 5665 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 5666 { 5667 int rc; 5668 5669 if (BNXT_PF(bp)) { 5670 struct hwrm_func_cfg_input req = {0}; 5671 5672 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 5673 req.fid = cpu_to_le16(0xffff); 5674 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5675 req.async_event_cr = cpu_to_le16(idx); 5676 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5677 } else { 5678 struct hwrm_func_vf_cfg_input req = {0}; 5679 5680 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 5681 req.enables = 5682 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5683 req.async_event_cr = cpu_to_le16(idx); 5684 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5685 } 5686 return rc; 5687 } 5688 5689 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 5690 u32 map_idx, u32 xid) 5691 { 5692 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5693 if (BNXT_PF(bp)) 5694 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5; 5695 else 5696 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5; 5697 switch (ring_type) { 5698 case HWRM_RING_ALLOC_TX: 5699 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 5700 break; 5701 case HWRM_RING_ALLOC_RX: 5702 case HWRM_RING_ALLOC_AGG: 5703 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 5704 break; 5705 case HWRM_RING_ALLOC_CMPL: 5706 db->db_key64 = DBR_PATH_L2; 5707 break; 5708 case HWRM_RING_ALLOC_NQ: 5709 db->db_key64 = DBR_PATH_L2; 5710 break; 5711 } 5712 db->db_key64 |= (u64)xid << DBR_XID_SFT; 5713 } else { 5714 db->doorbell = bp->bar1 + map_idx * 0x80; 5715 switch (ring_type) { 5716 case HWRM_RING_ALLOC_TX: 5717 db->db_key32 = DB_KEY_TX; 5718 break; 5719 case HWRM_RING_ALLOC_RX: 5720 case HWRM_RING_ALLOC_AGG: 5721 db->db_key32 = DB_KEY_RX; 5722 break; 5723 case HWRM_RING_ALLOC_CMPL: 5724 db->db_key32 = DB_KEY_CP; 5725 break; 5726 } 5727 } 5728 } 5729 5730 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 5731 { 5732 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 5733 int i, rc = 0; 5734 u32 type; 5735 5736 if (bp->flags & BNXT_FLAG_CHIP_P5) 5737 type = HWRM_RING_ALLOC_NQ; 5738 else 5739 type = HWRM_RING_ALLOC_CMPL; 5740 for (i = 0; i < bp->cp_nr_rings; i++) { 5741 struct bnxt_napi *bnapi = bp->bnapi[i]; 5742 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5743 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5744 u32 map_idx = ring->map_idx; 5745 unsigned int vector; 5746 5747 vector = bp->irq_tbl[map_idx].vector; 5748 disable_irq_nosync(vector); 5749 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5750 if (rc) { 5751 enable_irq(vector); 5752 goto err_out; 5753 } 5754 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 5755 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5756 enable_irq(vector); 5757 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 5758 5759 if (!i) { 5760 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 5761 if (rc) 5762 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 5763 } 5764 } 5765 5766 type = HWRM_RING_ALLOC_TX; 5767 for (i = 0; i < bp->tx_nr_rings; i++) { 5768 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5769 struct bnxt_ring_struct *ring; 5770 u32 map_idx; 5771 5772 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5773 struct bnxt_napi *bnapi = txr->bnapi; 5774 struct bnxt_cp_ring_info *cpr, *cpr2; 5775 u32 type2 = HWRM_RING_ALLOC_CMPL; 5776 5777 cpr = 
&bnapi->cp_ring; 5778 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL]; 5779 ring = &cpr2->cp_ring_struct; 5780 ring->handle = BNXT_TX_HDL; 5781 map_idx = bnapi->index; 5782 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5783 if (rc) 5784 goto err_out; 5785 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5786 ring->fw_ring_id); 5787 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5788 } 5789 ring = &txr->tx_ring_struct; 5790 map_idx = i; 5791 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5792 if (rc) 5793 goto err_out; 5794 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 5795 } 5796 5797 type = HWRM_RING_ALLOC_RX; 5798 for (i = 0; i < bp->rx_nr_rings; i++) { 5799 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5800 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5801 struct bnxt_napi *bnapi = rxr->bnapi; 5802 u32 map_idx = bnapi->index; 5803 5804 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5805 if (rc) 5806 goto err_out; 5807 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 5808 /* If we have agg rings, post agg buffers first. */ 5809 if (!agg_rings) 5810 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5811 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 5812 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5813 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5814 u32 type2 = HWRM_RING_ALLOC_CMPL; 5815 struct bnxt_cp_ring_info *cpr2; 5816 5817 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; 5818 ring = &cpr2->cp_ring_struct; 5819 ring->handle = BNXT_RX_HDL; 5820 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5821 if (rc) 5822 goto err_out; 5823 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5824 ring->fw_ring_id); 5825 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5826 } 5827 } 5828 5829 if (agg_rings) { 5830 type = HWRM_RING_ALLOC_AGG; 5831 for (i = 0; i < bp->rx_nr_rings; i++) { 5832 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5833 struct bnxt_ring_struct *ring = 5834 &rxr->rx_agg_ring_struct; 5835 u32 grp_idx = ring->grp_idx; 5836 u32 map_idx = grp_idx + bp->rx_nr_rings; 5837 5838 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5839 if (rc) 5840 goto err_out; 5841 5842 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 5843 ring->fw_ring_id); 5844 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 5845 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5846 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 5847 } 5848 } 5849 err_out: 5850 return rc; 5851 } 5852 5853 static int hwrm_ring_free_send_msg(struct bnxt *bp, 5854 struct bnxt_ring_struct *ring, 5855 u32 ring_type, int cmpl_ring_id) 5856 { 5857 int rc; 5858 struct hwrm_ring_free_input req = {0}; 5859 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 5860 u16 error_code; 5861 5862 if (BNXT_NO_FW_ACCESS(bp)) 5863 return 0; 5864 5865 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 5866 req.ring_type = ring_type; 5867 req.ring_id = cpu_to_le16(ring->fw_ring_id); 5868 5869 mutex_lock(&bp->hwrm_cmd_lock); 5870 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5871 error_code = le16_to_cpu(resp->error_code); 5872 mutex_unlock(&bp->hwrm_cmd_lock); 5873 5874 if (rc || error_code) { 5875 netdev_err(bp->dev, "hwrm_ring_free type %d failed. 
rc:%x err:%x\n", 5876 ring_type, rc, error_code); 5877 return -EIO; 5878 } 5879 return 0; 5880 } 5881 5882 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 5883 { 5884 u32 type; 5885 int i; 5886 5887 if (!bp->bnapi) 5888 return; 5889 5890 for (i = 0; i < bp->tx_nr_rings; i++) { 5891 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5892 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 5893 5894 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5895 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 5896 5897 hwrm_ring_free_send_msg(bp, ring, 5898 RING_FREE_REQ_RING_TYPE_TX, 5899 close_path ? cmpl_ring_id : 5900 INVALID_HW_RING_ID); 5901 ring->fw_ring_id = INVALID_HW_RING_ID; 5902 } 5903 } 5904 5905 for (i = 0; i < bp->rx_nr_rings; i++) { 5906 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5907 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5908 u32 grp_idx = rxr->bnapi->index; 5909 5910 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5911 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5912 5913 hwrm_ring_free_send_msg(bp, ring, 5914 RING_FREE_REQ_RING_TYPE_RX, 5915 close_path ? cmpl_ring_id : 5916 INVALID_HW_RING_ID); 5917 ring->fw_ring_id = INVALID_HW_RING_ID; 5918 bp->grp_info[grp_idx].rx_fw_ring_id = 5919 INVALID_HW_RING_ID; 5920 } 5921 } 5922 5923 if (bp->flags & BNXT_FLAG_CHIP_P5) 5924 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 5925 else 5926 type = RING_FREE_REQ_RING_TYPE_RX; 5927 for (i = 0; i < bp->rx_nr_rings; i++) { 5928 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5929 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 5930 u32 grp_idx = rxr->bnapi->index; 5931 5932 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5933 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5934 5935 hwrm_ring_free_send_msg(bp, ring, type, 5936 close_path ? cmpl_ring_id : 5937 INVALID_HW_RING_ID); 5938 ring->fw_ring_id = INVALID_HW_RING_ID; 5939 bp->grp_info[grp_idx].agg_fw_ring_id = 5940 INVALID_HW_RING_ID; 5941 } 5942 } 5943 5944 /* The completion rings are about to be freed. After that the 5945 * IRQ doorbell will not work anymore. So we need to disable 5946 * IRQ here. 
5947 */ 5948 bnxt_disable_int_sync(bp); 5949 5950 if (bp->flags & BNXT_FLAG_CHIP_P5) 5951 type = RING_FREE_REQ_RING_TYPE_NQ; 5952 else 5953 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 5954 for (i = 0; i < bp->cp_nr_rings; i++) { 5955 struct bnxt_napi *bnapi = bp->bnapi[i]; 5956 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5957 struct bnxt_ring_struct *ring; 5958 int j; 5959 5960 for (j = 0; j < 2; j++) { 5961 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 5962 5963 if (cpr2) { 5964 ring = &cpr2->cp_ring_struct; 5965 if (ring->fw_ring_id == INVALID_HW_RING_ID) 5966 continue; 5967 hwrm_ring_free_send_msg(bp, ring, 5968 RING_FREE_REQ_RING_TYPE_L2_CMPL, 5969 INVALID_HW_RING_ID); 5970 ring->fw_ring_id = INVALID_HW_RING_ID; 5971 } 5972 } 5973 ring = &cpr->cp_ring_struct; 5974 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5975 hwrm_ring_free_send_msg(bp, ring, type, 5976 INVALID_HW_RING_ID); 5977 ring->fw_ring_id = INVALID_HW_RING_ID; 5978 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 5979 } 5980 } 5981 } 5982 5983 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5984 bool shared); 5985 5986 static int bnxt_hwrm_get_rings(struct bnxt *bp) 5987 { 5988 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5989 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5990 struct hwrm_func_qcfg_input req = {0}; 5991 int rc; 5992 5993 if (bp->hwrm_spec_code < 0x10601) 5994 return 0; 5995 5996 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5997 req.fid = cpu_to_le16(0xffff); 5998 mutex_lock(&bp->hwrm_cmd_lock); 5999 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6000 if (rc) { 6001 mutex_unlock(&bp->hwrm_cmd_lock); 6002 return rc; 6003 } 6004 6005 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 6006 if (BNXT_NEW_RM(bp)) { 6007 u16 cp, stats; 6008 6009 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 6010 hw_resc->resv_hw_ring_grps = 6011 le32_to_cpu(resp->alloc_hw_ring_grps); 6012 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 6013 cp = le16_to_cpu(resp->alloc_cmpl_rings); 6014 stats = le16_to_cpu(resp->alloc_stat_ctx); 6015 hw_resc->resv_irqs = cp; 6016 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6017 int rx = hw_resc->resv_rx_rings; 6018 int tx = hw_resc->resv_tx_rings; 6019 6020 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6021 rx >>= 1; 6022 if (cp < (rx + tx)) { 6023 bnxt_trim_rings(bp, &rx, &tx, cp, false); 6024 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6025 rx <<= 1; 6026 hw_resc->resv_rx_rings = rx; 6027 hw_resc->resv_tx_rings = tx; 6028 } 6029 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 6030 hw_resc->resv_hw_ring_grps = rx; 6031 } 6032 hw_resc->resv_cp_rings = cp; 6033 hw_resc->resv_stat_ctxs = stats; 6034 } 6035 mutex_unlock(&bp->hwrm_cmd_lock); 6036 return 0; 6037 } 6038 6039 /* Caller must hold bp->hwrm_cmd_lock */ 6040 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 6041 { 6042 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 6043 struct hwrm_func_qcfg_input req = {0}; 6044 int rc; 6045 6046 if (bp->hwrm_spec_code < 0x10601) 6047 return 0; 6048 6049 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 6050 req.fid = cpu_to_le16(fid); 6051 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6052 if (!rc) 6053 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 6054 6055 return rc; 6056 } 6057 6058 static bool bnxt_rfs_supported(struct bnxt *bp); 6059 6060 static void 6061 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, 6062 int 
tx_rings, int rx_rings, int ring_grps, 6063 int cp_rings, int stats, int vnics) 6064 { 6065 u32 enables = 0; 6066 6067 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); 6068 req->fid = cpu_to_le16(0xffff); 6069 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 6070 req->num_tx_rings = cpu_to_le16(tx_rings); 6071 if (BNXT_NEW_RM(bp)) { 6072 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 6073 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 6074 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6075 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 6076 enables |= tx_rings + ring_grps ? 6077 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6078 enables |= rx_rings ? 6079 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6080 } else { 6081 enables |= cp_rings ? 6082 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6083 enables |= ring_grps ? 6084 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 6085 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6086 } 6087 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 6088 6089 req->num_rx_rings = cpu_to_le16(rx_rings); 6090 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6091 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 6092 req->num_msix = cpu_to_le16(cp_rings); 6093 req->num_rsscos_ctxs = 6094 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 6095 } else { 6096 req->num_cmpl_rings = cpu_to_le16(cp_rings); 6097 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 6098 req->num_rsscos_ctxs = cpu_to_le16(1); 6099 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 6100 bnxt_rfs_supported(bp)) 6101 req->num_rsscos_ctxs = 6102 cpu_to_le16(ring_grps + 1); 6103 } 6104 req->num_stat_ctxs = cpu_to_le16(stats); 6105 req->num_vnics = cpu_to_le16(vnics); 6106 } 6107 req->enables = cpu_to_le32(enables); 6108 } 6109 6110 static void 6111 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, 6112 struct hwrm_func_vf_cfg_input *req, int tx_rings, 6113 int rx_rings, int ring_grps, int cp_rings, 6114 int stats, int vnics) 6115 { 6116 u32 enables = 0; 6117 6118 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); 6119 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 6120 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 6121 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6122 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 6123 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6124 enables |= tx_rings + ring_grps ? 6125 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6126 } else { 6127 enables |= cp_rings ? 6128 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6129 enables |= ring_grps ? 6130 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 6131 } 6132 enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 6133 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 6134 6135 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 6136 req->num_tx_rings = cpu_to_le16(tx_rings); 6137 req->num_rx_rings = cpu_to_le16(rx_rings); 6138 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6139 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 6140 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 6141 } else { 6142 req->num_cmpl_rings = cpu_to_le16(cp_rings); 6143 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 6144 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 6145 } 6146 req->num_stat_ctxs = cpu_to_le16(stats); 6147 req->num_vnics = cpu_to_le16(vnics); 6148 6149 req->enables = cpu_to_le32(enables); 6150 } 6151 6152 static int 6153 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6154 int ring_grps, int cp_rings, int stats, int vnics) 6155 { 6156 struct hwrm_func_cfg_input req = {0}; 6157 int rc; 6158 6159 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6160 cp_rings, stats, vnics); 6161 if (!req.enables) 6162 return 0; 6163 6164 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6165 if (rc) 6166 return rc; 6167 6168 if (bp->hwrm_spec_code < 0x10601) 6169 bp->hw_resc.resv_tx_rings = tx_rings; 6170 6171 return bnxt_hwrm_get_rings(bp); 6172 } 6173 6174 static int 6175 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6176 int ring_grps, int cp_rings, int stats, int vnics) 6177 { 6178 struct hwrm_func_vf_cfg_input req = {0}; 6179 int rc; 6180 6181 if (!BNXT_NEW_RM(bp)) { 6182 bp->hw_resc.resv_tx_rings = tx_rings; 6183 return 0; 6184 } 6185 6186 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6187 cp_rings, stats, vnics); 6188 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6189 if (rc) 6190 return rc; 6191 6192 return bnxt_hwrm_get_rings(bp); 6193 } 6194 6195 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 6196 int cp, int stat, int vnic) 6197 { 6198 if (BNXT_PF(bp)) 6199 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 6200 vnic); 6201 else 6202 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 6203 vnic); 6204 } 6205 6206 int bnxt_nq_rings_in_use(struct bnxt *bp) 6207 { 6208 int cp = bp->cp_nr_rings; 6209 int ulp_msix, ulp_base; 6210 6211 ulp_msix = bnxt_get_ulp_msix_num(bp); 6212 if (ulp_msix) { 6213 ulp_base = bnxt_get_ulp_msix_base(bp); 6214 cp += ulp_msix; 6215 if ((ulp_base + ulp_msix) > cp) 6216 cp = ulp_base + ulp_msix; 6217 } 6218 return cp; 6219 } 6220 6221 static int bnxt_cp_rings_in_use(struct bnxt *bp) 6222 { 6223 int cp; 6224 6225 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6226 return bnxt_nq_rings_in_use(bp); 6227 6228 cp = bp->tx_nr_rings + bp->rx_nr_rings; 6229 return cp; 6230 } 6231 6232 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 6233 { 6234 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); 6235 int cp = bp->cp_nr_rings; 6236 6237 if (!ulp_stat) 6238 return cp; 6239 6240 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) 6241 return bnxt_get_ulp_msix_base(bp) + ulp_stat; 6242 6243 return cp + ulp_stat; 6244 } 6245 6246 /* Check if a default RSS map needs to be setup. This function is only 6247 * used on older firmware that does not require reserving RX rings. 
6248 */ 6249 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 6250 { 6251 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6252 6253 /* The RSS map is valid for RX rings set to resv_rx_rings */ 6254 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 6255 hw_resc->resv_rx_rings = bp->rx_nr_rings; 6256 if (!netif_is_rxfh_configured(bp->dev)) 6257 bnxt_set_dflt_rss_indir_tbl(bp); 6258 } 6259 } 6260 6261 static bool bnxt_need_reserve_rings(struct bnxt *bp) 6262 { 6263 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6264 int cp = bnxt_cp_rings_in_use(bp); 6265 int nq = bnxt_nq_rings_in_use(bp); 6266 int rx = bp->rx_nr_rings, stat; 6267 int vnic = 1, grp = rx; 6268 6269 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 6270 bp->hwrm_spec_code >= 0x10601) 6271 return true; 6272 6273 /* Old firmware does not need RX ring reservations but we still 6274 * need to setup a default RSS map when needed. With new firmware 6275 * we go through RX ring reservations first and then set up the 6276 * RSS map for the successfully reserved RX rings when needed. 6277 */ 6278 if (!BNXT_NEW_RM(bp)) { 6279 bnxt_check_rss_tbl_no_rmgr(bp); 6280 return false; 6281 } 6282 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 6283 vnic = rx + 1; 6284 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6285 rx <<= 1; 6286 stat = bnxt_get_func_stat_ctxs(bp); 6287 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 6288 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 6289 (hw_resc->resv_hw_ring_grps != grp && 6290 !(bp->flags & BNXT_FLAG_CHIP_P5))) 6291 return true; 6292 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && 6293 hw_resc->resv_irqs != nq) 6294 return true; 6295 return false; 6296 } 6297 6298 static int __bnxt_reserve_rings(struct bnxt *bp) 6299 { 6300 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6301 int cp = bnxt_nq_rings_in_use(bp); 6302 int tx = bp->tx_nr_rings; 6303 int rx = bp->rx_nr_rings; 6304 int grp, rx_rings, rc; 6305 int vnic = 1, stat; 6306 bool sh = false; 6307 6308 if (!bnxt_need_reserve_rings(bp)) 6309 return 0; 6310 6311 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 6312 sh = true; 6313 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 6314 vnic = rx + 1; 6315 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6316 rx <<= 1; 6317 grp = bp->rx_nr_rings; 6318 stat = bnxt_get_func_stat_ctxs(bp); 6319 6320 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 6321 if (rc) 6322 return rc; 6323 6324 tx = hw_resc->resv_tx_rings; 6325 if (BNXT_NEW_RM(bp)) { 6326 rx = hw_resc->resv_rx_rings; 6327 cp = hw_resc->resv_irqs; 6328 grp = hw_resc->resv_hw_ring_grps; 6329 vnic = hw_resc->resv_vnics; 6330 stat = hw_resc->resv_stat_ctxs; 6331 } 6332 6333 rx_rings = rx; 6334 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 6335 if (rx >= 2) { 6336 rx_rings = rx >> 1; 6337 } else { 6338 if (netif_running(bp->dev)) 6339 return -ENOMEM; 6340 6341 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 6342 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 6343 bp->dev->hw_features &= ~NETIF_F_LRO; 6344 bp->dev->features &= ~NETIF_F_LRO; 6345 bnxt_set_ring_params(bp); 6346 } 6347 } 6348 rx_rings = min_t(int, rx_rings, grp); 6349 cp = min_t(int, cp, bp->cp_nr_rings); 6350 if (stat > bnxt_get_ulp_stat_ctxs(bp)) 6351 stat -= bnxt_get_ulp_stat_ctxs(bp); 6352 cp = min_t(int, cp, stat); 6353 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 6354 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6355 rx = rx_rings << 1; 6356 cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; 6357 bp->tx_nr_rings = tx; 6358 6359 /* If we cannot reserve all the RX rings, reset the RSS map only 6360 * if absolutely necessary 6361 */ 6362 if (rx_rings != bp->rx_nr_rings) { 6363 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 6364 rx_rings, bp->rx_nr_rings); 6365 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) && 6366 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 6367 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 6368 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 6369 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 6370 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 6371 } 6372 } 6373 bp->rx_nr_rings = rx_rings; 6374 bp->cp_nr_rings = cp; 6375 6376 if (!tx || !rx || !cp || !grp || !vnic || !stat) 6377 return -ENOMEM; 6378 6379 if (!netif_is_rxfh_configured(bp->dev)) 6380 bnxt_set_dflt_rss_indir_tbl(bp); 6381 6382 return rc; 6383 } 6384 6385 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6386 int ring_grps, int cp_rings, int stats, 6387 int vnics) 6388 { 6389 struct hwrm_func_vf_cfg_input req = {0}; 6390 u32 flags; 6391 6392 if (!BNXT_NEW_RM(bp)) 6393 return 0; 6394 6395 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6396 cp_rings, stats, vnics); 6397 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 6398 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6399 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6400 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6401 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 6402 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 6403 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6404 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6405 6406 req.flags = cpu_to_le32(flags); 6407 return hwrm_send_message_silent(bp, &req, sizeof(req), 6408 HWRM_CMD_TIMEOUT); 6409 } 6410 6411 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6412 int ring_grps, int cp_rings, int stats, 6413 int vnics) 6414 { 6415 struct hwrm_func_cfg_input req = {0}; 6416 u32 flags; 6417 6418 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6419 cp_rings, stats, vnics); 6420 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 6421 if (BNXT_NEW_RM(bp)) { 6422 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6423 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6424 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6425 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 6426 if (bp->flags & BNXT_FLAG_CHIP_P5) 6427 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 6428 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 6429 else 6430 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6431 } 6432 6433 req.flags = cpu_to_le32(flags); 6434 return hwrm_send_message_silent(bp, &req, sizeof(req), 6435 HWRM_CMD_TIMEOUT); 6436 } 6437 6438 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6439 int ring_grps, int cp_rings, int stats, 6440 int vnics) 6441 { 6442 if (bp->hwrm_spec_code < 0x10801) 6443 return 0; 6444 6445 if (BNXT_PF(bp)) 6446 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 6447 ring_grps, cp_rings, stats, 6448 vnics); 6449 6450 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6451 cp_rings, stats, vnics); 6452 } 6453 6454 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 6455 { 6456 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6457 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6458 struct hwrm_ring_aggint_qcaps_input req = {0}; 6459 int rc; 6460 6461 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 6462 
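	/* Conservative defaults; firmware new enough to support
	 * HWRM_RING_AGGINT_QCAPS (spec 1.9.2 and later) overrides
	 * these below.
	 */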
coal_cap->num_cmpl_dma_aggr_max = 63; 6463 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 6464 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 6465 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 6466 coal_cap->int_lat_tmr_min_max = 65535; 6467 coal_cap->int_lat_tmr_max_max = 65535; 6468 coal_cap->num_cmpl_aggr_int_max = 65535; 6469 coal_cap->timer_units = 80; 6470 6471 if (bp->hwrm_spec_code < 0x10902) 6472 return; 6473 6474 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); 6475 mutex_lock(&bp->hwrm_cmd_lock); 6476 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6477 if (!rc) { 6478 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 6479 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 6480 coal_cap->num_cmpl_dma_aggr_max = 6481 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 6482 coal_cap->num_cmpl_dma_aggr_during_int_max = 6483 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 6484 coal_cap->cmpl_aggr_dma_tmr_max = 6485 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 6486 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 6487 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 6488 coal_cap->int_lat_tmr_min_max = 6489 le16_to_cpu(resp->int_lat_tmr_min_max); 6490 coal_cap->int_lat_tmr_max_max = 6491 le16_to_cpu(resp->int_lat_tmr_max_max); 6492 coal_cap->num_cmpl_aggr_int_max = 6493 le16_to_cpu(resp->num_cmpl_aggr_int_max); 6494 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 6495 } 6496 mutex_unlock(&bp->hwrm_cmd_lock); 6497 } 6498 6499 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 6500 { 6501 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6502 6503 return usec * 1000 / coal_cap->timer_units; 6504 } 6505 6506 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 6507 struct bnxt_coal *hw_coal, 6508 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 6509 { 6510 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6511 u32 cmpl_params = coal_cap->cmpl_params; 6512 u16 val, tmr, max, flags = 0; 6513 6514 max = hw_coal->bufs_per_record * 128; 6515 if (hw_coal->budget) 6516 max = hw_coal->bufs_per_record * hw_coal->budget; 6517 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 6518 6519 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 6520 req->num_cmpl_aggr_int = cpu_to_le16(val); 6521 6522 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 6523 req->num_cmpl_dma_aggr = cpu_to_le16(val); 6524 6525 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 6526 coal_cap->num_cmpl_dma_aggr_during_int_max); 6527 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 6528 6529 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 6530 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 6531 req->int_lat_tmr_max = cpu_to_le16(tmr); 6532 6533 /* min timer set to 1/2 of interrupt timer */ 6534 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 6535 val = tmr / 2; 6536 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 6537 req->int_lat_tmr_min = cpu_to_le16(val); 6538 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6539 } 6540 6541 /* buf timer set to 1/4 of interrupt timer */ 6542 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 6543 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 6544 6545 if (cmpl_params & 6546 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 6547 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 6548 val = clamp_t(u16, tmr, 1, 6549 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 6550 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 6551 
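		/* The during-interrupt DMA timer value set above is
		 * presumably only honored by firmware once the matching
		 * enable bit is set below.
		 */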
		req->enables |=
			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
	}

	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}

/* Caller holds bp->hwrm_cmd_lock */
static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
				   struct bnxt_coal *hw_coal)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 nq_params = coal_cap->nq_params;
	u16 tmr;

	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
			       -1, -1);
	req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
	req.flags =
		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
	req.int_lat_tmr_min = cpu_to_le16(tmr);
	req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal coal;

	/* Tick values in micro seconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));

	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;

	if (!bnapi->rx_ring)
		return -ENODEV;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);

	req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));

	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
				 HWRM_CMD_TIMEOUT);
}

int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_coal *hw_coal;
		u16 ring_id;

		req = &req_rx;
		if (!bnapi->rx_ring) {
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req = &req_tx;
		} else {
			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
		}
		req->ring_id = cpu_to_le16(ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (bnapi->rx_ring && bnapi->tx_ring) {
			req = &req_tx;
			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
			req->ring_id = cpu_to_le16(ring_id);
			rc = _hwrm_send_message(bp, req, sizeof(*req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;
		}
		if (bnapi->rx_ring)
			hw_coal = &bp->rx_coal;
		else
			hw_coal = &bp->tx_coal;
		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	struct hwrm_stat_ctx_clr_stats_input req0 = {0};
	struct hwrm_stat_ctx_free_input req = {0};
	int i;

	if (!bp->bnapi)
		return;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
			if (BNXT_FW_MAJ(bp) <= 20) {
				req0.stat_ctx_id = req.stat_ctx_id;
				_hwrm_send_message(bp, &req0, sizeof(req0),
						   HWRM_CMD_TIMEOUT);
			}
			_hwrm_send_message(bp, &req, sizeof(req),
					   HWRM_CMD_TIMEOUT);

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

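	/* One stats context is allocated per completion ring; the update
	 * period below is derived from stats_coal_ticks (microseconds).
	 */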
	req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u32 min_db_offset = 0;
	u16 flags;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	} else {
		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;
	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bp->max_mtu)
		bp->max_mtu = BNXT_MAX_MTU;

	if (bp->db_size)
		goto func_qcfg_exit;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (BNXT_PF(bp))
			min_db_offset = DB_PF_OFFSET_P5;
		else
			min_db_offset = DB_VF_OFFSET_P5;
	}
	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
				 1024);
	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
	    bp->db_size <= min_db_offset)
		bp->db_size = pci_resource_len(bp->pdev, 2);

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
				      struct hwrm_func_backing_store_qcaps_output *resp)
{
	struct bnxt_mem_init *mem_init;
	u16 init_mask;
	u8 init_val;
	u8 *offset;
	int i;

	init_val = resp->ctx_kind_initializer;
	init_mask = le16_to_cpu(resp->ctx_init_mask);
	offset = &resp->qp_init_offset;
	mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
		mem_init->init_val = init_val;
		mem_init->offset = BNXT_MEM_INVALID_OFFSET;
		if (!init_mask)
			continue;
		if (i == BNXT_CTX_MEM_INIT_STAT)
			offset = &resp->stat_init_offset;
		if (init_mask & (1 << i))
			mem_init->offset = *offset * 4;
		else
			mem_init->init_val = 0;
	}
	ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
	ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
}

static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
	struct hwrm_func_backing_store_qcaps_input req = {0};
	struct hwrm_func_backing_store_qcaps_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc;

	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		struct bnxt_ctx_pg_info *ctx_pg;
		struct bnxt_ctx_mem_info *ctx;
		int i, tqm_rings;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx) {
			rc = -ENOMEM;
			goto ctx_err;
		}
		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
		ctx->vnic_max_vnic_entries =
			le16_to_cpu(resp->vnic_max_vnic_entries);
		ctx->vnic_max_ring_table_entries =
			le16_to_cpu(resp->vnic_max_ring_table_entries);
		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
		ctx->tqm_min_entries_per_ring =
			le32_to_cpu(resp->tqm_min_entries_per_ring);
		ctx->tqm_max_entries_per_ring =
			le32_to_cpu(resp->tqm_max_entries_per_ring);
		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
		if (!ctx->tqm_entries_multiple)
			ctx->tqm_entries_multiple = 1;
		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
		ctx->mrav_num_entries_units =
			le16_to_cpu(resp->mrav_num_entries_units);
		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);

		bnxt_init_ctx_initializer(ctx, resp);

		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
		if (!ctx->tqm_fp_rings_count)
			ctx->tqm_fp_rings_count = bp->max_q;
else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 6913 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 6914 6915 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS; 6916 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL); 6917 if (!ctx_pg) { 6918 kfree(ctx); 6919 rc = -ENOMEM; 6920 goto ctx_err; 6921 } 6922 for (i = 0; i < tqm_rings; i++, ctx_pg++) 6923 ctx->tqm_mem[i] = ctx_pg; 6924 bp->ctx = ctx; 6925 } else { 6926 rc = 0; 6927 } 6928 ctx_err: 6929 mutex_unlock(&bp->hwrm_cmd_lock); 6930 return rc; 6931 } 6932 6933 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 6934 __le64 *pg_dir) 6935 { 6936 if (!rmem->nr_pages) 6937 return; 6938 6939 BNXT_SET_CTX_PAGE_ATTR(*pg_attr); 6940 if (rmem->depth >= 1) { 6941 if (rmem->depth == 2) 6942 *pg_attr |= 2; 6943 else 6944 *pg_attr |= 1; 6945 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 6946 } else { 6947 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 6948 } 6949 } 6950 6951 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 6952 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 6953 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 6954 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 6955 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 6956 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 6957 6958 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 6959 { 6960 struct hwrm_func_backing_store_cfg_input req = {0}; 6961 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6962 struct bnxt_ctx_pg_info *ctx_pg; 6963 u32 req_len = sizeof(req); 6964 __le32 *num_entries; 6965 __le64 *pg_dir; 6966 u32 flags = 0; 6967 u8 *pg_attr; 6968 u32 ena; 6969 int i; 6970 6971 if (!ctx) 6972 return 0; 6973 6974 if (req_len > bp->hwrm_max_ext_req_len) 6975 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; 6976 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); 6977 req.enables = cpu_to_le32(enables); 6978 6979 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 6980 ctx_pg = &ctx->qp_mem; 6981 req.qp_num_entries = cpu_to_le32(ctx_pg->entries); 6982 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); 6983 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); 6984 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); 6985 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6986 &req.qpc_pg_size_qpc_lvl, 6987 &req.qpc_page_dir); 6988 } 6989 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 6990 ctx_pg = &ctx->srq_mem; 6991 req.srq_num_entries = cpu_to_le32(ctx_pg->entries); 6992 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); 6993 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); 6994 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6995 &req.srq_pg_size_srq_lvl, 6996 &req.srq_page_dir); 6997 } 6998 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 6999 ctx_pg = &ctx->cq_mem; 7000 req.cq_num_entries = cpu_to_le32(ctx_pg->entries); 7001 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); 7002 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); 7003 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, 7004 &req.cq_page_dir); 7005 } 7006 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 7007 ctx_pg = &ctx->vnic_mem; 7008 req.vnic_num_vnic_entries = 7009 cpu_to_le16(ctx->vnic_max_vnic_entries); 7010 req.vnic_num_ring_table_entries = 7011 cpu_to_le16(ctx->vnic_max_ring_table_entries); 7012 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); 7013 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7014 &req.vnic_pg_size_vnic_lvl, 7015 &req.vnic_page_dir); 7016 } 7017 if (enables & 
FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 7018 ctx_pg = &ctx->stat_mem; 7019 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); 7020 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); 7021 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7022 &req.stat_pg_size_stat_lvl, 7023 &req.stat_page_dir); 7024 } 7025 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 7026 ctx_pg = &ctx->mrav_mem; 7027 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); 7028 if (ctx->mrav_num_entries_units) 7029 flags |= 7030 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 7031 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); 7032 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7033 &req.mrav_pg_size_mrav_lvl, 7034 &req.mrav_page_dir); 7035 } 7036 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 7037 ctx_pg = &ctx->tim_mem; 7038 req.tim_num_entries = cpu_to_le32(ctx_pg->entries); 7039 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); 7040 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 7041 &req.tim_pg_size_tim_lvl, 7042 &req.tim_page_dir); 7043 } 7044 for (i = 0, num_entries = &req.tqm_sp_num_entries, 7045 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, 7046 pg_dir = &req.tqm_sp_page_dir, 7047 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; 7048 i < BNXT_MAX_TQM_RINGS; 7049 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 7050 if (!(enables & ena)) 7051 continue; 7052 7053 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); 7054 ctx_pg = ctx->tqm_mem[i]; 7055 *num_entries = cpu_to_le32(ctx_pg->entries); 7056 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 7057 } 7058 req.flags = cpu_to_le32(flags); 7059 return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT); 7060 } 7061 7062 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 7063 struct bnxt_ctx_pg_info *ctx_pg) 7064 { 7065 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7066 7067 rmem->page_size = BNXT_PAGE_SIZE; 7068 rmem->pg_arr = ctx_pg->ctx_pg_arr; 7069 rmem->dma_arr = ctx_pg->ctx_dma_arr; 7070 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 7071 if (rmem->depth >= 1) 7072 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 7073 return bnxt_alloc_ring(bp, rmem); 7074 } 7075 7076 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 7077 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 7078 u8 depth, struct bnxt_mem_init *mem_init) 7079 { 7080 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7081 int rc; 7082 7083 if (!mem_size) 7084 return -EINVAL; 7085 7086 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 7087 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 7088 ctx_pg->nr_pages = 0; 7089 return -EINVAL; 7090 } 7091 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 7092 int nr_tbls, i; 7093 7094 rmem->depth = 2; 7095 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 7096 GFP_KERNEL); 7097 if (!ctx_pg->ctx_pg_tbl) 7098 return -ENOMEM; 7099 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 7100 rmem->nr_pages = nr_tbls; 7101 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 7102 if (rc) 7103 return rc; 7104 for (i = 0; i < nr_tbls; i++) { 7105 struct bnxt_ctx_pg_info *pg_tbl; 7106 7107 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 7108 if (!pg_tbl) 7109 return -ENOMEM; 7110 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 7111 rmem = &pg_tbl->ring_mem; 7112 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 7113 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 7114 rmem->depth = 1; 7115 rmem->nr_pages = MAX_CTX_PAGES; 7116 rmem->mem_init = mem_init; 7117 if (i == (nr_tbls - 1)) { 7118 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 7119 7120 
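				/* The last level-1 table may map fewer than
				 * MAX_CTX_PAGES pages.
				 */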
if (rem) 7121 rmem->nr_pages = rem; 7122 } 7123 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 7124 if (rc) 7125 break; 7126 } 7127 } else { 7128 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 7129 if (rmem->nr_pages > 1 || depth) 7130 rmem->depth = 1; 7131 rmem->mem_init = mem_init; 7132 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 7133 } 7134 return rc; 7135 } 7136 7137 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 7138 struct bnxt_ctx_pg_info *ctx_pg) 7139 { 7140 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7141 7142 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 7143 ctx_pg->ctx_pg_tbl) { 7144 int i, nr_tbls = rmem->nr_pages; 7145 7146 for (i = 0; i < nr_tbls; i++) { 7147 struct bnxt_ctx_pg_info *pg_tbl; 7148 struct bnxt_ring_mem_info *rmem2; 7149 7150 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 7151 if (!pg_tbl) 7152 continue; 7153 rmem2 = &pg_tbl->ring_mem; 7154 bnxt_free_ring(bp, rmem2); 7155 ctx_pg->ctx_pg_arr[i] = NULL; 7156 kfree(pg_tbl); 7157 ctx_pg->ctx_pg_tbl[i] = NULL; 7158 } 7159 kfree(ctx_pg->ctx_pg_tbl); 7160 ctx_pg->ctx_pg_tbl = NULL; 7161 } 7162 bnxt_free_ring(bp, rmem); 7163 ctx_pg->nr_pages = 0; 7164 } 7165 7166 static void bnxt_free_ctx_mem(struct bnxt *bp) 7167 { 7168 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7169 int i; 7170 7171 if (!ctx) 7172 return; 7173 7174 if (ctx->tqm_mem[0]) { 7175 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 7176 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); 7177 kfree(ctx->tqm_mem[0]); 7178 ctx->tqm_mem[0] = NULL; 7179 } 7180 7181 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); 7182 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); 7183 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); 7184 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); 7185 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); 7186 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); 7187 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); 7188 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 7189 } 7190 7191 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 7192 { 7193 struct bnxt_ctx_pg_info *ctx_pg; 7194 struct bnxt_ctx_mem_info *ctx; 7195 struct bnxt_mem_init *init; 7196 u32 mem_size, ena, entries; 7197 u32 entries_sp, min; 7198 u32 num_mr, num_ah; 7199 u32 extra_srqs = 0; 7200 u32 extra_qps = 0; 7201 u8 pg_lvl = 1; 7202 int i, rc; 7203 7204 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 7205 if (rc) { 7206 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 7207 rc); 7208 return rc; 7209 } 7210 ctx = bp->ctx; 7211 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 7212 return 0; 7213 7214 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 7215 pg_lvl = 2; 7216 extra_qps = 65536; 7217 extra_srqs = 8192; 7218 } 7219 7220 ctx_pg = &ctx->qp_mem; 7221 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + 7222 extra_qps; 7223 if (ctx->qp_entry_size) { 7224 mem_size = ctx->qp_entry_size * ctx_pg->entries; 7225 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP]; 7226 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init); 7227 if (rc) 7228 return rc; 7229 } 7230 7231 ctx_pg = &ctx->srq_mem; 7232 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; 7233 if (ctx->srq_entry_size) { 7234 mem_size = ctx->srq_entry_size * ctx_pg->entries; 7235 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ]; 7236 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init); 7237 if (rc) 7238 return rc; 7239 } 7240 7241 ctx_pg = &ctx->cq_mem; 7242 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; 7243 if (ctx->cq_entry_size) { 7244 mem_size = ctx->cq_entry_size * ctx_pg->entries; 7245 init = 
&ctx->mem_init[BNXT_CTX_MEM_INIT_CQ]; 7246 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init); 7247 if (rc) 7248 return rc; 7249 } 7250 7251 ctx_pg = &ctx->vnic_mem; 7252 ctx_pg->entries = ctx->vnic_max_vnic_entries + 7253 ctx->vnic_max_ring_table_entries; 7254 if (ctx->vnic_entry_size) { 7255 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 7256 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC]; 7257 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init); 7258 if (rc) 7259 return rc; 7260 } 7261 7262 ctx_pg = &ctx->stat_mem; 7263 ctx_pg->entries = ctx->stat_max_entries; 7264 if (ctx->stat_entry_size) { 7265 mem_size = ctx->stat_entry_size * ctx_pg->entries; 7266 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT]; 7267 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init); 7268 if (rc) 7269 return rc; 7270 } 7271 7272 ena = 0; 7273 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 7274 goto skip_rdma; 7275 7276 ctx_pg = &ctx->mrav_mem; 7277 /* 128K extra is needed to accommodate static AH context 7278 * allocation by f/w. 7279 */ 7280 num_mr = 1024 * 256; 7281 num_ah = 1024 * 128; 7282 ctx_pg->entries = num_mr + num_ah; 7283 if (ctx->mrav_entry_size) { 7284 mem_size = ctx->mrav_entry_size * ctx_pg->entries; 7285 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV]; 7286 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init); 7287 if (rc) 7288 return rc; 7289 } 7290 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 7291 if (ctx->mrav_num_entries_units) 7292 ctx_pg->entries = 7293 ((num_mr / ctx->mrav_num_entries_units) << 16) | 7294 (num_ah / ctx->mrav_num_entries_units); 7295 7296 ctx_pg = &ctx->tim_mem; 7297 ctx_pg->entries = ctx->qp_mem.entries; 7298 if (ctx->tim_entry_size) { 7299 mem_size = ctx->tim_entry_size * ctx_pg->entries; 7300 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL); 7301 if (rc) 7302 return rc; 7303 } 7304 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 7305 7306 skip_rdma: 7307 min = ctx->tqm_min_entries_per_ring; 7308 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries + 7309 2 * (extra_qps + ctx->qp_min_qp1_entries) + min; 7310 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple); 7311 entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries; 7312 entries = roundup(entries, ctx->tqm_entries_multiple); 7313 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring); 7314 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 7315 ctx_pg = ctx->tqm_mem[i]; 7316 ctx_pg->entries = i ? 
entries : entries_sp; 7317 if (ctx->tqm_entry_size) { 7318 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 7319 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, 7320 NULL); 7321 if (rc) 7322 return rc; 7323 } 7324 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 7325 } 7326 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 7327 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 7328 if (rc) { 7329 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 7330 rc); 7331 return rc; 7332 } 7333 ctx->flags |= BNXT_CTX_FLAG_INITED; 7334 return 0; 7335 } 7336 7337 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 7338 { 7339 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 7340 struct hwrm_func_resource_qcaps_input req = {0}; 7341 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7342 int rc; 7343 7344 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); 7345 req.fid = cpu_to_le16(0xffff); 7346 7347 mutex_lock(&bp->hwrm_cmd_lock); 7348 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), 7349 HWRM_CMD_TIMEOUT); 7350 if (rc) 7351 goto hwrm_func_resc_qcaps_exit; 7352 7353 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 7354 if (!all) 7355 goto hwrm_func_resc_qcaps_exit; 7356 7357 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 7358 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 7359 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 7360 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 7361 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 7362 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 7363 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 7364 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 7365 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 7366 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 7367 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 7368 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 7369 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 7370 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 7371 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 7372 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 7373 7374 if (bp->flags & BNXT_FLAG_CHIP_P5) { 7375 u16 max_msix = le16_to_cpu(resp->max_msix); 7376 7377 hw_resc->max_nqs = max_msix; 7378 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 7379 } 7380 7381 if (BNXT_PF(bp)) { 7382 struct bnxt_pf_info *pf = &bp->pf; 7383 7384 pf->vf_resv_strategy = 7385 le16_to_cpu(resp->vf_reservation_strategy); 7386 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 7387 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 7388 } 7389 hwrm_func_resc_qcaps_exit: 7390 mutex_unlock(&bp->hwrm_cmd_lock); 7391 return rc; 7392 } 7393 7394 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 7395 { 7396 int rc = 0; 7397 struct hwrm_func_qcaps_input req = {0}; 7398 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 7399 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7400 u32 flags, flags_ext; 7401 7402 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 7403 req.fid = cpu_to_le16(0xffff); 7404 7405 mutex_lock(&bp->hwrm_cmd_lock); 7406 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7407 if (rc) 7408 goto hwrm_func_qcaps_exit; 7409 7410 flags = le32_to_cpu(resp->flags); 7411 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 7412 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 7413 if (flags & 
FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 7414 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 7415 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 7416 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 7417 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 7418 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 7419 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 7420 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 7421 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 7422 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 7423 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 7424 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 7425 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 7426 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 7427 7428 flags_ext = le32_to_cpu(resp->flags_ext); 7429 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 7430 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 7431 7432 bp->tx_push_thresh = 0; 7433 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 7434 BNXT_FW_MAJ(bp) > 217) 7435 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 7436 7437 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 7438 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 7439 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 7440 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 7441 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 7442 if (!hw_resc->max_hw_ring_grps) 7443 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 7444 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 7445 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 7446 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 7447 7448 if (BNXT_PF(bp)) { 7449 struct bnxt_pf_info *pf = &bp->pf; 7450 7451 pf->fw_fid = le16_to_cpu(resp->fid); 7452 pf->port_id = le16_to_cpu(resp->port_id); 7453 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 7454 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 7455 pf->max_vfs = le16_to_cpu(resp->max_vfs); 7456 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 7457 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 7458 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 7459 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 7460 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 7461 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 7462 bp->flags &= ~BNXT_FLAG_WOL_CAP; 7463 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 7464 bp->flags |= BNXT_FLAG_WOL_CAP; 7465 } else { 7466 #ifdef CONFIG_BNXT_SRIOV 7467 struct bnxt_vf_info *vf = &bp->vf; 7468 7469 vf->fw_fid = le16_to_cpu(resp->fid); 7470 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 7471 #endif 7472 } 7473 7474 hwrm_func_qcaps_exit: 7475 mutex_unlock(&bp->hwrm_cmd_lock); 7476 return rc; 7477 } 7478 7479 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 7480 7481 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 7482 { 7483 int rc; 7484 7485 rc = __bnxt_hwrm_func_qcaps(bp); 7486 if (rc) 7487 return rc; 7488 rc = bnxt_hwrm_queue_qportcfg(bp); 7489 if (rc) { 7490 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 7491 return rc; 7492 } 7493 if (bp->hwrm_spec_code >= 0x10803) { 7494 rc = bnxt_alloc_ctx_mem(bp); 7495 if (rc) 7496 return rc; 7497 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 7498 if (!rc) 7499 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 7500 } 7501 return 0; 7502 } 7503 7504 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 7505 { 7506 struct 
hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; 7507 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 7508 int rc = 0; 7509 u32 flags; 7510 7511 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 7512 return 0; 7513 7514 resp = bp->hwrm_cmd_resp_addr; 7515 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); 7516 7517 mutex_lock(&bp->hwrm_cmd_lock); 7518 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7519 if (rc) 7520 goto hwrm_cfa_adv_qcaps_exit; 7521 7522 flags = le32_to_cpu(resp->flags); 7523 if (flags & 7524 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 7525 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 7526 7527 hwrm_cfa_adv_qcaps_exit: 7528 mutex_unlock(&bp->hwrm_cmd_lock); 7529 return rc; 7530 } 7531 7532 static int __bnxt_alloc_fw_health(struct bnxt *bp) 7533 { 7534 if (bp->fw_health) 7535 return 0; 7536 7537 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 7538 if (!bp->fw_health) 7539 return -ENOMEM; 7540 7541 return 0; 7542 } 7543 7544 static int bnxt_alloc_fw_health(struct bnxt *bp) 7545 { 7546 int rc; 7547 7548 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 7549 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 7550 return 0; 7551 7552 rc = __bnxt_alloc_fw_health(bp); 7553 if (rc) { 7554 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 7555 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 7556 return rc; 7557 } 7558 7559 return 0; 7560 } 7561 7562 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) 7563 { 7564 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + 7565 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 7566 BNXT_FW_HEALTH_WIN_MAP_OFF); 7567 } 7568 7569 bool bnxt_is_fw_healthy(struct bnxt *bp) 7570 { 7571 if (bp->fw_health && bp->fw_health->status_reliable) { 7572 u32 fw_status; 7573 7574 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 7575 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status)) 7576 return false; 7577 } 7578 7579 return true; 7580 } 7581 7582 static void bnxt_inv_fw_health_reg(struct bnxt *bp) 7583 { 7584 struct bnxt_fw_health *fw_health = bp->fw_health; 7585 u32 reg_type; 7586 7587 if (!fw_health || !fw_health->status_reliable) 7588 return; 7589 7590 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); 7591 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 7592 fw_health->status_reliable = false; 7593 } 7594 7595 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) 7596 { 7597 void __iomem *hs; 7598 u32 status_loc; 7599 u32 reg_type; 7600 u32 sig; 7601 7602 if (bp->fw_health) 7603 bp->fw_health->status_reliable = false; 7604 7605 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); 7606 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); 7607 7608 sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); 7609 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { 7610 if (!bp->chip_num) { 7611 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); 7612 bp->chip_num = readl(bp->bar0 + 7613 BNXT_FW_HEALTH_WIN_BASE + 7614 BNXT_GRC_REG_CHIP_NUM); 7615 } 7616 if (!BNXT_CHIP_P5(bp)) 7617 return; 7618 7619 status_loc = BNXT_GRC_REG_STATUS_P5 | 7620 BNXT_FW_HEALTH_REG_TYPE_BAR0; 7621 } else { 7622 status_loc = readl(hs + offsetof(struct hcomm_status, 7623 fw_status_loc)); 7624 } 7625 7626 if (__bnxt_alloc_fw_health(bp)) { 7627 netdev_warn(bp->dev, "no memory for firmware status checks\n"); 7628 return; 7629 } 7630 7631 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; 7632 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); 7633 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { 
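		/* GRC-based status registers are read through a mapped
		 * window, so remap the window to the register's location
		 * before recording the window offset.
		 */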
7634 __bnxt_map_fw_health_reg(bp, status_loc); 7635 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = 7636 BNXT_FW_HEALTH_WIN_OFF(status_loc); 7637 } 7638 7639 bp->fw_health->status_reliable = true; 7640 } 7641 7642 static int bnxt_map_fw_health_regs(struct bnxt *bp) 7643 { 7644 struct bnxt_fw_health *fw_health = bp->fw_health; 7645 u32 reg_base = 0xffffffff; 7646 int i; 7647 7648 bp->fw_health->status_reliable = false; 7649 /* Only pre-map the monitoring GRC registers using window 3 */ 7650 for (i = 0; i < 4; i++) { 7651 u32 reg = fw_health->regs[i]; 7652 7653 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 7654 continue; 7655 if (reg_base == 0xffffffff) 7656 reg_base = reg & BNXT_GRC_BASE_MASK; 7657 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 7658 return -ERANGE; 7659 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); 7660 } 7661 bp->fw_health->status_reliable = true; 7662 if (reg_base == 0xffffffff) 7663 return 0; 7664 7665 __bnxt_map_fw_health_reg(bp, reg_base); 7666 return 0; 7667 } 7668 7669 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 7670 { 7671 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 7672 struct bnxt_fw_health *fw_health = bp->fw_health; 7673 struct hwrm_error_recovery_qcfg_input req = {0}; 7674 int rc, i; 7675 7676 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 7677 return 0; 7678 7679 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); 7680 mutex_lock(&bp->hwrm_cmd_lock); 7681 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7682 if (rc) 7683 goto err_recovery_out; 7684 fw_health->flags = le32_to_cpu(resp->flags); 7685 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 7686 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 7687 rc = -EINVAL; 7688 goto err_recovery_out; 7689 } 7690 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 7691 fw_health->master_func_wait_dsecs = 7692 le32_to_cpu(resp->master_func_wait_period); 7693 fw_health->normal_func_wait_dsecs = 7694 le32_to_cpu(resp->normal_func_wait_period); 7695 fw_health->post_reset_wait_dsecs = 7696 le32_to_cpu(resp->master_func_wait_period_after_reset); 7697 fw_health->post_reset_max_wait_dsecs = 7698 le32_to_cpu(resp->max_bailout_time_after_reset); 7699 fw_health->regs[BNXT_FW_HEALTH_REG] = 7700 le32_to_cpu(resp->fw_health_status_reg); 7701 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 7702 le32_to_cpu(resp->fw_heartbeat_reg); 7703 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 7704 le32_to_cpu(resp->fw_reset_cnt_reg); 7705 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 7706 le32_to_cpu(resp->reset_inprogress_reg); 7707 fw_health->fw_reset_inprog_reg_mask = 7708 le32_to_cpu(resp->reset_inprogress_reg_mask); 7709 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 7710 if (fw_health->fw_reset_seq_cnt >= 16) { 7711 rc = -EINVAL; 7712 goto err_recovery_out; 7713 } 7714 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 7715 fw_health->fw_reset_seq_regs[i] = 7716 le32_to_cpu(resp->reset_reg[i]); 7717 fw_health->fw_reset_seq_vals[i] = 7718 le32_to_cpu(resp->reset_reg_val[i]); 7719 fw_health->fw_reset_seq_delay_msec[i] = 7720 resp->delay_after_reset[i]; 7721 } 7722 err_recovery_out: 7723 mutex_unlock(&bp->hwrm_cmd_lock); 7724 if (!rc) 7725 rc = bnxt_map_fw_health_regs(bp); 7726 if (rc) 7727 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 7728 return rc; 7729 } 7730 7731 static int bnxt_hwrm_func_reset(struct bnxt *bp) 7732 { 7733 struct hwrm_func_reset_input req = {0}; 7734 7735 bnxt_hwrm_cmd_hdr_init(bp, &req, 
HWRM_FUNC_RESET, -1, -1); 7736 req.enables = 0; 7737 7738 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 7739 } 7740 7741 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 7742 { 7743 struct hwrm_nvm_get_dev_info_output nvm_info; 7744 7745 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) 7746 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 7747 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 7748 nvm_info.nvm_cfg_ver_upd); 7749 } 7750 7751 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 7752 { 7753 int rc = 0; 7754 struct hwrm_queue_qportcfg_input req = {0}; 7755 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 7756 u8 i, j, *qptr; 7757 bool no_rdma; 7758 7759 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 7760 7761 mutex_lock(&bp->hwrm_cmd_lock); 7762 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7763 if (rc) 7764 goto qportcfg_exit; 7765 7766 if (!resp->max_configurable_queues) { 7767 rc = -EINVAL; 7768 goto qportcfg_exit; 7769 } 7770 bp->max_tc = resp->max_configurable_queues; 7771 bp->max_lltc = resp->max_configurable_lossless_queues; 7772 if (bp->max_tc > BNXT_MAX_QUEUE) 7773 bp->max_tc = BNXT_MAX_QUEUE; 7774 7775 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 7776 qptr = &resp->queue_id0; 7777 for (i = 0, j = 0; i < bp->max_tc; i++) { 7778 bp->q_info[j].queue_id = *qptr; 7779 bp->q_ids[i] = *qptr++; 7780 bp->q_info[j].queue_profile = *qptr++; 7781 bp->tc_to_qidx[j] = j; 7782 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 7783 (no_rdma && BNXT_PF(bp))) 7784 j++; 7785 } 7786 bp->max_q = bp->max_tc; 7787 bp->max_tc = max_t(u8, j, 1); 7788 7789 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 7790 bp->max_tc = 1; 7791 7792 if (bp->max_lltc > bp->max_tc) 7793 bp->max_lltc = bp->max_tc; 7794 7795 qportcfg_exit: 7796 mutex_unlock(&bp->hwrm_cmd_lock); 7797 return rc; 7798 } 7799 7800 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) 7801 { 7802 struct hwrm_ver_get_input req = {0}; 7803 int rc; 7804 7805 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 7806 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 7807 req.hwrm_intf_min = HWRM_VERSION_MINOR; 7808 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 7809 7810 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, 7811 silent); 7812 return rc; 7813 } 7814 7815 static int bnxt_hwrm_ver_get(struct bnxt *bp) 7816 { 7817 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 7818 u16 fw_maj, fw_min, fw_bld, fw_rsv; 7819 u32 dev_caps_cfg, hwrm_ver; 7820 int rc, len; 7821 7822 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 7823 mutex_lock(&bp->hwrm_cmd_lock); 7824 rc = __bnxt_hwrm_ver_get(bp, false); 7825 if (rc) 7826 goto hwrm_ver_get_exit; 7827 7828 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 7829 7830 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 7831 resp->hwrm_intf_min_8b << 8 | 7832 resp->hwrm_intf_upd_8b; 7833 if (resp->hwrm_intf_maj_8b < 1) { 7834 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 7835 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 7836 resp->hwrm_intf_upd_8b); 7837 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 7838 } 7839 7840 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 7841 HWRM_VERSION_UPDATE; 7842 7843 if (bp->hwrm_spec_code > hwrm_ver) 7844 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 7845 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 7846 HWRM_VERSION_UPDATE); 7847 else 7848 
snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 7849 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 7850 resp->hwrm_intf_upd_8b); 7851 7852 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 7853 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 7854 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 7855 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 7856 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 7857 len = FW_VER_STR_LEN; 7858 } else { 7859 fw_maj = resp->hwrm_fw_maj_8b; 7860 fw_min = resp->hwrm_fw_min_8b; 7861 fw_bld = resp->hwrm_fw_bld_8b; 7862 fw_rsv = resp->hwrm_fw_rsvd_8b; 7863 len = BC_HWRM_STR_LEN; 7864 } 7865 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 7866 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 7867 fw_rsv); 7868 7869 if (strlen(resp->active_pkg_name)) { 7870 int fw_ver_len = strlen(bp->fw_ver_str); 7871 7872 snprintf(bp->fw_ver_str + fw_ver_len, 7873 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 7874 resp->active_pkg_name); 7875 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 7876 } 7877 7878 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 7879 if (!bp->hwrm_cmd_timeout) 7880 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 7881 7882 if (resp->hwrm_intf_maj_8b >= 1) { 7883 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 7884 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 7885 } 7886 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 7887 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 7888 7889 bp->chip_num = le16_to_cpu(resp->chip_num); 7890 bp->chip_rev = resp->chip_rev; 7891 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 7892 !resp->chip_metal) 7893 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 7894 7895 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 7896 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 7897 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 7898 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 7899 7900 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 7901 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 7902 7903 if (dev_caps_cfg & 7904 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 7905 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 7906 7907 if (dev_caps_cfg & 7908 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 7909 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 7910 7911 if (dev_caps_cfg & 7912 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 7913 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 7914 7915 hwrm_ver_get_exit: 7916 mutex_unlock(&bp->hwrm_cmd_lock); 7917 return rc; 7918 } 7919 7920 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 7921 { 7922 struct hwrm_fw_set_time_input req = {0}; 7923 struct tm tm; 7924 time64_t now = ktime_get_real_seconds(); 7925 7926 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 7927 bp->hwrm_spec_code < 0x10400) 7928 return -EOPNOTSUPP; 7929 7930 time64_to_tm(now, 0, &tm); 7931 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 7932 req.year = cpu_to_le16(1900 + tm.tm_year); 7933 req.month = 1 + tm.tm_mon; 7934 req.day = tm.tm_mday; 7935 req.hour = tm.tm_hour; 7936 req.minute = tm.tm_min; 7937 req.second = tm.tm_sec; 7938 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7939 } 7940 7941 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 7942 { 7943 u64 sw_tmp; 7944 7945 hw &= mask; 7946 sw_tmp = (*sw & ~mask) | hw; 7947 if (hw < (*sw & mask)) 7948 sw_tmp += mask + 1; 7949 WRITE_ONCE(*sw, sw_tmp); 7950 } 7951 7952 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 7953 int 
count, bool ignore_zero) 7954 { 7955 int i; 7956 7957 for (i = 0; i < count; i++) { 7958 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 7959 7960 if (ignore_zero && !hw) 7961 continue; 7962 7963 if (masks[i] == -1ULL) 7964 sw_stats[i] = hw; 7965 else 7966 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 7967 } 7968 } 7969 7970 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 7971 { 7972 if (!stats->hw_stats) 7973 return; 7974 7975 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 7976 stats->hw_masks, stats->len / 8, false); 7977 } 7978 7979 static void bnxt_accumulate_all_stats(struct bnxt *bp) 7980 { 7981 struct bnxt_stats_mem *ring0_stats; 7982 bool ignore_zero = false; 7983 int i; 7984 7985 /* Chip bug. Counter intermittently becomes 0. */ 7986 if (bp->flags & BNXT_FLAG_CHIP_P5) 7987 ignore_zero = true; 7988 7989 for (i = 0; i < bp->cp_nr_rings; i++) { 7990 struct bnxt_napi *bnapi = bp->bnapi[i]; 7991 struct bnxt_cp_ring_info *cpr; 7992 struct bnxt_stats_mem *stats; 7993 7994 cpr = &bnapi->cp_ring; 7995 stats = &cpr->stats; 7996 if (!i) 7997 ring0_stats = stats; 7998 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 7999 ring0_stats->hw_masks, 8000 ring0_stats->len / 8, ignore_zero); 8001 } 8002 if (bp->flags & BNXT_FLAG_PORT_STATS) { 8003 struct bnxt_stats_mem *stats = &bp->port_stats; 8004 __le64 *hw_stats = stats->hw_stats; 8005 u64 *sw_stats = stats->sw_stats; 8006 u64 *masks = stats->hw_masks; 8007 int cnt; 8008 8009 cnt = sizeof(struct rx_port_stats) / 8; 8010 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 8011 8012 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 8013 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 8014 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 8015 cnt = sizeof(struct tx_port_stats) / 8; 8016 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 8017 } 8018 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 8019 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 8020 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 8021 } 8022 } 8023 8024 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 8025 { 8026 struct bnxt_pf_info *pf = &bp->pf; 8027 struct hwrm_port_qstats_input req = {0}; 8028 8029 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 8030 return 0; 8031 8032 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 8033 return -EOPNOTSUPP; 8034 8035 req.flags = flags; 8036 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 8037 req.port_id = cpu_to_le16(pf->port_id); 8038 req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 8039 BNXT_TX_PORT_STATS_BYTE_OFFSET); 8040 req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 8041 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8042 } 8043 8044 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 8045 { 8046 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 8047 struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; 8048 struct hwrm_port_qstats_ext_input req = {0}; 8049 struct bnxt_pf_info *pf = &bp->pf; 8050 u32 tx_stat_size; 8051 int rc; 8052 8053 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 8054 return 0; 8055 8056 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 8057 return -EOPNOTSUPP; 8058 8059 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); 8060 req.flags = flags; 8061 req.port_id = cpu_to_le16(pf->port_id); 8062 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 8063 req.rx_stat_host_addr = 
cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 8064 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 8065 sizeof(struct tx_port_stats_ext) : 0; 8066 req.tx_stat_size = cpu_to_le16(tx_stat_size); 8067 req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 8068 mutex_lock(&bp->hwrm_cmd_lock); 8069 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8070 if (!rc) { 8071 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; 8072 bp->fw_tx_stats_ext_size = tx_stat_size ? 8073 le16_to_cpu(resp->tx_stat_size) / 8 : 0; 8074 } else { 8075 bp->fw_rx_stats_ext_size = 0; 8076 bp->fw_tx_stats_ext_size = 0; 8077 } 8078 if (flags) 8079 goto qstats_done; 8080 8081 if (bp->fw_tx_stats_ext_size <= 8082 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 8083 mutex_unlock(&bp->hwrm_cmd_lock); 8084 bp->pri2cos_valid = 0; 8085 return rc; 8086 } 8087 8088 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 8089 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 8090 8091 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); 8092 if (!rc) { 8093 struct hwrm_queue_pri2cos_qcfg_output *resp2; 8094 u8 *pri2cos; 8095 int i, j; 8096 8097 resp2 = bp->hwrm_cmd_resp_addr; 8098 pri2cos = &resp2->pri0_cos_queue_id; 8099 for (i = 0; i < 8; i++) { 8100 u8 queue_id = pri2cos[i]; 8101 u8 queue_idx; 8102 8103 /* Per port queue IDs start from 0, 10, 20, etc */ 8104 queue_idx = queue_id % 10; 8105 if (queue_idx > BNXT_MAX_QUEUE) { 8106 bp->pri2cos_valid = false; 8107 goto qstats_done; 8108 } 8109 for (j = 0; j < bp->max_q; j++) { 8110 if (bp->q_ids[j] == queue_id) 8111 bp->pri2cos_idx[i] = queue_idx; 8112 } 8113 } 8114 bp->pri2cos_valid = 1; 8115 } 8116 qstats_done: 8117 mutex_unlock(&bp->hwrm_cmd_lock); 8118 return rc; 8119 } 8120 8121 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 8122 { 8123 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID) 8124 bnxt_hwrm_tunnel_dst_port_free( 8125 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 8126 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID) 8127 bnxt_hwrm_tunnel_dst_port_free( 8128 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 8129 } 8130 8131 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 8132 { 8133 int rc, i; 8134 u32 tpa_flags = 0; 8135 8136 if (set_tpa) 8137 tpa_flags = bp->flags & BNXT_FLAG_TPA; 8138 else if (BNXT_NO_FW_ACCESS(bp)) 8139 return 0; 8140 for (i = 0; i < bp->nr_vnics; i++) { 8141 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 8142 if (rc) { 8143 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 8144 i, rc); 8145 return rc; 8146 } 8147 } 8148 return 0; 8149 } 8150 8151 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 8152 { 8153 int i; 8154 8155 for (i = 0; i < bp->nr_vnics; i++) 8156 bnxt_hwrm_vnic_set_rss(bp, i, false); 8157 } 8158 8159 static void bnxt_clear_vnic(struct bnxt *bp) 8160 { 8161 if (!bp->vnic_info) 8162 return; 8163 8164 bnxt_hwrm_clear_vnic_filter(bp); 8165 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { 8166 /* clear all RSS setting before free vnic ctx */ 8167 bnxt_hwrm_clear_vnic_rss(bp); 8168 bnxt_hwrm_vnic_ctx_free(bp); 8169 } 8170 /* before free the vnic, undo the vnic tpa settings */ 8171 if (bp->flags & BNXT_FLAG_TPA) 8172 bnxt_set_tpa(bp, false); 8173 bnxt_hwrm_vnic_free(bp); 8174 if (bp->flags & BNXT_FLAG_CHIP_P5) 8175 bnxt_hwrm_vnic_ctx_free(bp); 8176 } 8177 8178 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 8179 bool irq_re_init) 8180 { 8181 bnxt_clear_vnic(bp); 8182 
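	/* VNIC state has been cleared above; now release rings and ring
	 * groups, and, when interrupts are being re-initialized, the stat
	 * contexts and tunnel ports as well.
	 */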
bnxt_hwrm_ring_free(bp, close_path); 8183 bnxt_hwrm_ring_grp_free(bp); 8184 if (irq_re_init) { 8185 bnxt_hwrm_stat_ctx_free(bp); 8186 bnxt_hwrm_free_tunnel_ports(bp); 8187 } 8188 } 8189 8190 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 8191 { 8192 struct hwrm_func_cfg_input req = {0}; 8193 8194 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 8195 req.fid = cpu_to_le16(0xffff); 8196 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 8197 if (br_mode == BRIDGE_MODE_VEB) 8198 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 8199 else if (br_mode == BRIDGE_MODE_VEPA) 8200 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 8201 else 8202 return -EINVAL; 8203 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8204 } 8205 8206 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 8207 { 8208 struct hwrm_func_cfg_input req = {0}; 8209 8210 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 8211 return 0; 8212 8213 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 8214 req.fid = cpu_to_le16(0xffff); 8215 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 8216 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 8217 if (size == 128) 8218 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 8219 8220 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8221 } 8222 8223 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 8224 { 8225 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 8226 int rc; 8227 8228 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 8229 goto skip_rss_ctx; 8230 8231 /* allocate context for vnic */ 8232 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 8233 if (rc) { 8234 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 8235 vnic_id, rc); 8236 goto vnic_setup_err; 8237 } 8238 bp->rsscos_nr_ctxs++; 8239 8240 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8241 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 8242 if (rc) { 8243 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 8244 vnic_id, rc); 8245 goto vnic_setup_err; 8246 } 8247 bp->rsscos_nr_ctxs++; 8248 } 8249 8250 skip_rss_ctx: 8251 /* configure default vnic, ring grp */ 8252 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 8253 if (rc) { 8254 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 8255 vnic_id, rc); 8256 goto vnic_setup_err; 8257 } 8258 8259 /* Enable RSS hashing on vnic */ 8260 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 8261 if (rc) { 8262 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 8263 vnic_id, rc); 8264 goto vnic_setup_err; 8265 } 8266 8267 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 8268 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 8269 if (rc) { 8270 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 8271 vnic_id, rc); 8272 } 8273 } 8274 8275 vnic_setup_err: 8276 return rc; 8277 } 8278 8279 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 8280 { 8281 int rc, i, nr_ctxs; 8282 8283 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 8284 for (i = 0; i < nr_ctxs; i++) { 8285 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 8286 if (rc) { 8287 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 8288 vnic_id, i, rc); 8289 break; 8290 } 8291 bp->rsscos_nr_ctxs++; 8292 } 8293 if (i < nr_ctxs) 8294 return -ENOMEM; 8295 8296 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 8297 if (rc) { 8298 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 8299 vnic_id, rc); 8300 return rc; 8301 } 8302 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 8303 if (rc) { 8304 
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 8305 vnic_id, rc); 8306 return rc; 8307 } 8308 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 8309 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 8310 if (rc) { 8311 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 8312 vnic_id, rc); 8313 } 8314 } 8315 return rc; 8316 } 8317 8318 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 8319 { 8320 if (bp->flags & BNXT_FLAG_CHIP_P5) 8321 return __bnxt_setup_vnic_p5(bp, vnic_id); 8322 else 8323 return __bnxt_setup_vnic(bp, vnic_id); 8324 } 8325 8326 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 8327 { 8328 #ifdef CONFIG_RFS_ACCEL 8329 int i, rc = 0; 8330 8331 if (bp->flags & BNXT_FLAG_CHIP_P5) 8332 return 0; 8333 8334 for (i = 0; i < bp->rx_nr_rings; i++) { 8335 struct bnxt_vnic_info *vnic; 8336 u16 vnic_id = i + 1; 8337 u16 ring_id = i; 8338 8339 if (vnic_id >= bp->nr_vnics) 8340 break; 8341 8342 vnic = &bp->vnic_info[vnic_id]; 8343 vnic->flags |= BNXT_VNIC_RFS_FLAG; 8344 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 8345 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 8346 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 8347 if (rc) { 8348 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 8349 vnic_id, rc); 8350 break; 8351 } 8352 rc = bnxt_setup_vnic(bp, vnic_id); 8353 if (rc) 8354 break; 8355 } 8356 return rc; 8357 #else 8358 return 0; 8359 #endif 8360 } 8361 8362 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ 8363 static bool bnxt_promisc_ok(struct bnxt *bp) 8364 { 8365 #ifdef CONFIG_BNXT_SRIOV 8366 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) 8367 return false; 8368 #endif 8369 return true; 8370 } 8371 8372 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 8373 { 8374 unsigned int rc = 0; 8375 8376 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 8377 if (rc) { 8378 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 8379 rc); 8380 return rc; 8381 } 8382 8383 rc = bnxt_hwrm_vnic_cfg(bp, 1); 8384 if (rc) { 8385 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 8386 rc); 8387 return rc; 8388 } 8389 return rc; 8390 } 8391 8392 static int bnxt_cfg_rx_mode(struct bnxt *); 8393 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 8394 8395 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 8396 { 8397 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 8398 int rc = 0; 8399 unsigned int rx_nr_rings = bp->rx_nr_rings; 8400 8401 if (irq_re_init) { 8402 rc = bnxt_hwrm_stat_ctx_alloc(bp); 8403 if (rc) { 8404 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 8405 rc); 8406 goto err_out; 8407 } 8408 } 8409 8410 rc = bnxt_hwrm_ring_alloc(bp); 8411 if (rc) { 8412 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 8413 goto err_out; 8414 } 8415 8416 rc = bnxt_hwrm_ring_grp_alloc(bp); 8417 if (rc) { 8418 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 8419 goto err_out; 8420 } 8421 8422 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8423 rx_nr_rings--; 8424 8425 /* default vnic 0 */ 8426 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 8427 if (rc) { 8428 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 8429 goto err_out; 8430 } 8431 8432 rc = bnxt_setup_vnic(bp, 0); 8433 if (rc) 8434 goto err_out; 8435 8436 if (bp->flags & BNXT_FLAG_RFS) { 8437 rc = bnxt_alloc_rfs_vnics(bp); 8438 if (rc) 8439 goto err_out; 8440 } 8441 8442 if (bp->flags & BNXT_FLAG_TPA) { 8443 rc = bnxt_set_tpa(bp, true); 8444 if (rc) 8445 goto err_out; 8446 } 8447 8448 if 
(BNXT_VF(bp)) 8449 bnxt_update_vf_mac(bp); 8450 8451 /* Filter for default vnic 0 */ 8452 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 8453 if (rc) { 8454 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 8455 goto err_out; 8456 } 8457 vnic->uc_filter_count = 1; 8458 8459 vnic->rx_mask = 0; 8460 if (bp->dev->flags & IFF_BROADCAST) 8461 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 8462 8463 if (bp->dev->flags & IFF_PROMISC) 8464 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 8465 8466 if (bp->dev->flags & IFF_ALLMULTI) { 8467 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 8468 vnic->mc_list_count = 0; 8469 } else { 8470 u32 mask = 0; 8471 8472 bnxt_mc_list_updated(bp, &mask); 8473 vnic->rx_mask |= mask; 8474 } 8475 8476 rc = bnxt_cfg_rx_mode(bp); 8477 if (rc) 8478 goto err_out; 8479 8480 rc = bnxt_hwrm_set_coal(bp); 8481 if (rc) 8482 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 8483 rc); 8484 8485 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8486 rc = bnxt_setup_nitroa0_vnic(bp); 8487 if (rc) 8488 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 8489 rc); 8490 } 8491 8492 if (BNXT_VF(bp)) { 8493 bnxt_hwrm_func_qcfg(bp); 8494 netdev_update_features(bp->dev); 8495 } 8496 8497 return 0; 8498 8499 err_out: 8500 bnxt_hwrm_resource_free(bp, 0, true); 8501 8502 return rc; 8503 } 8504 8505 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 8506 { 8507 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 8508 return 0; 8509 } 8510 8511 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 8512 { 8513 bnxt_init_cp_rings(bp); 8514 bnxt_init_rx_rings(bp); 8515 bnxt_init_tx_rings(bp); 8516 bnxt_init_ring_grps(bp, irq_re_init); 8517 bnxt_init_vnics(bp); 8518 8519 return bnxt_init_chip(bp, irq_re_init); 8520 } 8521 8522 static int bnxt_set_real_num_queues(struct bnxt *bp) 8523 { 8524 int rc; 8525 struct net_device *dev = bp->dev; 8526 8527 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 8528 bp->tx_nr_rings_xdp); 8529 if (rc) 8530 return rc; 8531 8532 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 8533 if (rc) 8534 return rc; 8535 8536 #ifdef CONFIG_RFS_ACCEL 8537 if (bp->flags & BNXT_FLAG_RFS) 8538 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 8539 #endif 8540 8541 return rc; 8542 } 8543 8544 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 8545 bool shared) 8546 { 8547 int _rx = *rx, _tx = *tx; 8548 8549 if (shared) { 8550 *rx = min_t(int, _rx, max); 8551 *tx = min_t(int, _tx, max); 8552 } else { 8553 if (max < 2) 8554 return -ENOMEM; 8555 8556 while (_rx + _tx > max) { 8557 if (_rx > _tx && _rx > 1) 8558 _rx--; 8559 else if (_tx > 1) 8560 _tx--; 8561 } 8562 *rx = _rx; 8563 *tx = _tx; 8564 } 8565 return 0; 8566 } 8567 8568 static void bnxt_setup_msix(struct bnxt *bp) 8569 { 8570 const int len = sizeof(bp->irq_tbl[0].name); 8571 struct net_device *dev = bp->dev; 8572 int tcs, i; 8573 8574 tcs = netdev_get_num_tc(dev); 8575 if (tcs) { 8576 int i, off, count; 8577 8578 for (i = 0; i < tcs; i++) { 8579 count = bp->tx_nr_rings_per_tc; 8580 off = i * count; 8581 netdev_set_tc_queue(dev, i, count, off); 8582 } 8583 } 8584 8585 for (i = 0; i < bp->cp_nr_rings; i++) { 8586 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8587 char *attr; 8588 8589 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 8590 attr = "TxRx"; 8591 else if (i < bp->rx_nr_rings) 8592 attr = "rx"; 8593 else 8594 attr = "tx"; 8595 8596 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 8597 attr, i); 
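		/* e.g. "eth0-TxRx-0" with shared rings, or "eth0-rx-0" /
		 * "eth0-tx-2" when RX and TX completions use separate vectors.
		 */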
8598 bp->irq_tbl[map_idx].handler = bnxt_msix; 8599 } 8600 } 8601 8602 static void bnxt_setup_inta(struct bnxt *bp) 8603 { 8604 const int len = sizeof(bp->irq_tbl[0].name); 8605 8606 if (netdev_get_num_tc(bp->dev)) 8607 netdev_reset_tc(bp->dev); 8608 8609 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 8610 0); 8611 bp->irq_tbl[0].handler = bnxt_inta; 8612 } 8613 8614 static int bnxt_init_int_mode(struct bnxt *bp); 8615 8616 static int bnxt_setup_int_mode(struct bnxt *bp) 8617 { 8618 int rc; 8619 8620 if (!bp->irq_tbl) { 8621 rc = bnxt_init_int_mode(bp); 8622 if (rc || !bp->irq_tbl) 8623 return rc ?: -ENODEV; 8624 } 8625 8626 if (bp->flags & BNXT_FLAG_USING_MSIX) 8627 bnxt_setup_msix(bp); 8628 else 8629 bnxt_setup_inta(bp); 8630 8631 rc = bnxt_set_real_num_queues(bp); 8632 return rc; 8633 } 8634 8635 #ifdef CONFIG_RFS_ACCEL 8636 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 8637 { 8638 return bp->hw_resc.max_rsscos_ctxs; 8639 } 8640 8641 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 8642 { 8643 return bp->hw_resc.max_vnics; 8644 } 8645 #endif 8646 8647 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 8648 { 8649 return bp->hw_resc.max_stat_ctxs; 8650 } 8651 8652 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 8653 { 8654 return bp->hw_resc.max_cp_rings; 8655 } 8656 8657 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 8658 { 8659 unsigned int cp = bp->hw_resc.max_cp_rings; 8660 8661 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 8662 cp -= bnxt_get_ulp_msix_num(bp); 8663 8664 return cp; 8665 } 8666 8667 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 8668 { 8669 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8670 8671 if (bp->flags & BNXT_FLAG_CHIP_P5) 8672 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 8673 8674 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 8675 } 8676 8677 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 8678 { 8679 bp->hw_resc.max_irqs = max_irqs; 8680 } 8681 8682 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 8683 { 8684 unsigned int cp; 8685 8686 cp = bnxt_get_max_func_cp_rings_for_en(bp); 8687 if (bp->flags & BNXT_FLAG_CHIP_P5) 8688 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 8689 else 8690 return cp - bp->cp_nr_rings; 8691 } 8692 8693 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 8694 { 8695 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 8696 } 8697 8698 int bnxt_get_avail_msix(struct bnxt *bp, int num) 8699 { 8700 int max_cp = bnxt_get_max_func_cp_rings(bp); 8701 int max_irq = bnxt_get_max_func_irqs(bp); 8702 int total_req = bp->cp_nr_rings + num; 8703 int max_idx, avail_msix; 8704 8705 max_idx = bp->total_irqs; 8706 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 8707 max_idx = min_t(int, bp->total_irqs, max_cp); 8708 avail_msix = max_idx - bp->cp_nr_rings; 8709 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 8710 return avail_msix; 8711 8712 if (max_irq < total_req) { 8713 num = max_irq - bp->cp_nr_rings; 8714 if (num <= 0) 8715 return 0; 8716 } 8717 return num; 8718 } 8719 8720 static int bnxt_get_num_msix(struct bnxt *bp) 8721 { 8722 if (!BNXT_NEW_RM(bp)) 8723 return bnxt_get_max_func_irqs(bp); 8724 8725 return bnxt_nq_rings_in_use(bp); 8726 } 8727 8728 static int bnxt_init_msix(struct bnxt *bp) 8729 { 8730 int i, total_vecs, max, rc = 0, min = 1, ulp_msix; 8731 struct msix_entry *msix_ent; 8732 8733 total_vecs = bnxt_get_num_msix(bp); 8734 max = 
bnxt_get_max_func_irqs(bp); 8735 if (total_vecs > max) 8736 total_vecs = max; 8737 8738 if (!total_vecs) 8739 return 0; 8740 8741 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 8742 if (!msix_ent) 8743 return -ENOMEM; 8744 8745 for (i = 0; i < total_vecs; i++) { 8746 msix_ent[i].entry = i; 8747 msix_ent[i].vector = 0; 8748 } 8749 8750 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 8751 min = 2; 8752 8753 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 8754 ulp_msix = bnxt_get_ulp_msix_num(bp); 8755 if (total_vecs < 0 || total_vecs < ulp_msix) { 8756 rc = -ENODEV; 8757 goto msix_setup_exit; 8758 } 8759 8760 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 8761 if (bp->irq_tbl) { 8762 for (i = 0; i < total_vecs; i++) 8763 bp->irq_tbl[i].vector = msix_ent[i].vector; 8764 8765 bp->total_irqs = total_vecs; 8766 /* Trim rings based upon num of vectors allocated */ 8767 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 8768 total_vecs - ulp_msix, min == 1); 8769 if (rc) 8770 goto msix_setup_exit; 8771 8772 bp->cp_nr_rings = (min == 1) ? 8773 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 8774 bp->tx_nr_rings + bp->rx_nr_rings; 8775 8776 } else { 8777 rc = -ENOMEM; 8778 goto msix_setup_exit; 8779 } 8780 bp->flags |= BNXT_FLAG_USING_MSIX; 8781 kfree(msix_ent); 8782 return 0; 8783 8784 msix_setup_exit: 8785 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 8786 kfree(bp->irq_tbl); 8787 bp->irq_tbl = NULL; 8788 pci_disable_msix(bp->pdev); 8789 kfree(msix_ent); 8790 return rc; 8791 } 8792 8793 static int bnxt_init_inta(struct bnxt *bp) 8794 { 8795 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL); 8796 if (!bp->irq_tbl) 8797 return -ENOMEM; 8798 8799 bp->total_irqs = 1; 8800 bp->rx_nr_rings = 1; 8801 bp->tx_nr_rings = 1; 8802 bp->cp_nr_rings = 1; 8803 bp->flags |= BNXT_FLAG_SHARED_RINGS; 8804 bp->irq_tbl[0].vector = bp->pdev->irq; 8805 return 0; 8806 } 8807 8808 static int bnxt_init_int_mode(struct bnxt *bp) 8809 { 8810 int rc = -ENODEV; 8811 8812 if (bp->flags & BNXT_FLAG_MSIX_CAP) 8813 rc = bnxt_init_msix(bp); 8814 8815 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 8816 /* fallback to INTA */ 8817 rc = bnxt_init_inta(bp); 8818 } 8819 return rc; 8820 } 8821 8822 static void bnxt_clear_int_mode(struct bnxt *bp) 8823 { 8824 if (bp->flags & BNXT_FLAG_USING_MSIX) 8825 pci_disable_msix(bp->pdev); 8826 8827 kfree(bp->irq_tbl); 8828 bp->irq_tbl = NULL; 8829 bp->flags &= ~BNXT_FLAG_USING_MSIX; 8830 } 8831 8832 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 8833 { 8834 int tcs = netdev_get_num_tc(bp->dev); 8835 bool irq_cleared = false; 8836 int rc; 8837 8838 if (!bnxt_need_reserve_rings(bp)) 8839 return 0; 8840 8841 if (irq_re_init && BNXT_NEW_RM(bp) && 8842 bnxt_get_num_msix(bp) != bp->total_irqs) { 8843 bnxt_ulp_irq_stop(bp); 8844 bnxt_clear_int_mode(bp); 8845 irq_cleared = true; 8846 } 8847 rc = __bnxt_reserve_rings(bp); 8848 if (irq_cleared) { 8849 if (!rc) 8850 rc = bnxt_init_int_mode(bp); 8851 bnxt_ulp_irq_restart(bp, rc); 8852 } 8853 if (rc) { 8854 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 8855 return rc; 8856 } 8857 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { 8858 netdev_err(bp->dev, "tx ring reservation failure\n"); 8859 netdev_reset_tc(bp->dev); 8860 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 8861 return -ENOMEM; 8862 } 8863 return 0; 8864 } 8865 8866 static void bnxt_free_irq(struct bnxt *bp) 8867 { 8868 struct bnxt_irq *irq; 8869 int i; 
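	/* Drop the aRFS CPU rmap before releasing the per-ring vectors; each
	 * requested IRQ below also clears its affinity hint and cpumask.
	 */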
8870 8871 #ifdef CONFIG_RFS_ACCEL 8872 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 8873 bp->dev->rx_cpu_rmap = NULL; 8874 #endif 8875 if (!bp->irq_tbl || !bp->bnapi) 8876 return; 8877 8878 for (i = 0; i < bp->cp_nr_rings; i++) { 8879 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8880 8881 irq = &bp->irq_tbl[map_idx]; 8882 if (irq->requested) { 8883 if (irq->have_cpumask) { 8884 irq_set_affinity_hint(irq->vector, NULL); 8885 free_cpumask_var(irq->cpu_mask); 8886 irq->have_cpumask = 0; 8887 } 8888 free_irq(irq->vector, bp->bnapi[i]); 8889 } 8890 8891 irq->requested = 0; 8892 } 8893 } 8894 8895 static int bnxt_request_irq(struct bnxt *bp) 8896 { 8897 int i, j, rc = 0; 8898 unsigned long flags = 0; 8899 #ifdef CONFIG_RFS_ACCEL 8900 struct cpu_rmap *rmap; 8901 #endif 8902 8903 rc = bnxt_setup_int_mode(bp); 8904 if (rc) { 8905 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 8906 rc); 8907 return rc; 8908 } 8909 #ifdef CONFIG_RFS_ACCEL 8910 rmap = bp->dev->rx_cpu_rmap; 8911 #endif 8912 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 8913 flags = IRQF_SHARED; 8914 8915 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 8916 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8917 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 8918 8919 #ifdef CONFIG_RFS_ACCEL 8920 if (rmap && bp->bnapi[i]->rx_ring) { 8921 rc = irq_cpu_rmap_add(rmap, irq->vector); 8922 if (rc) 8923 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 8924 j); 8925 j++; 8926 } 8927 #endif 8928 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 8929 bp->bnapi[i]); 8930 if (rc) 8931 break; 8932 8933 irq->requested = 1; 8934 8935 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 8936 int numa_node = dev_to_node(&bp->pdev->dev); 8937 8938 irq->have_cpumask = 1; 8939 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 8940 irq->cpu_mask); 8941 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 8942 if (rc) { 8943 netdev_warn(bp->dev, 8944 "Set affinity failed, IRQ = %d\n", 8945 irq->vector); 8946 break; 8947 } 8948 } 8949 } 8950 return rc; 8951 } 8952 8953 static void bnxt_del_napi(struct bnxt *bp) 8954 { 8955 int i; 8956 8957 if (!bp->bnapi) 8958 return; 8959 8960 for (i = 0; i < bp->cp_nr_rings; i++) { 8961 struct bnxt_napi *bnapi = bp->bnapi[i]; 8962 8963 __netif_napi_del(&bnapi->napi); 8964 } 8965 /* We called __netif_napi_del(), we need 8966 * to respect an RCU grace period before freeing napi structures. 
8967 */ 8968 synchronize_net(); 8969 } 8970 8971 static void bnxt_init_napi(struct bnxt *bp) 8972 { 8973 int i; 8974 unsigned int cp_nr_rings = bp->cp_nr_rings; 8975 struct bnxt_napi *bnapi; 8976 8977 if (bp->flags & BNXT_FLAG_USING_MSIX) { 8978 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 8979 8980 if (bp->flags & BNXT_FLAG_CHIP_P5) 8981 poll_fn = bnxt_poll_p5; 8982 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8983 cp_nr_rings--; 8984 for (i = 0; i < cp_nr_rings; i++) { 8985 bnapi = bp->bnapi[i]; 8986 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); 8987 } 8988 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8989 bnapi = bp->bnapi[cp_nr_rings]; 8990 netif_napi_add(bp->dev, &bnapi->napi, 8991 bnxt_poll_nitroa0, 64); 8992 } 8993 } else { 8994 bnapi = bp->bnapi[0]; 8995 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 8996 } 8997 } 8998 8999 static void bnxt_disable_napi(struct bnxt *bp) 9000 { 9001 int i; 9002 9003 if (!bp->bnapi || 9004 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 9005 return; 9006 9007 for (i = 0; i < bp->cp_nr_rings; i++) { 9008 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 9009 9010 if (bp->bnapi[i]->rx_ring) 9011 cancel_work_sync(&cpr->dim.work); 9012 9013 napi_disable(&bp->bnapi[i]->napi); 9014 } 9015 } 9016 9017 static void bnxt_enable_napi(struct bnxt *bp) 9018 { 9019 int i; 9020 9021 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 9022 for (i = 0; i < bp->cp_nr_rings; i++) { 9023 struct bnxt_napi *bnapi = bp->bnapi[i]; 9024 struct bnxt_cp_ring_info *cpr; 9025 9026 cpr = &bnapi->cp_ring; 9027 if (bnapi->in_reset) 9028 cpr->sw_stats.rx.rx_resets++; 9029 bnapi->in_reset = false; 9030 9031 if (bnapi->rx_ring) { 9032 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 9033 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 9034 } 9035 napi_enable(&bnapi->napi); 9036 } 9037 } 9038 9039 void bnxt_tx_disable(struct bnxt *bp) 9040 { 9041 int i; 9042 struct bnxt_tx_ring_info *txr; 9043 9044 if (bp->tx_ring) { 9045 for (i = 0; i < bp->tx_nr_rings; i++) { 9046 txr = &bp->tx_ring[i]; 9047 txr->dev_state = BNXT_DEV_STATE_CLOSING; 9048 } 9049 } 9050 /* Drop carrier first to prevent TX timeout */ 9051 netif_carrier_off(bp->dev); 9052 /* Stop all TX queues */ 9053 netif_tx_disable(bp->dev); 9054 } 9055 9056 void bnxt_tx_enable(struct bnxt *bp) 9057 { 9058 int i; 9059 struct bnxt_tx_ring_info *txr; 9060 9061 for (i = 0; i < bp->tx_nr_rings; i++) { 9062 txr = &bp->tx_ring[i]; 9063 txr->dev_state = 0; 9064 } 9065 netif_tx_wake_all_queues(bp->dev); 9066 if (bp->link_info.link_up) 9067 netif_carrier_on(bp->dev); 9068 } 9069 9070 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 9071 { 9072 u8 active_fec = link_info->active_fec_sig_mode & 9073 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 9074 9075 switch (active_fec) { 9076 default: 9077 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 9078 return "None"; 9079 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 9080 return "Clause 74 BaseR"; 9081 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 9082 return "Clause 91 RS(528,514)"; 9083 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 9084 return "Clause 91 RS544_1XN"; 9085 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 9086 return "Clause 91 RS(544,514)"; 9087 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 9088 return "Clause 91 RS272_1XN"; 9089 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 9090 return "Clause 91 RS(272,257)"; 9091 } 9092 } 9093 9094 static void bnxt_report_link(struct bnxt *bp) 9095 { 9096 if 
(bp->link_info.link_up) { 9097 const char *signal = ""; 9098 const char *flow_ctrl; 9099 const char *duplex; 9100 u32 speed; 9101 u16 fec; 9102 9103 netif_carrier_on(bp->dev); 9104 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 9105 if (speed == SPEED_UNKNOWN) { 9106 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 9107 return; 9108 } 9109 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 9110 duplex = "full"; 9111 else 9112 duplex = "half"; 9113 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 9114 flow_ctrl = "ON - receive & transmit"; 9115 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 9116 flow_ctrl = "ON - transmit"; 9117 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 9118 flow_ctrl = "ON - receive"; 9119 else 9120 flow_ctrl = "none"; 9121 if (bp->link_info.phy_qcfg_resp.option_flags & 9122 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { 9123 u8 sig_mode = bp->link_info.active_fec_sig_mode & 9124 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 9125 switch (sig_mode) { 9126 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: 9127 signal = "(NRZ) "; 9128 break; 9129 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: 9130 signal = "(PAM4) "; 9131 break; 9132 default: 9133 break; 9134 } 9135 } 9136 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", 9137 speed, signal, duplex, flow_ctrl); 9138 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) 9139 netdev_info(bp->dev, "EEE is %s\n", 9140 bp->eee.eee_active ? "active" : 9141 "not active"); 9142 fec = bp->link_info.fec_cfg; 9143 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 9144 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 9145 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 9146 bnxt_report_fec(&bp->link_info)); 9147 } else { 9148 netif_carrier_off(bp->dev); 9149 netdev_err(bp->dev, "NIC Link is Down\n"); 9150 } 9151 } 9152 9153 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 9154 { 9155 if (!resp->supported_speeds_auto_mode && 9156 !resp->supported_speeds_force_mode && 9157 !resp->supported_pam4_speeds_auto_mode && 9158 !resp->supported_pam4_speeds_force_mode) 9159 return true; 9160 return false; 9161 } 9162 9163 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 9164 { 9165 int rc = 0; 9166 struct hwrm_port_phy_qcaps_input req = {0}; 9167 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 9168 struct bnxt_link_info *link_info = &bp->link_info; 9169 9170 if (bp->hwrm_spec_code < 0x10201) 9171 return 0; 9172 9173 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 9174 9175 mutex_lock(&bp->hwrm_cmd_lock); 9176 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9177 if (rc) 9178 goto hwrm_phy_qcaps_exit; 9179 9180 bp->phy_flags = resp->flags; 9181 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 9182 struct ethtool_eee *eee = &bp->eee; 9183 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 9184 9185 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 9186 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 9187 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 9188 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 9189 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 9190 } 9191 9192 if (bp->hwrm_spec_code >= 0x10a01) { 9193 if (bnxt_phy_qcaps_no_speed(resp)) { 9194 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 9195 netdev_warn(bp->dev, "Ethernet link disabled\n"); 9196 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 9197 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 9198 
netdev_info(bp->dev, "Ethernet link enabled\n"); 9199 /* Phy re-enabled, reprobe the speeds */ 9200 link_info->support_auto_speeds = 0; 9201 link_info->support_pam4_auto_speeds = 0; 9202 } 9203 } 9204 if (resp->supported_speeds_auto_mode) 9205 link_info->support_auto_speeds = 9206 le16_to_cpu(resp->supported_speeds_auto_mode); 9207 if (resp->supported_pam4_speeds_auto_mode) 9208 link_info->support_pam4_auto_speeds = 9209 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 9210 9211 bp->port_count = resp->port_cnt; 9212 9213 hwrm_phy_qcaps_exit: 9214 mutex_unlock(&bp->hwrm_cmd_lock); 9215 return rc; 9216 } 9217 9218 static bool bnxt_support_dropped(u16 advertising, u16 supported) 9219 { 9220 u16 diff = advertising ^ supported; 9221 9222 return ((supported | diff) != supported); 9223 } 9224 9225 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 9226 { 9227 int rc = 0; 9228 struct bnxt_link_info *link_info = &bp->link_info; 9229 struct hwrm_port_phy_qcfg_input req = {0}; 9230 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 9231 u8 link_up = link_info->link_up; 9232 bool support_changed = false; 9233 9234 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 9235 9236 mutex_lock(&bp->hwrm_cmd_lock); 9237 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9238 if (rc) { 9239 mutex_unlock(&bp->hwrm_cmd_lock); 9240 return rc; 9241 } 9242 9243 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 9244 link_info->phy_link_status = resp->link; 9245 link_info->duplex = resp->duplex_cfg; 9246 if (bp->hwrm_spec_code >= 0x10800) 9247 link_info->duplex = resp->duplex_state; 9248 link_info->pause = resp->pause; 9249 link_info->auto_mode = resp->auto_mode; 9250 link_info->auto_pause_setting = resp->auto_pause; 9251 link_info->lp_pause = resp->link_partner_adv_pause; 9252 link_info->force_pause_setting = resp->force_pause; 9253 link_info->duplex_setting = resp->duplex_cfg; 9254 if (link_info->phy_link_status == BNXT_LINK_LINK) 9255 link_info->link_speed = le16_to_cpu(resp->link_speed); 9256 else 9257 link_info->link_speed = 0; 9258 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 9259 link_info->force_pam4_link_speed = 9260 le16_to_cpu(resp->force_pam4_link_speed); 9261 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 9262 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 9263 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 9264 link_info->auto_pam4_link_speeds = 9265 le16_to_cpu(resp->auto_pam4_link_speed_mask); 9266 link_info->lp_auto_link_speeds = 9267 le16_to_cpu(resp->link_partner_adv_speeds); 9268 link_info->lp_auto_pam4_link_speeds = 9269 resp->link_partner_pam4_adv_speeds; 9270 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 9271 link_info->phy_ver[0] = resp->phy_maj; 9272 link_info->phy_ver[1] = resp->phy_min; 9273 link_info->phy_ver[2] = resp->phy_bld; 9274 link_info->media_type = resp->media_type; 9275 link_info->phy_type = resp->phy_type; 9276 link_info->transceiver = resp->xcvr_pkg_type; 9277 link_info->phy_addr = resp->eee_config_phy_addr & 9278 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 9279 link_info->module_status = resp->module_status; 9280 9281 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { 9282 struct ethtool_eee *eee = &bp->eee; 9283 u16 fw_speeds; 9284 9285 eee->eee_active = 0; 9286 if (resp->eee_config_phy_addr & 9287 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 9288 eee->eee_active = 1; 9289 fw_speeds = le16_to_cpu( 9290 
					resp->link_partner_adv_eee_link_speed_mask);
			eee->lp_advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		}

		/* Pull initial EEE config */
		if (!chng_link_state) {
			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			eee->advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504) {
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
	}
	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_up = 1;
		else
			link_info->link_up = 0;
		if (link_up != link_info->link_up)
			bnxt_report_link(bp);
	} else {
		/* always link down if not required to update link state */
		link_info->link_up = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (!BNXT_PHY_CFG_ABLE(bp))
		return 0;

	/* Check if any advertised speeds are no longer supported. The caller
	 * holds the link_lock mutex, so we can modify link_info settings.
	 */
	if (bnxt_support_dropped(link_info->advertising,
				 link_info->support_auto_speeds)) {
		link_info->advertising = link_info->support_auto_speeds;
		support_changed = true;
	}
	if (bnxt_support_dropped(link_info->advertising_pam4,
				 link_info->support_pam4_auto_speeds)) {
		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
		support_changed = true;
	}
	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
		bnxt_hwrm_set_link_setting(bp, true, false);
	return 0;
}

static void bnxt_get_port_module_status(struct bnxt *bp)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	u8 module_status;

	if (bnxt_update_link(bp, true))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
		if (bp->hwrm_spec_code >= 0x10201) {
			netdev_warn(bp->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
			netdev_warn(bp->dev, "TX is disabled\n");
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
	}
}

static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
		if (bp->hwrm_spec_code >= 0x10201)
			req->auto_pause =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 9393 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 9394 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 9395 req->enables |= 9396 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 9397 } else { 9398 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 9399 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 9400 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 9401 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 9402 req->enables |= 9403 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 9404 if (bp->hwrm_spec_code >= 0x10201) { 9405 req->auto_pause = req->force_pause; 9406 req->enables |= cpu_to_le32( 9407 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 9408 } 9409 } 9410 } 9411 9412 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 9413 { 9414 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 9415 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 9416 if (bp->link_info.advertising) { 9417 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 9418 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 9419 } 9420 if (bp->link_info.advertising_pam4) { 9421 req->enables |= 9422 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 9423 req->auto_link_pam4_speed_mask = 9424 cpu_to_le16(bp->link_info.advertising_pam4); 9425 } 9426 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 9427 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 9428 } else { 9429 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 9430 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 9431 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 9432 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 9433 } else { 9434 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 9435 } 9436 } 9437 9438 /* tell chimp that the setting takes effect immediately */ 9439 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 9440 } 9441 9442 int bnxt_hwrm_set_pause(struct bnxt *bp) 9443 { 9444 struct hwrm_port_phy_cfg_input req = {0}; 9445 int rc; 9446 9447 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 9448 bnxt_hwrm_set_pause_common(bp, &req); 9449 9450 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 9451 bp->link_info.force_link_chng) 9452 bnxt_hwrm_set_link_common(bp, &req); 9453 9454 mutex_lock(&bp->hwrm_cmd_lock); 9455 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9456 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 9457 /* since changing of pause setting doesn't trigger any link 9458 * change event, the driver needs to update the current pause 9459 * result upon successfully return of the phy_cfg command 9460 */ 9461 bp->link_info.pause = 9462 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 9463 bp->link_info.auto_pause_setting = 0; 9464 if (!bp->link_info.force_link_chng) 9465 bnxt_report_link(bp); 9466 } 9467 bp->link_info.force_link_chng = false; 9468 mutex_unlock(&bp->hwrm_cmd_lock); 9469 return rc; 9470 } 9471 9472 static void bnxt_hwrm_set_eee(struct bnxt *bp, 9473 struct hwrm_port_phy_cfg_input *req) 9474 { 9475 struct ethtool_eee *eee = &bp->eee; 9476 9477 if (eee->eee_enabled) { 9478 u16 eee_speeds; 9479 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 9480 9481 if (eee->tx_lpi_enabled) 9482 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 9483 else 9484 flags |= 
PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 9485 9486 req->flags |= cpu_to_le32(flags); 9487 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 9488 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 9489 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 9490 } else { 9491 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 9492 } 9493 } 9494 9495 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 9496 { 9497 struct hwrm_port_phy_cfg_input req = {0}; 9498 9499 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 9500 if (set_pause) 9501 bnxt_hwrm_set_pause_common(bp, &req); 9502 9503 bnxt_hwrm_set_link_common(bp, &req); 9504 9505 if (set_eee) 9506 bnxt_hwrm_set_eee(bp, &req); 9507 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9508 } 9509 9510 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 9511 { 9512 struct hwrm_port_phy_cfg_input req = {0}; 9513 9514 if (!BNXT_SINGLE_PF(bp)) 9515 return 0; 9516 9517 if (pci_num_vf(bp->pdev) && 9518 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) 9519 return 0; 9520 9521 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 9522 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 9523 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9524 } 9525 9526 static int bnxt_fw_init_one(struct bnxt *bp); 9527 9528 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 9529 { 9530 #ifdef CONFIG_TEE_BNXT_FW 9531 int rc = tee_bnxt_fw_load(); 9532 9533 if (rc) 9534 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 9535 9536 return rc; 9537 #else 9538 netdev_err(bp->dev, "OP-TEE not supported\n"); 9539 return -ENODEV; 9540 #endif 9541 } 9542 9543 static int bnxt_try_recover_fw(struct bnxt *bp) 9544 { 9545 if (bp->fw_health && bp->fw_health->status_reliable) { 9546 int retry = 0, rc; 9547 u32 sts; 9548 9549 mutex_lock(&bp->hwrm_cmd_lock); 9550 do { 9551 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 9552 rc = __bnxt_hwrm_ver_get(bp, true); 9553 if (!BNXT_FW_IS_BOOTING(sts) && 9554 !BNXT_FW_IS_RECOVERING(sts)) 9555 break; 9556 retry++; 9557 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); 9558 mutex_unlock(&bp->hwrm_cmd_lock); 9559 9560 if (!BNXT_FW_IS_HEALTHY(sts)) { 9561 netdev_err(bp->dev, 9562 "Firmware not responding, status: 0x%x\n", 9563 sts); 9564 rc = -ENODEV; 9565 } 9566 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 9567 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 9568 return bnxt_fw_reset_via_optee(bp); 9569 } 9570 return rc; 9571 } 9572 9573 return -ENODEV; 9574 } 9575 9576 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 9577 { 9578 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; 9579 struct hwrm_func_drv_if_change_input req = {0}; 9580 bool fw_reset = !bp->irq_tbl; 9581 bool resc_reinit = false; 9582 int rc, retry = 0; 9583 u32 flags = 0; 9584 9585 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 9586 return 0; 9587 9588 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); 9589 if (up) 9590 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 9591 mutex_lock(&bp->hwrm_cmd_lock); 9592 while (retry < BNXT_FW_IF_RETRY) { 9593 rc = _hwrm_send_message(bp, &req, sizeof(req), 9594 HWRM_CMD_TIMEOUT); 9595 if (rc != -EAGAIN) 9596 break; 9597 9598 msleep(50); 9599 retry++; 9600 } 9601 if (!rc) 9602 flags = le32_to_cpu(resp->flags); 9603 mutex_unlock(&bp->hwrm_cmd_lock); 9604 9605 if (rc == -EAGAIN) 9606 return rc; 9607 if (rc && up) { 9608 rc = bnxt_try_recover_fw(bp); 9609 
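		/* Treat a recovery attempt as a firmware reset; if recovery
		 * failed, rc is still set and we bail out just below.
		 */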
fw_reset = true; 9610 } 9611 if (rc) 9612 return rc; 9613 9614 if (!up) { 9615 bnxt_inv_fw_health_reg(bp); 9616 return 0; 9617 } 9618 9619 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 9620 resc_reinit = true; 9621 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) 9622 fw_reset = true; 9623 else if (bp->fw_health && !bp->fw_health->status_reliable) 9624 bnxt_try_map_fw_health_reg(bp); 9625 9626 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 9627 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 9628 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 9629 return -ENODEV; 9630 } 9631 if (resc_reinit || fw_reset) { 9632 if (fw_reset) { 9633 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 9634 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 9635 bnxt_ulp_stop(bp); 9636 bnxt_free_ctx_mem(bp); 9637 kfree(bp->ctx); 9638 bp->ctx = NULL; 9639 bnxt_dcb_free(bp); 9640 rc = bnxt_fw_init_one(bp); 9641 if (rc) { 9642 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 9643 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 9644 return rc; 9645 } 9646 bnxt_clear_int_mode(bp); 9647 rc = bnxt_init_int_mode(bp); 9648 if (rc) { 9649 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 9650 netdev_err(bp->dev, "init int mode failed\n"); 9651 return rc; 9652 } 9653 } 9654 if (BNXT_NEW_RM(bp)) { 9655 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9656 9657 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 9658 if (rc) 9659 netdev_err(bp->dev, "resc_qcaps failed\n"); 9660 9661 hw_resc->resv_cp_rings = 0; 9662 hw_resc->resv_stat_ctxs = 0; 9663 hw_resc->resv_irqs = 0; 9664 hw_resc->resv_tx_rings = 0; 9665 hw_resc->resv_rx_rings = 0; 9666 hw_resc->resv_hw_ring_grps = 0; 9667 hw_resc->resv_vnics = 0; 9668 if (!fw_reset) { 9669 bp->tx_nr_rings = 0; 9670 bp->rx_nr_rings = 0; 9671 } 9672 } 9673 } 9674 return rc; 9675 } 9676 9677 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 9678 { 9679 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 9680 struct hwrm_port_led_qcaps_input req = {0}; 9681 struct bnxt_pf_info *pf = &bp->pf; 9682 int rc; 9683 9684 bp->num_leds = 0; 9685 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 9686 return 0; 9687 9688 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 9689 req.port_id = cpu_to_le16(pf->port_id); 9690 mutex_lock(&bp->hwrm_cmd_lock); 9691 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9692 if (rc) { 9693 mutex_unlock(&bp->hwrm_cmd_lock); 9694 return rc; 9695 } 9696 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 9697 int i; 9698 9699 bp->num_leds = resp->num_leds; 9700 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 9701 bp->num_leds); 9702 for (i = 0; i < bp->num_leds; i++) { 9703 struct bnxt_led_info *led = &bp->leds[i]; 9704 __le16 caps = led->led_state_caps; 9705 9706 if (!led->led_group_id || 9707 !BNXT_LED_ALT_BLINK_CAP(caps)) { 9708 bp->num_leds = 0; 9709 break; 9710 } 9711 } 9712 } 9713 mutex_unlock(&bp->hwrm_cmd_lock); 9714 return 0; 9715 } 9716 9717 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 9718 { 9719 struct hwrm_wol_filter_alloc_input req = {0}; 9720 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 9721 int rc; 9722 9723 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 9724 req.port_id = cpu_to_le16(bp->pf.port_id); 9725 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 9726 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 9727 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 9728 mutex_lock(&bp->hwrm_cmd_lock); 9729 
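/* Hold hwrm_cmd_lock across the send so the shared response buffer stays valid while wol_filter_id is read back below. */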
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9730 if (!rc) 9731 bp->wol_filter_id = resp->wol_filter_id; 9732 mutex_unlock(&bp->hwrm_cmd_lock); 9733 return rc; 9734 } 9735 9736 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 9737 { 9738 struct hwrm_wol_filter_free_input req = {0}; 9739 9740 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 9741 req.port_id = cpu_to_le16(bp->pf.port_id); 9742 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 9743 req.wol_filter_id = bp->wol_filter_id; 9744 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9745 } 9746 9747 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 9748 { 9749 struct hwrm_wol_filter_qcfg_input req = {0}; 9750 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 9751 u16 next_handle = 0; 9752 int rc; 9753 9754 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 9755 req.port_id = cpu_to_le16(bp->pf.port_id); 9756 req.handle = cpu_to_le16(handle); 9757 mutex_lock(&bp->hwrm_cmd_lock); 9758 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9759 if (!rc) { 9760 next_handle = le16_to_cpu(resp->next_handle); 9761 if (next_handle != 0) { 9762 if (resp->wol_type == 9763 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 9764 bp->wol = 1; 9765 bp->wol_filter_id = resp->wol_filter_id; 9766 } 9767 } 9768 } 9769 mutex_unlock(&bp->hwrm_cmd_lock); 9770 return next_handle; 9771 } 9772 9773 static void bnxt_get_wol_settings(struct bnxt *bp) 9774 { 9775 u16 handle = 0; 9776 9777 bp->wol = 0; 9778 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 9779 return; 9780 9781 do { 9782 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 9783 } while (handle && handle != 0xffff); 9784 } 9785 9786 #ifdef CONFIG_BNXT_HWMON 9787 static ssize_t bnxt_show_temp(struct device *dev, 9788 struct device_attribute *devattr, char *buf) 9789 { 9790 struct hwrm_temp_monitor_query_input req = {0}; 9791 struct hwrm_temp_monitor_query_output *resp; 9792 struct bnxt *bp = dev_get_drvdata(dev); 9793 u32 len = 0; 9794 int rc; 9795 9796 resp = bp->hwrm_cmd_resp_addr; 9797 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); 9798 mutex_lock(&bp->hwrm_cmd_lock); 9799 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9800 if (!rc) 9801 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ 9802 mutex_unlock(&bp->hwrm_cmd_lock); 9803 if (rc) 9804 return rc; 9805 return len; 9806 } 9807 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); 9808 9809 static struct attribute *bnxt_attrs[] = { 9810 &sensor_dev_attr_temp1_input.dev_attr.attr, 9811 NULL 9812 }; 9813 ATTRIBUTE_GROUPS(bnxt); 9814 9815 static void bnxt_hwmon_close(struct bnxt *bp) 9816 { 9817 if (bp->hwmon_dev) { 9818 hwmon_device_unregister(bp->hwmon_dev); 9819 bp->hwmon_dev = NULL; 9820 } 9821 } 9822 9823 static void bnxt_hwmon_open(struct bnxt *bp) 9824 { 9825 struct hwrm_temp_monitor_query_input req = {0}; 9826 struct pci_dev *pdev = bp->pdev; 9827 int rc; 9828 9829 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); 9830 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9831 if (rc == -EACCES || rc == -EOPNOTSUPP) { 9832 bnxt_hwmon_close(bp); 9833 return; 9834 } 9835 9836 if (bp->hwmon_dev) 9837 return; 9838 9839 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, 9840 DRV_MODULE_NAME, bp, 9841 bnxt_groups); 9842 if (IS_ERR(bp->hwmon_dev)) { 9843 bp->hwmon_dev = NULL; 9844 dev_warn(&pdev->dev, "Cannot 
register hwmon device\n"); 9845 } 9846 } 9847 #else 9848 static void bnxt_hwmon_close(struct bnxt *bp) 9849 { 9850 } 9851 9852 static void bnxt_hwmon_open(struct bnxt *bp) 9853 { 9854 } 9855 #endif 9856 9857 static bool bnxt_eee_config_ok(struct bnxt *bp) 9858 { 9859 struct ethtool_eee *eee = &bp->eee; 9860 struct bnxt_link_info *link_info = &bp->link_info; 9861 9862 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 9863 return true; 9864 9865 if (eee->eee_enabled) { 9866 u32 advertising = 9867 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 9868 9869 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9870 eee->eee_enabled = 0; 9871 return false; 9872 } 9873 if (eee->advertised & ~advertising) { 9874 eee->advertised = advertising & eee->supported; 9875 return false; 9876 } 9877 } 9878 return true; 9879 } 9880 9881 static int bnxt_update_phy_setting(struct bnxt *bp) 9882 { 9883 int rc; 9884 bool update_link = false; 9885 bool update_pause = false; 9886 bool update_eee = false; 9887 struct bnxt_link_info *link_info = &bp->link_info; 9888 9889 rc = bnxt_update_link(bp, true); 9890 if (rc) { 9891 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 9892 rc); 9893 return rc; 9894 } 9895 if (!BNXT_SINGLE_PF(bp)) 9896 return 0; 9897 9898 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9899 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 9900 link_info->req_flow_ctrl) 9901 update_pause = true; 9902 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9903 link_info->force_pause_setting != link_info->req_flow_ctrl) 9904 update_pause = true; 9905 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9906 if (BNXT_AUTO_MODE(link_info->auto_mode)) 9907 update_link = true; 9908 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && 9909 link_info->req_link_speed != link_info->force_link_speed) 9910 update_link = true; 9911 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 9912 link_info->req_link_speed != link_info->force_pam4_link_speed) 9913 update_link = true; 9914 if (link_info->req_duplex != link_info->duplex_setting) 9915 update_link = true; 9916 } else { 9917 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 9918 update_link = true; 9919 if (link_info->advertising != link_info->auto_link_speeds || 9920 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 9921 update_link = true; 9922 } 9923 9924 /* The last close may have shutdown the link, so need to call 9925 * PHY_CFG to bring it back up. 9926 */ 9927 if (!bp->link_info.link_up) 9928 update_link = true; 9929 9930 if (!bnxt_eee_config_ok(bp)) 9931 update_eee = true; 9932 9933 if (update_link) 9934 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 9935 else if (update_pause) 9936 rc = bnxt_hwrm_set_pause(bp); 9937 if (rc) { 9938 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 9939 rc); 9940 return rc; 9941 } 9942 9943 return rc; 9944 } 9945 9946 /* Common routine to pre-map certain register block to different GRC window. 9947 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 9948 * in PF and 3 windows in VF that can be customized to map in different 9949 * register blocks. 
9950 */ 9951 static void bnxt_preset_reg_win(struct bnxt *bp) 9952 { 9953 if (BNXT_PF(bp)) { 9954 /* CAG registers map to GRC window #4 */ 9955 writel(BNXT_CAG_REG_BASE, 9956 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 9957 } 9958 } 9959 9960 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 9961 9962 static int bnxt_reinit_after_abort(struct bnxt *bp) 9963 { 9964 int rc; 9965 9966 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 9967 return -EBUSY; 9968 9969 if (bp->dev->reg_state == NETREG_UNREGISTERED) 9970 return -ENODEV; 9971 9972 rc = bnxt_fw_init_one(bp); 9973 if (!rc) { 9974 bnxt_clear_int_mode(bp); 9975 rc = bnxt_init_int_mode(bp); 9976 if (!rc) { 9977 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); 9978 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 9979 } 9980 } 9981 return rc; 9982 } 9983 9984 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9985 { 9986 int rc = 0; 9987 9988 bnxt_preset_reg_win(bp); 9989 netif_carrier_off(bp->dev); 9990 if (irq_re_init) { 9991 /* Reserve rings now if none were reserved at driver probe. */ 9992 rc = bnxt_init_dflt_ring_mode(bp); 9993 if (rc) { 9994 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 9995 return rc; 9996 } 9997 } 9998 rc = bnxt_reserve_rings(bp, irq_re_init); 9999 if (rc) 10000 return rc; 10001 if ((bp->flags & BNXT_FLAG_RFS) && 10002 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 10003 /* disable RFS if falling back to INTA */ 10004 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 10005 bp->flags &= ~BNXT_FLAG_RFS; 10006 } 10007 10008 rc = bnxt_alloc_mem(bp, irq_re_init); 10009 if (rc) { 10010 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 10011 goto open_err_free_mem; 10012 } 10013 10014 if (irq_re_init) { 10015 bnxt_init_napi(bp); 10016 rc = bnxt_request_irq(bp); 10017 if (rc) { 10018 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 10019 goto open_err_irq; 10020 } 10021 } 10022 10023 rc = bnxt_init_nic(bp, irq_re_init); 10024 if (rc) { 10025 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 10026 goto open_err_irq; 10027 } 10028 10029 bnxt_enable_napi(bp); 10030 bnxt_debug_dev_init(bp); 10031 10032 if (link_re_init) { 10033 mutex_lock(&bp->link_lock); 10034 rc = bnxt_update_phy_setting(bp); 10035 mutex_unlock(&bp->link_lock); 10036 if (rc) { 10037 netdev_warn(bp->dev, "failed to update phy settings\n"); 10038 if (BNXT_SINGLE_PF(bp)) { 10039 bp->link_info.phy_retry = true; 10040 bp->link_info.phy_retry_expires = 10041 jiffies + 5 * HZ; 10042 } 10043 } 10044 } 10045 10046 if (irq_re_init) 10047 udp_tunnel_nic_reset_ntf(bp->dev); 10048 10049 set_bit(BNXT_STATE_OPEN, &bp->state); 10050 bnxt_enable_int(bp); 10051 /* Enable TX queues */ 10052 bnxt_tx_enable(bp); 10053 mod_timer(&bp->timer, jiffies + bp->current_interval); 10054 /* Poll link status and check for SFP+ module status */ 10055 bnxt_get_port_module_status(bp); 10056 10057 /* VF-reps may need to be re-opened after the PF is re-opened */ 10058 if (BNXT_PF(bp)) 10059 bnxt_vf_reps_open(bp); 10060 return 0; 10061 10062 open_err_irq: 10063 bnxt_del_napi(bp); 10064 10065 open_err_free_mem: 10066 bnxt_free_skbs(bp); 10067 bnxt_free_irq(bp); 10068 bnxt_free_mem(bp, true); 10069 return rc; 10070 } 10071 10072 /* rtnl_lock held */ 10073 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 10074 { 10075 int rc = 0; 10076 10077 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 10078 rc = -EIO; 10079 if (!rc) 10080 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 10081 if (rc) { 10082 netdev_err(bp->dev, "nic open 
fail (rc: %x)\n", rc); 10083 dev_close(bp->dev); 10084 } 10085 return rc; 10086 } 10087 10088 /* rtnl_lock held, open the NIC halfway by allocating all resources, but 10089 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 10090 * self tests. 10091 */ 10092 int bnxt_half_open_nic(struct bnxt *bp) 10093 { 10094 int rc = 0; 10095 10096 rc = bnxt_alloc_mem(bp, false); 10097 if (rc) { 10098 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 10099 goto half_open_err; 10100 } 10101 rc = bnxt_init_nic(bp, false); 10102 if (rc) { 10103 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 10104 goto half_open_err; 10105 } 10106 return 0; 10107 10108 half_open_err: 10109 bnxt_free_skbs(bp); 10110 bnxt_free_mem(bp, false); 10111 dev_close(bp->dev); 10112 return rc; 10113 } 10114 10115 /* rtnl_lock held, this call can only be made after a previous successful 10116 * call to bnxt_half_open_nic(). 10117 */ 10118 void bnxt_half_close_nic(struct bnxt *bp) 10119 { 10120 bnxt_hwrm_resource_free(bp, false, false); 10121 bnxt_free_skbs(bp); 10122 bnxt_free_mem(bp, false); 10123 } 10124 10125 static void bnxt_reenable_sriov(struct bnxt *bp) 10126 { 10127 if (BNXT_PF(bp)) { 10128 struct bnxt_pf_info *pf = &bp->pf; 10129 int n = pf->active_vfs; 10130 10131 if (n) 10132 bnxt_cfg_hw_sriov(bp, &n, true); 10133 } 10134 } 10135 10136 static int bnxt_open(struct net_device *dev) 10137 { 10138 struct bnxt *bp = netdev_priv(dev); 10139 int rc; 10140 10141 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 10142 rc = bnxt_reinit_after_abort(bp); 10143 if (rc) { 10144 if (rc == -EBUSY) 10145 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); 10146 else 10147 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); 10148 return -ENODEV; 10149 } 10150 } 10151 10152 rc = bnxt_hwrm_if_change(bp, true); 10153 if (rc) 10154 return rc; 10155 rc = __bnxt_open_nic(bp, true, true); 10156 if (rc) { 10157 bnxt_hwrm_if_change(bp, false); 10158 } else { 10159 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 10160 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10161 bnxt_ulp_start(bp, 0); 10162 bnxt_reenable_sriov(bp); 10163 } 10164 } 10165 bnxt_hwmon_open(bp); 10166 } 10167 10168 return rc; 10169 } 10170 10171 static bool bnxt_drv_busy(struct bnxt *bp) 10172 { 10173 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 10174 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 10175 } 10176 10177 static void bnxt_get_ring_stats(struct bnxt *bp, 10178 struct rtnl_link_stats64 *stats); 10179 10180 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 10181 bool link_re_init) 10182 { 10183 /* Close the VF-reps before closing PF */ 10184 if (BNXT_PF(bp)) 10185 bnxt_vf_reps_close(bp); 10186 10187 /* Change device state to avoid TX queue wake-ups */ 10188 bnxt_tx_disable(bp); 10189 10190 clear_bit(BNXT_STATE_OPEN, &bp->state); 10191 smp_mb__after_atomic(); 10192 while (bnxt_drv_busy(bp)) 10193 msleep(20); 10194 10195 /* Flush rings and disable interrupts */ 10196 bnxt_shutdown_nic(bp, irq_re_init); 10197 10198 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 10199 10200 bnxt_debug_dev_exit(bp); 10201 bnxt_disable_napi(bp); 10202 del_timer_sync(&bp->timer); 10203 bnxt_free_skbs(bp); 10204 10205 /* Save ring stats before shutdown */ 10206 if (bp->bnapi && irq_re_init) 10207 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 10208 if (irq_re_init) { 10209 bnxt_free_irq(bp); 10210 bnxt_del_napi(bp); 10211 } 10212 bnxt_free_mem(bp,
irq_re_init); 10213 } 10214 10215 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 10216 { 10217 int rc = 0; 10218 10219 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10220 /* If we get here, it means firmware reset is in progress 10221 * while we are trying to close. We can safely proceed with 10222 * the close because we are holding rtnl_lock(). Some firmware 10223 * messages may fail as we proceed to close. We set the 10224 * ABORT_ERR flag here so that the FW reset thread will later 10225 * abort when it gets the rtnl_lock() and sees the flag. 10226 */ 10227 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 10228 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 10229 } 10230 10231 #ifdef CONFIG_BNXT_SRIOV 10232 if (bp->sriov_cfg) { 10233 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 10234 !bp->sriov_cfg, 10235 BNXT_SRIOV_CFG_WAIT_TMO); 10236 if (rc) 10237 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 10238 } 10239 #endif 10240 __bnxt_close_nic(bp, irq_re_init, link_re_init); 10241 return rc; 10242 } 10243 10244 static int bnxt_close(struct net_device *dev) 10245 { 10246 struct bnxt *bp = netdev_priv(dev); 10247 10248 bnxt_hwmon_close(bp); 10249 bnxt_close_nic(bp, true, true); 10250 bnxt_hwrm_shutdown_link(bp); 10251 bnxt_hwrm_if_change(bp, false); 10252 return 0; 10253 } 10254 10255 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 10256 u16 *val) 10257 { 10258 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; 10259 struct hwrm_port_phy_mdio_read_input req = {0}; 10260 int rc; 10261 10262 if (bp->hwrm_spec_code < 0x10a00) 10263 return -EOPNOTSUPP; 10264 10265 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); 10266 req.port_id = cpu_to_le16(bp->pf.port_id); 10267 req.phy_addr = phy_addr; 10268 req.reg_addr = cpu_to_le16(reg & 0x1f); 10269 if (mdio_phy_id_is_c45(phy_addr)) { 10270 req.cl45_mdio = 1; 10271 req.phy_addr = mdio_phy_id_prtad(phy_addr); 10272 req.dev_addr = mdio_phy_id_devad(phy_addr); 10273 req.reg_addr = cpu_to_le16(reg); 10274 } 10275 10276 mutex_lock(&bp->hwrm_cmd_lock); 10277 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10278 if (!rc) 10279 *val = le16_to_cpu(resp->reg_data); 10280 mutex_unlock(&bp->hwrm_cmd_lock); 10281 return rc; 10282 } 10283 10284 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 10285 u16 val) 10286 { 10287 struct hwrm_port_phy_mdio_write_input req = {0}; 10288 10289 if (bp->hwrm_spec_code < 0x10a00) 10290 return -EOPNOTSUPP; 10291 10292 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); 10293 req.port_id = cpu_to_le16(bp->pf.port_id); 10294 req.phy_addr = phy_addr; 10295 req.reg_addr = cpu_to_le16(reg & 0x1f); 10296 if (mdio_phy_id_is_c45(phy_addr)) { 10297 req.cl45_mdio = 1; 10298 req.phy_addr = mdio_phy_id_prtad(phy_addr); 10299 req.dev_addr = mdio_phy_id_devad(phy_addr); 10300 req.reg_addr = cpu_to_le16(reg); 10301 } 10302 req.reg_data = cpu_to_le16(val); 10303 10304 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10305 } 10306 10307 /* rtnl_lock held */ 10308 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 10309 { 10310 struct mii_ioctl_data *mdio = if_mii(ifr); 10311 struct bnxt *bp = netdev_priv(dev); 10312 int rc; 10313 10314 switch (cmd) { 10315 case SIOCGMIIPHY: 10316 mdio->phy_id = bp->link_info.phy_addr; 10317 10318 fallthrough; 10319 case SIOCGMIIREG: { 10320 
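/* MII register reads are proxied through firmware; bnxt_hwrm_port_phy_read() above handles both clause 22 and clause 45 addressing. */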
u16 mii_regval = 0; 10321 10322 if (!netif_running(dev)) 10323 return -EAGAIN; 10324 10325 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 10326 &mii_regval); 10327 mdio->val_out = mii_regval; 10328 return rc; 10329 } 10330 10331 case SIOCSMIIREG: 10332 if (!netif_running(dev)) 10333 return -EAGAIN; 10334 10335 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 10336 mdio->val_in); 10337 10338 default: 10339 /* do nothing */ 10340 break; 10341 } 10342 return -EOPNOTSUPP; 10343 } 10344 10345 static void bnxt_get_ring_stats(struct bnxt *bp, 10346 struct rtnl_link_stats64 *stats) 10347 { 10348 int i; 10349 10350 for (i = 0; i < bp->cp_nr_rings; i++) { 10351 struct bnxt_napi *bnapi = bp->bnapi[i]; 10352 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 10353 u64 *sw = cpr->stats.sw_stats; 10354 10355 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 10356 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 10357 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 10358 10359 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 10360 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 10361 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 10362 10363 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 10364 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 10365 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 10366 10367 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 10368 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 10369 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 10370 10371 stats->rx_missed_errors += 10372 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 10373 10374 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 10375 10376 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 10377 } 10378 } 10379 10380 static void bnxt_add_prev_stats(struct bnxt *bp, 10381 struct rtnl_link_stats64 *stats) 10382 { 10383 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 10384 10385 stats->rx_packets += prev_stats->rx_packets; 10386 stats->tx_packets += prev_stats->tx_packets; 10387 stats->rx_bytes += prev_stats->rx_bytes; 10388 stats->tx_bytes += prev_stats->tx_bytes; 10389 stats->rx_missed_errors += prev_stats->rx_missed_errors; 10390 stats->multicast += prev_stats->multicast; 10391 stats->tx_dropped += prev_stats->tx_dropped; 10392 } 10393 10394 static void 10395 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 10396 { 10397 struct bnxt *bp = netdev_priv(dev); 10398 10399 set_bit(BNXT_STATE_READ_STATS, &bp->state); 10400 /* Make sure bnxt_close_nic() sees that we are reading stats before 10401 * we check the BNXT_STATE_OPEN flag. 
10402 */ 10403 smp_mb__after_atomic(); 10404 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 10405 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 10406 *stats = bp->net_stats_prev; 10407 return; 10408 } 10409 10410 bnxt_get_ring_stats(bp, stats); 10411 bnxt_add_prev_stats(bp, stats); 10412 10413 if (bp->flags & BNXT_FLAG_PORT_STATS) { 10414 u64 *rx = bp->port_stats.sw_stats; 10415 u64 *tx = bp->port_stats.sw_stats + 10416 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10417 10418 stats->rx_crc_errors = 10419 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 10420 stats->rx_frame_errors = 10421 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 10422 stats->rx_length_errors = 10423 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 10424 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 10425 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 10426 stats->rx_errors = 10427 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 10428 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 10429 stats->collisions = 10430 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 10431 stats->tx_fifo_errors = 10432 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 10433 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 10434 } 10435 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 10436 } 10437 10438 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 10439 { 10440 struct net_device *dev = bp->dev; 10441 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 10442 struct netdev_hw_addr *ha; 10443 u8 *haddr; 10444 int mc_count = 0; 10445 bool update = false; 10446 int off = 0; 10447 10448 netdev_for_each_mc_addr(ha, dev) { 10449 if (mc_count >= BNXT_MAX_MC_ADDRS) { 10450 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10451 vnic->mc_list_count = 0; 10452 return false; 10453 } 10454 haddr = ha->addr; 10455 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 10456 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 10457 update = true; 10458 } 10459 off += ETH_ALEN; 10460 mc_count++; 10461 } 10462 if (mc_count) 10463 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 10464 10465 if (mc_count != vnic->mc_list_count) { 10466 vnic->mc_list_count = mc_count; 10467 update = true; 10468 } 10469 return update; 10470 } 10471 10472 static bool bnxt_uc_list_updated(struct bnxt *bp) 10473 { 10474 struct net_device *dev = bp->dev; 10475 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 10476 struct netdev_hw_addr *ha; 10477 int off = 0; 10478 10479 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 10480 return true; 10481 10482 netdev_for_each_uc_addr(ha, dev) { 10483 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 10484 return true; 10485 10486 off += ETH_ALEN; 10487 } 10488 return false; 10489 } 10490 10491 static void bnxt_set_rx_mode(struct net_device *dev) 10492 { 10493 struct bnxt *bp = netdev_priv(dev); 10494 struct bnxt_vnic_info *vnic; 10495 bool mc_update = false; 10496 bool uc_update; 10497 u32 mask; 10498 10499 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 10500 return; 10501 10502 vnic = &bp->vnic_info[0]; 10503 mask = vnic->rx_mask; 10504 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 10505 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 10506 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 10507 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 10508 10509 if (dev->flags & IFF_PROMISC) 10510 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10511 10512 uc_update = bnxt_uc_list_updated(bp); 10513 10514 if (dev->flags & IFF_BROADCAST) 10515 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 10516 if (dev->flags & IFF_ALLMULTI) { 10517 mask |= 
CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10518 vnic->mc_list_count = 0; 10519 } else { 10520 mc_update = bnxt_mc_list_updated(bp, &mask); 10521 } 10522 10523 if (mask != vnic->rx_mask || uc_update || mc_update) { 10524 vnic->rx_mask = mask; 10525 10526 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 10527 bnxt_queue_sp_work(bp); 10528 } 10529 } 10530 10531 static int bnxt_cfg_rx_mode(struct bnxt *bp) 10532 { 10533 struct net_device *dev = bp->dev; 10534 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 10535 struct netdev_hw_addr *ha; 10536 int i, off = 0, rc; 10537 bool uc_update; 10538 10539 netif_addr_lock_bh(dev); 10540 uc_update = bnxt_uc_list_updated(bp); 10541 netif_addr_unlock_bh(dev); 10542 10543 if (!uc_update) 10544 goto skip_uc; 10545 10546 mutex_lock(&bp->hwrm_cmd_lock); 10547 for (i = 1; i < vnic->uc_filter_count; i++) { 10548 struct hwrm_cfa_l2_filter_free_input req = {0}; 10549 10550 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 10551 -1); 10552 10553 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 10554 10555 rc = _hwrm_send_message(bp, &req, sizeof(req), 10556 HWRM_CMD_TIMEOUT); 10557 } 10558 mutex_unlock(&bp->hwrm_cmd_lock); 10559 10560 vnic->uc_filter_count = 1; 10561 10562 netif_addr_lock_bh(dev); 10563 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 10564 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10565 } else { 10566 netdev_for_each_uc_addr(ha, dev) { 10567 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 10568 off += ETH_ALEN; 10569 vnic->uc_filter_count++; 10570 } 10571 } 10572 netif_addr_unlock_bh(dev); 10573 10574 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 10575 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 10576 if (rc) { 10577 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 10578 rc); 10579 vnic->uc_filter_count = i; 10580 return rc; 10581 } 10582 } 10583 10584 skip_uc: 10585 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && 10586 !bnxt_promisc_ok(bp)) 10587 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10588 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 10589 if (rc && vnic->mc_list_count) { 10590 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 10591 rc); 10592 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10593 vnic->mc_list_count = 0; 10594 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 10595 } 10596 if (rc) 10597 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 10598 rc); 10599 10600 return rc; 10601 } 10602 10603 static bool bnxt_can_reserve_rings(struct bnxt *bp) 10604 { 10605 #ifdef CONFIG_BNXT_SRIOV 10606 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 10607 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 10608 10609 /* No minimum rings were provisioned by the PF. Don't 10610 * reserve rings by default when device is down. 
10611 */ 10612 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 10613 return true; 10614 10615 if (!netif_running(bp->dev)) 10616 return false; 10617 } 10618 #endif 10619 return true; 10620 } 10621 10622 /* If the chip and firmware support RFS */ 10623 static bool bnxt_rfs_supported(struct bnxt *bp) 10624 { 10625 if (bp->flags & BNXT_FLAG_CHIP_P5) { 10626 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 10627 return true; 10628 return false; 10629 } 10630 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 10631 return true; 10632 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 10633 return true; 10634 return false; 10635 } 10636 10637 /* If runtime conditions support RFS */ 10638 static bool bnxt_rfs_capable(struct bnxt *bp) 10639 { 10640 #ifdef CONFIG_RFS_ACCEL 10641 int vnics, max_vnics, max_rss_ctxs; 10642 10643 if (bp->flags & BNXT_FLAG_CHIP_P5) 10644 return bnxt_rfs_supported(bp); 10645 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) 10646 return false; 10647 10648 vnics = 1 + bp->rx_nr_rings; 10649 max_vnics = bnxt_get_max_func_vnics(bp); 10650 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 10651 10652 /* RSS contexts not a limiting factor */ 10653 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 10654 max_rss_ctxs = max_vnics; 10655 if (vnics > max_vnics || vnics > max_rss_ctxs) { 10656 if (bp->rx_nr_rings > 1) 10657 netdev_warn(bp->dev, 10658 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 10659 min(max_rss_ctxs - 1, max_vnics - 1)); 10660 return false; 10661 } 10662 10663 if (!BNXT_NEW_RM(bp)) 10664 return true; 10665 10666 if (vnics == bp->hw_resc.resv_vnics) 10667 return true; 10668 10669 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); 10670 if (vnics <= bp->hw_resc.resv_vnics) 10671 return true; 10672 10673 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 10674 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); 10675 return false; 10676 #else 10677 return false; 10678 #endif 10679 } 10680 10681 static netdev_features_t bnxt_fix_features(struct net_device *dev, 10682 netdev_features_t features) 10683 { 10684 struct bnxt *bp = netdev_priv(dev); 10685 netdev_features_t vlan_features; 10686 10687 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 10688 features &= ~NETIF_F_NTUPLE; 10689 10690 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 10691 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 10692 10693 if (!(features & NETIF_F_GRO)) 10694 features &= ~NETIF_F_GRO_HW; 10695 10696 if (features & NETIF_F_GRO_HW) 10697 features &= ~NETIF_F_LRO; 10698 10699 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 10700 * turned on or off together.
10701 */ 10702 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 10703 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 10704 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 10705 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 10706 else if (vlan_features) 10707 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 10708 } 10709 #ifdef CONFIG_BNXT_SRIOV 10710 if (BNXT_VF(bp) && bp->vf.vlan) 10711 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 10712 #endif 10713 return features; 10714 } 10715 10716 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 10717 { 10718 struct bnxt *bp = netdev_priv(dev); 10719 u32 flags = bp->flags; 10720 u32 changes; 10721 int rc = 0; 10722 bool re_init = false; 10723 bool update_tpa = false; 10724 10725 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 10726 if (features & NETIF_F_GRO_HW) 10727 flags |= BNXT_FLAG_GRO; 10728 else if (features & NETIF_F_LRO) 10729 flags |= BNXT_FLAG_LRO; 10730 10731 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 10732 flags &= ~BNXT_FLAG_TPA; 10733 10734 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 10735 flags |= BNXT_FLAG_STRIP_VLAN; 10736 10737 if (features & NETIF_F_NTUPLE) 10738 flags |= BNXT_FLAG_RFS; 10739 10740 changes = flags ^ bp->flags; 10741 if (changes & BNXT_FLAG_TPA) { 10742 update_tpa = true; 10743 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 10744 (flags & BNXT_FLAG_TPA) == 0 || 10745 (bp->flags & BNXT_FLAG_CHIP_P5)) 10746 re_init = true; 10747 } 10748 10749 if (changes & ~BNXT_FLAG_TPA) 10750 re_init = true; 10751 10752 if (flags != bp->flags) { 10753 u32 old_flags = bp->flags; 10754 10755 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 10756 bp->flags = flags; 10757 if (update_tpa) 10758 bnxt_set_ring_params(bp); 10759 return rc; 10760 } 10761 10762 if (re_init) { 10763 bnxt_close_nic(bp, false, false); 10764 bp->flags = flags; 10765 if (update_tpa) 10766 bnxt_set_ring_params(bp); 10767 10768 return bnxt_open_nic(bp, false, false); 10769 } 10770 if (update_tpa) { 10771 bp->flags = flags; 10772 rc = bnxt_set_tpa(bp, 10773 (flags & BNXT_FLAG_TPA) ? 10774 true : false); 10775 if (rc) 10776 bp->flags = old_flags; 10777 } 10778 } 10779 return rc; 10780 } 10781 10782 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, 10783 u8 **nextp) 10784 { 10785 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); 10786 int hdr_count = 0; 10787 u8 *nexthdr; 10788 int start; 10789 10790 /* Check that there are at most 2 IPv6 extension headers, no 10791 * fragment header, and each is <= 64 bytes. 
10792 */ 10793 start = nw_off + sizeof(*ip6h); 10794 nexthdr = &ip6h->nexthdr; 10795 while (ipv6_ext_hdr(*nexthdr)) { 10796 struct ipv6_opt_hdr *hp; 10797 int hdrlen; 10798 10799 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || 10800 *nexthdr == NEXTHDR_FRAGMENT) 10801 return false; 10802 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, 10803 skb_headlen(skb), NULL); 10804 if (!hp) 10805 return false; 10806 if (*nexthdr == NEXTHDR_AUTH) 10807 hdrlen = ipv6_authlen(hp); 10808 else 10809 hdrlen = ipv6_optlen(hp); 10810 10811 if (hdrlen > 64) 10812 return false; 10813 nexthdr = &hp->nexthdr; 10814 start += hdrlen; 10815 hdr_count++; 10816 } 10817 if (nextp) { 10818 /* Caller will check inner protocol */ 10819 if (skb->encapsulation) { 10820 *nextp = nexthdr; 10821 return true; 10822 } 10823 *nextp = NULL; 10824 } 10825 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ 10826 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; 10827 } 10828 10829 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */ 10830 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) 10831 { 10832 struct udphdr *uh = udp_hdr(skb); 10833 __be16 udp_port = uh->dest; 10834 10835 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port) 10836 return false; 10837 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) { 10838 struct ethhdr *eh = inner_eth_hdr(skb); 10839 10840 switch (eh->h_proto) { 10841 case htons(ETH_P_IP): 10842 return true; 10843 case htons(ETH_P_IPV6): 10844 return bnxt_exthdr_check(bp, skb, 10845 skb_inner_network_offset(skb), 10846 NULL); 10847 } 10848 } 10849 return false; 10850 } 10851 10852 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) 10853 { 10854 switch (l4_proto) { 10855 case IPPROTO_UDP: 10856 return bnxt_udp_tunl_check(bp, skb); 10857 case IPPROTO_IPIP: 10858 return true; 10859 case IPPROTO_GRE: { 10860 switch (skb->inner_protocol) { 10861 default: 10862 return false; 10863 case htons(ETH_P_IP): 10864 return true; 10865 case htons(ETH_P_IPV6): 10866 fallthrough; 10867 } 10868 } 10869 case IPPROTO_IPV6: 10870 /* Check ext headers of inner ipv6 */ 10871 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 10872 NULL); 10873 } 10874 return false; 10875 } 10876 10877 static netdev_features_t bnxt_features_check(struct sk_buff *skb, 10878 struct net_device *dev, 10879 netdev_features_t features) 10880 { 10881 struct bnxt *bp = netdev_priv(dev); 10882 u8 *l4_proto; 10883 10884 features = vlan_features_check(skb, features); 10885 switch (vlan_get_protocol(skb)) { 10886 case htons(ETH_P_IP): 10887 if (!skb->encapsulation) 10888 return features; 10889 l4_proto = &ip_hdr(skb)->protocol; 10890 if (bnxt_tunl_check(bp, skb, *l4_proto)) 10891 return features; 10892 break; 10893 case htons(ETH_P_IPV6): 10894 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), 10895 &l4_proto)) 10896 break; 10897 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) 10898 return features; 10899 break; 10900 } 10901 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 10902 } 10903 10904 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 10905 u32 *reg_buf) 10906 { 10907 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; 10908 struct hwrm_dbg_read_direct_input req = {0}; 10909 __le32 *dbg_reg_buf; 10910 dma_addr_t mapping; 10911 int rc, i; 10912 10913 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, 10914 &mapping, GFP_KERNEL); 10915 if (!dbg_reg_buf) 10916 
return -ENOMEM; 10917 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); 10918 req.host_dest_addr = cpu_to_le64(mapping); 10919 req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 10920 req.read_len32 = cpu_to_le32(num_words); 10921 mutex_lock(&bp->hwrm_cmd_lock); 10922 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10923 if (rc || resp->error_code) { 10924 rc = -EIO; 10925 goto dbg_rd_reg_exit; 10926 } 10927 for (i = 0; i < num_words; i++) 10928 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 10929 10930 dbg_rd_reg_exit: 10931 mutex_unlock(&bp->hwrm_cmd_lock); 10932 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); 10933 return rc; 10934 } 10935 10936 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 10937 u32 ring_id, u32 *prod, u32 *cons) 10938 { 10939 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; 10940 struct hwrm_dbg_ring_info_get_input req = {0}; 10941 int rc; 10942 10943 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); 10944 req.ring_type = ring_type; 10945 req.fw_ring_id = cpu_to_le32(ring_id); 10946 mutex_lock(&bp->hwrm_cmd_lock); 10947 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10948 if (!rc) { 10949 *prod = le32_to_cpu(resp->producer_index); 10950 *cons = le32_to_cpu(resp->consumer_index); 10951 } 10952 mutex_unlock(&bp->hwrm_cmd_lock); 10953 return rc; 10954 } 10955 10956 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 10957 { 10958 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 10959 int i = bnapi->index; 10960 10961 if (!txr) 10962 return; 10963 10964 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 10965 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 10966 txr->tx_cons); 10967 } 10968 10969 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 10970 { 10971 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 10972 int i = bnapi->index; 10973 10974 if (!rxr) 10975 return; 10976 10977 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 10978 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 10979 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 10980 rxr->rx_sw_agg_prod); 10981 } 10982 10983 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 10984 { 10985 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 10986 int i = bnapi->index; 10987 10988 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 10989 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 10990 } 10991 10992 static void bnxt_dbg_dump_states(struct bnxt *bp) 10993 { 10994 int i; 10995 struct bnxt_napi *bnapi; 10996 10997 for (i = 0; i < bp->cp_nr_rings; i++) { 10998 bnapi = bp->bnapi[i]; 10999 if (netif_msg_drv(bp)) { 11000 bnxt_dump_tx_sw_state(bnapi); 11001 bnxt_dump_rx_sw_state(bnapi); 11002 bnxt_dump_cp_sw_state(bnapi); 11003 } 11004 } 11005 } 11006 11007 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) 11008 { 11009 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 11010 struct hwrm_ring_reset_input req = {0}; 11011 struct bnxt_napi *bnapi = rxr->bnapi; 11012 struct bnxt_cp_ring_info *cpr; 11013 u16 cp_ring_id; 11014 11015 cpr = &bnapi->cp_ring; 11016 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 11017 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1); 11018 req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 11019 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 11020 return 
hwrm_send_message_silent(bp, &req, sizeof(req), 11021 HWRM_CMD_TIMEOUT); 11022 } 11023 11024 static void bnxt_reset_task(struct bnxt *bp, bool silent) 11025 { 11026 if (!silent) 11027 bnxt_dbg_dump_states(bp); 11028 if (netif_running(bp->dev)) { 11029 int rc; 11030 11031 if (silent) { 11032 bnxt_close_nic(bp, false, false); 11033 bnxt_open_nic(bp, false, false); 11034 } else { 11035 bnxt_ulp_stop(bp); 11036 bnxt_close_nic(bp, true, false); 11037 rc = bnxt_open_nic(bp, true, false); 11038 bnxt_ulp_start(bp, rc); 11039 } 11040 } 11041 } 11042 11043 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 11044 { 11045 struct bnxt *bp = netdev_priv(dev); 11046 11047 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 11048 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 11049 bnxt_queue_sp_work(bp); 11050 } 11051 11052 static void bnxt_fw_health_check(struct bnxt *bp) 11053 { 11054 struct bnxt_fw_health *fw_health = bp->fw_health; 11055 u32 val; 11056 11057 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11058 return; 11059 11060 if (fw_health->tmr_counter) { 11061 fw_health->tmr_counter--; 11062 return; 11063 } 11064 11065 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 11066 if (val == fw_health->last_fw_heartbeat) 11067 goto fw_reset; 11068 11069 fw_health->last_fw_heartbeat = val; 11070 11071 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 11072 if (val != fw_health->last_fw_reset_cnt) 11073 goto fw_reset; 11074 11075 fw_health->tmr_counter = fw_health->tmr_multiplier; 11076 return; 11077 11078 fw_reset: 11079 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event); 11080 bnxt_queue_sp_work(bp); 11081 } 11082 11083 static void bnxt_timer(struct timer_list *t) 11084 { 11085 struct bnxt *bp = from_timer(bp, t, timer); 11086 struct net_device *dev = bp->dev; 11087 11088 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 11089 return; 11090 11091 if (atomic_read(&bp->intr_sem) != 0) 11092 goto bnxt_restart_timer; 11093 11094 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 11095 bnxt_fw_health_check(bp); 11096 11097 if (bp->link_info.link_up && bp->stats_coal_ticks) { 11098 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 11099 bnxt_queue_sp_work(bp); 11100 } 11101 11102 if (bnxt_tc_flower_enabled(bp)) { 11103 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); 11104 bnxt_queue_sp_work(bp); 11105 } 11106 11107 #ifdef CONFIG_RFS_ACCEL 11108 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { 11109 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 11110 bnxt_queue_sp_work(bp); 11111 } 11112 #endif /*CONFIG_RFS_ACCEL*/ 11113 11114 if (bp->link_info.phy_retry) { 11115 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 11116 bp->link_info.phy_retry = false; 11117 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 11118 } else { 11119 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); 11120 bnxt_queue_sp_work(bp); 11121 } 11122 } 11123 11124 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && 11125 netif_carrier_ok(dev)) { 11126 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); 11127 bnxt_queue_sp_work(bp); 11128 } 11129 bnxt_restart_timer: 11130 mod_timer(&bp->timer, jiffies + bp->current_interval); 11131 } 11132 11133 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 11134 { 11135 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 11136 * set. 
If the device is being closed, bnxt_close() may be holding 11137 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 11138 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 11139 */ 11140 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 11141 rtnl_lock(); 11142 } 11143 11144 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 11145 { 11146 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 11147 rtnl_unlock(); 11148 } 11149 11150 /* Only called from bnxt_sp_task() */ 11151 static void bnxt_reset(struct bnxt *bp, bool silent) 11152 { 11153 bnxt_rtnl_lock_sp(bp); 11154 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 11155 bnxt_reset_task(bp, silent); 11156 bnxt_rtnl_unlock_sp(bp); 11157 } 11158 11159 /* Only called from bnxt_sp_task() */ 11160 static void bnxt_rx_ring_reset(struct bnxt *bp) 11161 { 11162 int i; 11163 11164 bnxt_rtnl_lock_sp(bp); 11165 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 11166 bnxt_rtnl_unlock_sp(bp); 11167 return; 11168 } 11169 /* Disable and flush TPA before resetting the RX ring */ 11170 if (bp->flags & BNXT_FLAG_TPA) 11171 bnxt_set_tpa(bp, false); 11172 for (i = 0; i < bp->rx_nr_rings; i++) { 11173 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 11174 struct bnxt_cp_ring_info *cpr; 11175 int rc; 11176 11177 if (!rxr->bnapi->in_reset) 11178 continue; 11179 11180 rc = bnxt_hwrm_rx_ring_reset(bp, i); 11181 if (rc) { 11182 if (rc == -EINVAL || rc == -EOPNOTSUPP) 11183 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 11184 else 11185 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 11186 rc); 11187 bnxt_reset_task(bp, true); 11188 break; 11189 } 11190 bnxt_free_one_rx_ring_skbs(bp, i); 11191 rxr->rx_prod = 0; 11192 rxr->rx_agg_prod = 0; 11193 rxr->rx_sw_agg_prod = 0; 11194 rxr->rx_next_cons = 0; 11195 rxr->bnapi->in_reset = false; 11196 bnxt_alloc_one_rx_ring(bp, i); 11197 cpr = &rxr->bnapi->cp_ring; 11198 cpr->sw_stats.rx.rx_resets++; 11199 if (bp->flags & BNXT_FLAG_AGG_RINGS) 11200 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 11201 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 11202 } 11203 if (bp->flags & BNXT_FLAG_TPA) 11204 bnxt_set_tpa(bp, true); 11205 bnxt_rtnl_unlock_sp(bp); 11206 } 11207 11208 static void bnxt_fw_reset_close(struct bnxt *bp) 11209 { 11210 bnxt_ulp_stop(bp); 11211 /* When firmware is in fatal state, quiesce device and disable 11212 * bus master to prevent any potential bad DMAs before freeing 11213 * kernel memory. 
11214 */ 11215 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 11216 u16 val = 0; 11217 11218 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 11219 if (val == 0xffff) 11220 bp->fw_reset_min_dsecs = 0; 11221 bnxt_tx_disable(bp); 11222 bnxt_disable_napi(bp); 11223 bnxt_disable_int_sync(bp); 11224 bnxt_free_irq(bp); 11225 bnxt_clear_int_mode(bp); 11226 pci_disable_device(bp->pdev); 11227 } 11228 __bnxt_close_nic(bp, true, false); 11229 bnxt_vf_reps_free(bp); 11230 bnxt_clear_int_mode(bp); 11231 bnxt_hwrm_func_drv_unrgtr(bp); 11232 if (pci_is_enabled(bp->pdev)) 11233 pci_disable_device(bp->pdev); 11234 bnxt_free_ctx_mem(bp); 11235 kfree(bp->ctx); 11236 bp->ctx = NULL; 11237 } 11238 11239 static bool is_bnxt_fw_ok(struct bnxt *bp) 11240 { 11241 struct bnxt_fw_health *fw_health = bp->fw_health; 11242 bool no_heartbeat = false, has_reset = false; 11243 u32 val; 11244 11245 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 11246 if (val == fw_health->last_fw_heartbeat) 11247 no_heartbeat = true; 11248 11249 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 11250 if (val != fw_health->last_fw_reset_cnt) 11251 has_reset = true; 11252 11253 if (!no_heartbeat && has_reset) 11254 return true; 11255 11256 return false; 11257 } 11258 11259 /* rtnl_lock is acquired before calling this function */ 11260 static void bnxt_force_fw_reset(struct bnxt *bp) 11261 { 11262 struct bnxt_fw_health *fw_health = bp->fw_health; 11263 u32 wait_dsecs; 11264 11265 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 11266 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 11267 return; 11268 11269 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11270 bnxt_fw_reset_close(bp); 11271 wait_dsecs = fw_health->master_func_wait_dsecs; 11272 if (fw_health->master) { 11273 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 11274 wait_dsecs = 0; 11275 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 11276 } else { 11277 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 11278 wait_dsecs = fw_health->normal_func_wait_dsecs; 11279 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 11280 } 11281 11282 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 11283 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 11284 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 11285 } 11286 11287 void bnxt_fw_exception(struct bnxt *bp) 11288 { 11289 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 11290 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 11291 bnxt_rtnl_lock_sp(bp); 11292 bnxt_force_fw_reset(bp); 11293 bnxt_rtnl_unlock_sp(bp); 11294 } 11295 11296 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 11297 * < 0 on error. 
11298 */ 11299 static int bnxt_get_registered_vfs(struct bnxt *bp) 11300 { 11301 #ifdef CONFIG_BNXT_SRIOV 11302 int rc; 11303 11304 if (!BNXT_PF(bp)) 11305 return 0; 11306 11307 rc = bnxt_hwrm_func_qcfg(bp); 11308 if (rc) { 11309 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 11310 return rc; 11311 } 11312 if (bp->pf.registered_vfs) 11313 return bp->pf.registered_vfs; 11314 if (bp->sriov_cfg) 11315 return 1; 11316 #endif 11317 return 0; 11318 } 11319 11320 void bnxt_fw_reset(struct bnxt *bp) 11321 { 11322 bnxt_rtnl_lock_sp(bp); 11323 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 11324 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 11325 int n = 0, tmo; 11326 11327 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11328 if (bp->pf.active_vfs && 11329 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 11330 n = bnxt_get_registered_vfs(bp); 11331 if (n < 0) { 11332 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 11333 n); 11334 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11335 dev_close(bp->dev); 11336 goto fw_reset_exit; 11337 } else if (n > 0) { 11338 u16 vf_tmo_dsecs = n * 10; 11339 11340 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 11341 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 11342 bp->fw_reset_state = 11343 BNXT_FW_RESET_STATE_POLL_VF; 11344 bnxt_queue_fw_reset_work(bp, HZ / 10); 11345 goto fw_reset_exit; 11346 } 11347 bnxt_fw_reset_close(bp); 11348 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 11349 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 11350 tmo = HZ / 10; 11351 } else { 11352 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 11353 tmo = bp->fw_reset_min_dsecs * HZ / 10; 11354 } 11355 bnxt_queue_fw_reset_work(bp, tmo); 11356 } 11357 fw_reset_exit: 11358 bnxt_rtnl_unlock_sp(bp); 11359 } 11360 11361 static void bnxt_chk_missed_irq(struct bnxt *bp) 11362 { 11363 int i; 11364 11365 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 11366 return; 11367 11368 for (i = 0; i < bp->cp_nr_rings; i++) { 11369 struct bnxt_napi *bnapi = bp->bnapi[i]; 11370 struct bnxt_cp_ring_info *cpr; 11371 u32 fw_ring_id; 11372 int j; 11373 11374 if (!bnapi) 11375 continue; 11376 11377 cpr = &bnapi->cp_ring; 11378 for (j = 0; j < 2; j++) { 11379 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 11380 u32 val[2]; 11381 11382 if (!cpr2 || cpr2->has_more_work || 11383 !bnxt_has_work(bp, cpr2)) 11384 continue; 11385 11386 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 11387 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 11388 continue; 11389 } 11390 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 11391 bnxt_dbg_hwrm_ring_info_get(bp, 11392 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 11393 fw_ring_id, &val[0], &val[1]); 11394 cpr->sw_stats.cmn.missed_irqs++; 11395 } 11396 } 11397 } 11398 11399 static void bnxt_cfg_ntp_filters(struct bnxt *); 11400 11401 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 11402 { 11403 struct bnxt_link_info *link_info = &bp->link_info; 11404 11405 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 11406 link_info->autoneg = BNXT_AUTONEG_SPEED; 11407 if (bp->hwrm_spec_code >= 0x10201) { 11408 if (link_info->auto_pause_setting & 11409 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 11410 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 11411 } else { 11412 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 11413 } 11414 link_info->advertising = link_info->auto_link_speeds; 11415 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 11416 } else { 11417 link_info->req_link_speed = link_info->force_link_speed; 11418 link_info->req_signal_mode = 
BNXT_SIG_MODE_NRZ; 11419 if (link_info->force_pam4_link_speed) { 11420 link_info->req_link_speed = 11421 link_info->force_pam4_link_speed; 11422 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 11423 } 11424 link_info->req_duplex = link_info->duplex_setting; 11425 } 11426 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 11427 link_info->req_flow_ctrl = 11428 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 11429 else 11430 link_info->req_flow_ctrl = link_info->force_pause_setting; 11431 } 11432 11433 static void bnxt_fw_echo_reply(struct bnxt *bp) 11434 { 11435 struct bnxt_fw_health *fw_health = bp->fw_health; 11436 struct hwrm_func_echo_response_input req = {0}; 11437 11438 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1); 11439 req.event_data1 = cpu_to_le32(fw_health->echo_req_data1); 11440 req.event_data2 = cpu_to_le32(fw_health->echo_req_data2); 11441 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 11442 } 11443 11444 static void bnxt_sp_task(struct work_struct *work) 11445 { 11446 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 11447 11448 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 11449 smp_mb__after_atomic(); 11450 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 11451 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 11452 return; 11453 } 11454 11455 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 11456 bnxt_cfg_rx_mode(bp); 11457 11458 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 11459 bnxt_cfg_ntp_filters(bp); 11460 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 11461 bnxt_hwrm_exec_fwd_req(bp); 11462 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 11463 bnxt_hwrm_port_qstats(bp, 0); 11464 bnxt_hwrm_port_qstats_ext(bp, 0); 11465 bnxt_accumulate_all_stats(bp); 11466 } 11467 11468 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 11469 int rc; 11470 11471 mutex_lock(&bp->link_lock); 11472 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 11473 &bp->sp_event)) 11474 bnxt_hwrm_phy_qcaps(bp); 11475 11476 rc = bnxt_update_link(bp, true); 11477 if (rc) 11478 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 11479 rc); 11480 11481 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 11482 &bp->sp_event)) 11483 bnxt_init_ethtool_link_settings(bp); 11484 mutex_unlock(&bp->link_lock); 11485 } 11486 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 11487 int rc; 11488 11489 mutex_lock(&bp->link_lock); 11490 rc = bnxt_update_phy_setting(bp); 11491 mutex_unlock(&bp->link_lock); 11492 if (rc) { 11493 netdev_warn(bp->dev, "update phy settings retry failed\n"); 11494 } else { 11495 bp->link_info.phy_retry = false; 11496 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 11497 } 11498 } 11499 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 11500 mutex_lock(&bp->link_lock); 11501 bnxt_get_port_module_status(bp); 11502 mutex_unlock(&bp->link_lock); 11503 } 11504 11505 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 11506 bnxt_tc_flow_stats_work(bp); 11507 11508 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 11509 bnxt_chk_missed_irq(bp); 11510 11511 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) 11512 bnxt_fw_echo_reply(bp); 11513 11514 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 11515 * must be the last functions to be called before exiting. 
11516 */ 11517 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 11518 bnxt_reset(bp, false); 11519 11520 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 11521 bnxt_reset(bp, true); 11522 11523 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 11524 bnxt_rx_ring_reset(bp); 11525 11526 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) 11527 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); 11528 11529 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 11530 if (!is_bnxt_fw_ok(bp)) 11531 bnxt_devlink_health_report(bp, 11532 BNXT_FW_EXCEPTION_SP_EVENT); 11533 } 11534 11535 smp_mb__before_atomic(); 11536 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 11537 } 11538 11539 /* Under rtnl_lock */ 11540 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 11541 int tx_xdp) 11542 { 11543 int max_rx, max_tx, tx_sets = 1; 11544 int tx_rings_needed, stats; 11545 int rx_rings = rx; 11546 int cp, vnics, rc; 11547 11548 if (tcs) 11549 tx_sets = tcs; 11550 11551 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 11552 if (rc) 11553 return rc; 11554 11555 if (max_rx < rx) 11556 return -ENOMEM; 11557 11558 tx_rings_needed = tx * tx_sets + tx_xdp; 11559 if (max_tx < tx_rings_needed) 11560 return -ENOMEM; 11561 11562 vnics = 1; 11563 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 11564 vnics += rx_rings; 11565 11566 if (bp->flags & BNXT_FLAG_AGG_RINGS) 11567 rx_rings <<= 1; 11568 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; 11569 stats = cp; 11570 if (BNXT_NEW_RM(bp)) { 11571 cp += bnxt_get_ulp_msix_num(bp); 11572 stats += bnxt_get_ulp_stat_ctxs(bp); 11573 } 11574 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 11575 stats, vnics); 11576 } 11577 11578 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 11579 { 11580 if (bp->bar2) { 11581 pci_iounmap(pdev, bp->bar2); 11582 bp->bar2 = NULL; 11583 } 11584 11585 if (bp->bar1) { 11586 pci_iounmap(pdev, bp->bar1); 11587 bp->bar1 = NULL; 11588 } 11589 11590 if (bp->bar0) { 11591 pci_iounmap(pdev, bp->bar0); 11592 bp->bar0 = NULL; 11593 } 11594 } 11595 11596 static void bnxt_cleanup_pci(struct bnxt *bp) 11597 { 11598 bnxt_unmap_bars(bp, bp->pdev); 11599 pci_release_regions(bp->pdev); 11600 if (pci_is_enabled(bp->pdev)) 11601 pci_disable_device(bp->pdev); 11602 } 11603 11604 static void bnxt_init_dflt_coal(struct bnxt *bp) 11605 { 11606 struct bnxt_coal *coal; 11607 11608 /* Tick values in micro seconds. 11609 * 1 coal_buf x bufs_per_record = 1 completion record. 
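 * For reference, the defaults programmed below are RX: 10 usecs / 30 bufs
 * (1 usec / 2 bufs for the *_irq variants) and TX: 28 usecs / 30 bufs,
 * with a NAPI budget of 64.  These are only initial values; they can
 * normally be adjusted at runtime through the standard ethtool coalescing
 * interface, e.g. ("eth0" is just a placeholder interface name):
 *
 *	ethtool -C eth0 rx-usecs 10 rx-frames 30 tx-usecs 28 tx-frames 30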
11610 */ 11611 coal = &bp->rx_coal; 11612 coal->coal_ticks = 10; 11613 coal->coal_bufs = 30; 11614 coal->coal_ticks_irq = 1; 11615 coal->coal_bufs_irq = 2; 11616 coal->idle_thresh = 50; 11617 coal->bufs_per_record = 2; 11618 coal->budget = 64; /* NAPI budget */ 11619 11620 coal = &bp->tx_coal; 11621 coal->coal_ticks = 28; 11622 coal->coal_bufs = 30; 11623 coal->coal_ticks_irq = 2; 11624 coal->coal_bufs_irq = 2; 11625 coal->bufs_per_record = 1; 11626 11627 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 11628 } 11629 11630 static int bnxt_fw_init_one_p1(struct bnxt *bp) 11631 { 11632 int rc; 11633 11634 bp->fw_cap = 0; 11635 rc = bnxt_hwrm_ver_get(bp); 11636 bnxt_try_map_fw_health_reg(bp); 11637 if (rc) { 11638 rc = bnxt_try_recover_fw(bp); 11639 if (rc) 11640 return rc; 11641 rc = bnxt_hwrm_ver_get(bp); 11642 if (rc) 11643 return rc; 11644 } 11645 11646 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { 11647 rc = bnxt_alloc_kong_hwrm_resources(bp); 11648 if (rc) 11649 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; 11650 } 11651 11652 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 11653 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { 11654 rc = bnxt_alloc_hwrm_short_cmd_req(bp); 11655 if (rc) 11656 return rc; 11657 } 11658 bnxt_nvm_cfg_ver_get(bp); 11659 11660 rc = bnxt_hwrm_func_reset(bp); 11661 if (rc) 11662 return -ENODEV; 11663 11664 bnxt_hwrm_fw_set_time(bp); 11665 return 0; 11666 } 11667 11668 static int bnxt_fw_init_one_p2(struct bnxt *bp) 11669 { 11670 int rc; 11671 11672 /* Get the MAX capabilities for this function */ 11673 rc = bnxt_hwrm_func_qcaps(bp); 11674 if (rc) { 11675 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 11676 rc); 11677 return -ENODEV; 11678 } 11679 11680 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 11681 if (rc) 11682 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 11683 rc); 11684 11685 if (bnxt_alloc_fw_health(bp)) { 11686 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 11687 } else { 11688 rc = bnxt_hwrm_error_recovery_qcfg(bp); 11689 if (rc) 11690 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 11691 rc); 11692 } 11693 11694 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 11695 if (rc) 11696 return -ENODEV; 11697 11698 bnxt_hwrm_func_qcfg(bp); 11699 bnxt_hwrm_vnic_qcaps(bp); 11700 bnxt_hwrm_port_led_qcaps(bp); 11701 bnxt_ethtool_init(bp); 11702 bnxt_dcb_init(bp); 11703 return 0; 11704 } 11705 11706 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 11707 { 11708 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; 11709 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 11710 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 11711 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 11712 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 11713 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 11714 bp->flags |= BNXT_FLAG_UDP_RSS_CAP; 11715 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 11716 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 11717 } 11718 } 11719 11720 static void bnxt_set_dflt_rfs(struct bnxt *bp) 11721 { 11722 struct net_device *dev = bp->dev; 11723 11724 dev->hw_features &= ~NETIF_F_NTUPLE; 11725 dev->features &= ~NETIF_F_NTUPLE; 11726 bp->flags &= ~BNXT_FLAG_RFS; 11727 if (bnxt_rfs_supported(bp)) { 11728 dev->hw_features |= NETIF_F_NTUPLE; 11729 if (bnxt_rfs_capable(bp)) { 11730 bp->flags |= BNXT_FLAG_RFS; 11731 dev->features |= NETIF_F_NTUPLE; 11732 } 11733 } 11734 } 11735 11736 static void bnxt_fw_init_one_p3(struct bnxt *bp) 11737 { 11738 struct pci_dev *pdev = bp->pdev; 11739 11740 bnxt_set_dflt_rss_hash_type(bp); 
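	/* The defaults just set hash IPv4/IPv6 and TCP flows; UDP RSS is
	 * added only on P4+ chips with HWRM spec >= 0x10501 (see
	 * bnxt_set_dflt_rss_hash_type() above).
	 */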
11741 bnxt_set_dflt_rfs(bp); 11742 11743 bnxt_get_wol_settings(bp); 11744 if (bp->flags & BNXT_FLAG_WOL_CAP) 11745 device_set_wakeup_enable(&pdev->dev, bp->wol); 11746 else 11747 device_set_wakeup_capable(&pdev->dev, false); 11748 11749 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 11750 bnxt_hwrm_coal_params_qcaps(bp); 11751 } 11752 11753 static int bnxt_fw_init_one(struct bnxt *bp) 11754 { 11755 int rc; 11756 11757 rc = bnxt_fw_init_one_p1(bp); 11758 if (rc) { 11759 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 11760 return rc; 11761 } 11762 rc = bnxt_fw_init_one_p2(bp); 11763 if (rc) { 11764 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 11765 return rc; 11766 } 11767 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 11768 if (rc) 11769 return rc; 11770 11771 /* In case fw capabilities have changed, destroy the unneeded 11772 * reporters and create newly capable ones. 11773 */ 11774 bnxt_dl_fw_reporters_destroy(bp, false); 11775 bnxt_dl_fw_reporters_create(bp); 11776 bnxt_fw_init_one_p3(bp); 11777 return 0; 11778 } 11779 11780 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 11781 { 11782 struct bnxt_fw_health *fw_health = bp->fw_health; 11783 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 11784 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 11785 u32 reg_type, reg_off, delay_msecs; 11786 11787 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 11788 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 11789 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 11790 switch (reg_type) { 11791 case BNXT_FW_HEALTH_REG_TYPE_CFG: 11792 pci_write_config_dword(bp->pdev, reg_off, val); 11793 break; 11794 case BNXT_FW_HEALTH_REG_TYPE_GRC: 11795 writel(reg_off & BNXT_GRC_BASE_MASK, 11796 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 11797 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 11798 fallthrough; 11799 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 11800 writel(val, bp->bar0 + reg_off); 11801 break; 11802 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 11803 writel(val, bp->bar1 + reg_off); 11804 break; 11805 } 11806 if (delay_msecs) { 11807 pci_read_config_dword(bp->pdev, 0, &val); 11808 msleep(delay_msecs); 11809 } 11810 } 11811 11812 static void bnxt_reset_all(struct bnxt *bp) 11813 { 11814 struct bnxt_fw_health *fw_health = bp->fw_health; 11815 int i, rc; 11816 11817 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 11818 bnxt_fw_reset_via_optee(bp); 11819 bp->fw_reset_timestamp = jiffies; 11820 return; 11821 } 11822 11823 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 11824 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 11825 bnxt_fw_reset_writel(bp, i); 11826 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 11827 struct hwrm_fw_reset_input req = {0}; 11828 11829 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); 11830 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 11831 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 11832 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 11833 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 11834 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 11835 if (rc != -ENODEV) 11836 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 11837 } 11838 bp->fw_reset_timestamp = jiffies; 11839 } 11840 11841 static bool bnxt_fw_reset_timeout(struct bnxt *bp) 11842 { 11843 return time_after(jiffies, bp->fw_reset_timestamp + 11844 (bp->fw_reset_max_dsecs * HZ / 10)); 11845 } 11846 11847 static void bnxt_fw_reset_task(struct work_struct *work) 11848 { 
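	/* Rough flow of the firmware reset state machine handled below
	 * (error paths omitted): POLL_VF -> POLL_FW_DOWN or ENABLE_DEV ->
	 * RESET_FW (recovery master only) -> ENABLE_DEV -> POLL_FW ->
	 * OPENING.  Each state either re-queues this delayed work with a
	 * suitable delay or falls through to the next case.
	 */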
11849 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 11850 int rc; 11851 11852 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 11853 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 11854 return; 11855 } 11856 11857 switch (bp->fw_reset_state) { 11858 case BNXT_FW_RESET_STATE_POLL_VF: { 11859 int n = bnxt_get_registered_vfs(bp); 11860 int tmo; 11861 11862 if (n < 0) { 11863 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 11864 n, jiffies_to_msecs(jiffies - 11865 bp->fw_reset_timestamp)); 11866 goto fw_reset_abort; 11867 } else if (n > 0) { 11868 if (bnxt_fw_reset_timeout(bp)) { 11869 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11870 bp->fw_reset_state = 0; 11871 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 11872 n); 11873 return; 11874 } 11875 bnxt_queue_fw_reset_work(bp, HZ / 10); 11876 return; 11877 } 11878 bp->fw_reset_timestamp = jiffies; 11879 rtnl_lock(); 11880 bnxt_fw_reset_close(bp); 11881 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 11882 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 11883 tmo = HZ / 10; 11884 } else { 11885 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 11886 tmo = bp->fw_reset_min_dsecs * HZ / 10; 11887 } 11888 rtnl_unlock(); 11889 bnxt_queue_fw_reset_work(bp, tmo); 11890 return; 11891 } 11892 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 11893 u32 val; 11894 11895 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 11896 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 11897 !bnxt_fw_reset_timeout(bp)) { 11898 bnxt_queue_fw_reset_work(bp, HZ / 5); 11899 return; 11900 } 11901 11902 if (!bp->fw_health->master) { 11903 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 11904 11905 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 11906 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 11907 return; 11908 } 11909 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 11910 } 11911 fallthrough; 11912 case BNXT_FW_RESET_STATE_RESET_FW: 11913 bnxt_reset_all(bp); 11914 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 11915 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 11916 return; 11917 case BNXT_FW_RESET_STATE_ENABLE_DEV: 11918 bnxt_inv_fw_health_reg(bp); 11919 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 11920 !bp->fw_reset_min_dsecs) { 11921 u16 val; 11922 11923 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 11924 if (val == 0xffff) { 11925 if (bnxt_fw_reset_timeout(bp)) { 11926 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); 11927 goto fw_reset_abort; 11928 } 11929 bnxt_queue_fw_reset_work(bp, HZ / 1000); 11930 return; 11931 } 11932 } 11933 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 11934 if (pci_enable_device(bp->pdev)) { 11935 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 11936 goto fw_reset_abort; 11937 } 11938 pci_set_master(bp->pdev); 11939 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 11940 fallthrough; 11941 case BNXT_FW_RESET_STATE_POLL_FW: 11942 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 11943 rc = __bnxt_hwrm_ver_get(bp, true); 11944 if (rc) { 11945 if (bnxt_fw_reset_timeout(bp)) { 11946 netdev_err(bp->dev, "Firmware reset aborted\n"); 11947 goto fw_reset_abort_status; 11948 } 11949 bnxt_queue_fw_reset_work(bp, HZ / 5); 11950 return; 11951 } 11952 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 11953 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 11954 fallthrough; 11955 case 
BNXT_FW_RESET_STATE_OPENING: 11956 while (!rtnl_trylock()) { 11957 bnxt_queue_fw_reset_work(bp, HZ / 10); 11958 return; 11959 } 11960 rc = bnxt_open(bp->dev); 11961 if (rc) { 11962 netdev_err(bp->dev, "bnxt_open_nic() failed\n"); 11963 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11964 dev_close(bp->dev); 11965 } 11966 11967 bp->fw_reset_state = 0; 11968 /* Make sure fw_reset_state is 0 before clearing the flag */ 11969 smp_mb__before_atomic(); 11970 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11971 bnxt_ulp_start(bp, rc); 11972 if (!rc) 11973 bnxt_reenable_sriov(bp); 11974 bnxt_vf_reps_alloc(bp); 11975 bnxt_vf_reps_open(bp); 11976 bnxt_dl_health_recovery_done(bp); 11977 bnxt_dl_health_status_update(bp, true); 11978 rtnl_unlock(); 11979 break; 11980 } 11981 return; 11982 11983 fw_reset_abort_status: 11984 if (bp->fw_health->status_reliable || 11985 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 11986 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 11987 11988 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 11989 } 11990 fw_reset_abort: 11991 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11992 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 11993 bnxt_dl_health_status_update(bp, false); 11994 bp->fw_reset_state = 0; 11995 rtnl_lock(); 11996 dev_close(bp->dev); 11997 rtnl_unlock(); 11998 } 11999 12000 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 12001 { 12002 int rc; 12003 struct bnxt *bp = netdev_priv(dev); 12004 12005 SET_NETDEV_DEV(dev, &pdev->dev); 12006 12007 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 12008 rc = pci_enable_device(pdev); 12009 if (rc) { 12010 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 12011 goto init_err; 12012 } 12013 12014 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 12015 dev_err(&pdev->dev, 12016 "Cannot find PCI device base address, aborting\n"); 12017 rc = -ENODEV; 12018 goto init_err_disable; 12019 } 12020 12021 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 12022 if (rc) { 12023 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 12024 goto init_err_disable; 12025 } 12026 12027 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 12028 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 12029 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 12030 rc = -EIO; 12031 goto init_err_release; 12032 } 12033 12034 pci_set_master(pdev); 12035 12036 bp->dev = dev; 12037 bp->pdev = pdev; 12038 12039 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 12040 * determines the BAR size. 
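 * (bnxt_map_db_bar() further below maps BAR 2 with pci_iomap() once
 * bp->db_size is known.)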
12041 */ 12042 bp->bar0 = pci_ioremap_bar(pdev, 0); 12043 if (!bp->bar0) { 12044 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 12045 rc = -ENOMEM; 12046 goto init_err_release; 12047 } 12048 12049 bp->bar2 = pci_ioremap_bar(pdev, 4); 12050 if (!bp->bar2) { 12051 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 12052 rc = -ENOMEM; 12053 goto init_err_release; 12054 } 12055 12056 pci_enable_pcie_error_reporting(pdev); 12057 12058 INIT_WORK(&bp->sp_task, bnxt_sp_task); 12059 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 12060 12061 spin_lock_init(&bp->ntp_fltr_lock); 12062 #if BITS_PER_LONG == 32 12063 spin_lock_init(&bp->db_lock); 12064 #endif 12065 12066 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 12067 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 12068 12069 bnxt_init_dflt_coal(bp); 12070 12071 timer_setup(&bp->timer, bnxt_timer, 0); 12072 bp->current_interval = BNXT_TIMER_INTERVAL; 12073 12074 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 12075 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 12076 12077 clear_bit(BNXT_STATE_OPEN, &bp->state); 12078 return 0; 12079 12080 init_err_release: 12081 bnxt_unmap_bars(bp, pdev); 12082 pci_release_regions(pdev); 12083 12084 init_err_disable: 12085 pci_disable_device(pdev); 12086 12087 init_err: 12088 return rc; 12089 } 12090 12091 /* rtnl_lock held */ 12092 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 12093 { 12094 struct sockaddr *addr = p; 12095 struct bnxt *bp = netdev_priv(dev); 12096 int rc = 0; 12097 12098 if (!is_valid_ether_addr(addr->sa_data)) 12099 return -EADDRNOTAVAIL; 12100 12101 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 12102 return 0; 12103 12104 rc = bnxt_approve_mac(bp, addr->sa_data, true); 12105 if (rc) 12106 return rc; 12107 12108 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 12109 if (netif_running(dev)) { 12110 bnxt_close_nic(bp, false, false); 12111 rc = bnxt_open_nic(bp, false, false); 12112 } 12113 12114 return rc; 12115 } 12116 12117 /* rtnl_lock held */ 12118 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 12119 { 12120 struct bnxt *bp = netdev_priv(dev); 12121 12122 if (netif_running(dev)) 12123 bnxt_close_nic(bp, true, false); 12124 12125 dev->mtu = new_mtu; 12126 bnxt_set_ring_params(bp); 12127 12128 if (netif_running(dev)) 12129 return bnxt_open_nic(bp, true, false); 12130 12131 return 0; 12132 } 12133 12134 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 12135 { 12136 struct bnxt *bp = netdev_priv(dev); 12137 bool sh = false; 12138 int rc; 12139 12140 if (tc > bp->max_tc) { 12141 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 12142 tc, bp->max_tc); 12143 return -EINVAL; 12144 } 12145 12146 if (netdev_get_num_tc(dev) == tc) 12147 return 0; 12148 12149 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 12150 sh = true; 12151 12152 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 12153 sh, tc, bp->tx_nr_rings_xdp); 12154 if (rc) 12155 return rc; 12156 12157 /* Needs to close the device and do hw resource re-allocations */ 12158 if (netif_running(bp->dev)) 12159 bnxt_close_nic(bp, true, false); 12160 12161 if (tc) { 12162 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 12163 netdev_set_num_tc(dev, tc); 12164 } else { 12165 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 12166 netdev_reset_tc(dev); 12167 } 12168 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 12169 bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 12170 bp->tx_nr_rings + bp->rx_nr_rings; 12171 12172 if (netif_running(bp->dev)) 12173 return bnxt_open_nic(bp, true, false); 12174 12175 return 0; 12176 } 12177 12178 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 12179 void *cb_priv) 12180 { 12181 struct bnxt *bp = cb_priv; 12182 12183 if (!bnxt_tc_flower_enabled(bp) || 12184 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 12185 return -EOPNOTSUPP; 12186 12187 switch (type) { 12188 case TC_SETUP_CLSFLOWER: 12189 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 12190 default: 12191 return -EOPNOTSUPP; 12192 } 12193 } 12194 12195 LIST_HEAD(bnxt_block_cb_list); 12196 12197 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 12198 void *type_data) 12199 { 12200 struct bnxt *bp = netdev_priv(dev); 12201 12202 switch (type) { 12203 case TC_SETUP_BLOCK: 12204 return flow_block_cb_setup_simple(type_data, 12205 &bnxt_block_cb_list, 12206 bnxt_setup_tc_block_cb, 12207 bp, bp, true); 12208 case TC_SETUP_QDISC_MQPRIO: { 12209 struct tc_mqprio_qopt *mqprio = type_data; 12210 12211 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 12212 12213 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 12214 } 12215 default: 12216 return -EOPNOTSUPP; 12217 } 12218 } 12219 12220 #ifdef CONFIG_RFS_ACCEL 12221 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 12222 struct bnxt_ntuple_filter *f2) 12223 { 12224 struct flow_keys *keys1 = &f1->fkeys; 12225 struct flow_keys *keys2 = &f2->fkeys; 12226 12227 if (keys1->basic.n_proto != keys2->basic.n_proto || 12228 keys1->basic.ip_proto != keys2->basic.ip_proto) 12229 return false; 12230 12231 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 12232 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 12233 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) 12234 return false; 12235 } else { 12236 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, 12237 sizeof(keys1->addrs.v6addrs.src)) || 12238 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, 12239 sizeof(keys1->addrs.v6addrs.dst))) 12240 return false; 12241 } 12242 12243 if (keys1->ports.ports == keys2->ports.ports && 12244 keys1->control.flags == keys2->control.flags && 12245 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 12246 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 12247 return true; 12248 12249 return false; 12250 } 12251 12252 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 12253 u16 rxq_index, u32 flow_id) 12254 { 12255 struct bnxt *bp = netdev_priv(dev); 12256 struct bnxt_ntuple_filter *fltr, *new_fltr; 12257 struct flow_keys *fkeys; 12258 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 12259 int rc = 0, idx, bit_id, l2_idx = 0; 12260 struct hlist_head *head; 12261 u32 flags; 12262 12263 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 12264 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 12265 int off = 0, j; 12266 12267 netif_addr_lock_bh(dev); 12268 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 12269 if (ether_addr_equal(eth->h_dest, 12270 vnic->uc_list + off)) { 12271 l2_idx = j + 1; 12272 break; 12273 } 12274 } 12275 netif_addr_unlock_bh(dev); 12276 if (!l2_idx) 12277 return -EINVAL; 12278 } 12279 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 12280 if (!new_fltr) 12281 return -ENOMEM; 12282 12283 fkeys = &new_fltr->fkeys; 12284 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 12285 rc = -EPROTONOSUPPORT; 12286 goto err_free; 
12287 } 12288 12289 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 12290 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 12291 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 12292 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 12293 rc = -EPROTONOSUPPORT; 12294 goto err_free; 12295 } 12296 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 12297 bp->hwrm_spec_code < 0x10601) { 12298 rc = -EPROTONOSUPPORT; 12299 goto err_free; 12300 } 12301 flags = fkeys->control.flags; 12302 if (((flags & FLOW_DIS_ENCAPSULATION) && 12303 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 12304 rc = -EPROTONOSUPPORT; 12305 goto err_free; 12306 } 12307 12308 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 12309 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 12310 12311 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 12312 head = &bp->ntp_fltr_hash_tbl[idx]; 12313 rcu_read_lock(); 12314 hlist_for_each_entry_rcu(fltr, head, hash) { 12315 if (bnxt_fltr_match(fltr, new_fltr)) { 12316 rcu_read_unlock(); 12317 rc = 0; 12318 goto err_free; 12319 } 12320 } 12321 rcu_read_unlock(); 12322 12323 spin_lock_bh(&bp->ntp_fltr_lock); 12324 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 12325 BNXT_NTP_FLTR_MAX_FLTR, 0); 12326 if (bit_id < 0) { 12327 spin_unlock_bh(&bp->ntp_fltr_lock); 12328 rc = -ENOMEM; 12329 goto err_free; 12330 } 12331 12332 new_fltr->sw_id = (u16)bit_id; 12333 new_fltr->flow_id = flow_id; 12334 new_fltr->l2_fltr_idx = l2_idx; 12335 new_fltr->rxq = rxq_index; 12336 hlist_add_head_rcu(&new_fltr->hash, head); 12337 bp->ntp_fltr_count++; 12338 spin_unlock_bh(&bp->ntp_fltr_lock); 12339 12340 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 12341 bnxt_queue_sp_work(bp); 12342 12343 return new_fltr->sw_id; 12344 12345 err_free: 12346 kfree(new_fltr); 12347 return rc; 12348 } 12349 12350 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 12351 { 12352 int i; 12353 12354 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 12355 struct hlist_head *head; 12356 struct hlist_node *tmp; 12357 struct bnxt_ntuple_filter *fltr; 12358 int rc; 12359 12360 head = &bp->ntp_fltr_hash_tbl[i]; 12361 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 12362 bool del = false; 12363 12364 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 12365 if (rps_may_expire_flow(bp->dev, fltr->rxq, 12366 fltr->flow_id, 12367 fltr->sw_id)) { 12368 bnxt_hwrm_cfa_ntuple_filter_free(bp, 12369 fltr); 12370 del = true; 12371 } 12372 } else { 12373 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 12374 fltr); 12375 if (rc) 12376 del = true; 12377 else 12378 set_bit(BNXT_FLTR_VALID, &fltr->state); 12379 } 12380 12381 if (del) { 12382 spin_lock_bh(&bp->ntp_fltr_lock); 12383 hlist_del_rcu(&fltr->hash); 12384 bp->ntp_fltr_count--; 12385 spin_unlock_bh(&bp->ntp_fltr_lock); 12386 synchronize_rcu(); 12387 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 12388 kfree(fltr); 12389 } 12390 } 12391 } 12392 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 12393 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 12394 } 12395 12396 #else 12397 12398 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 12399 { 12400 } 12401 12402 #endif /* CONFIG_RFS_ACCEL */ 12403 12404 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table) 12405 { 12406 struct bnxt *bp = netdev_priv(netdev); 12407 struct udp_tunnel_info ti; 12408 unsigned int cmd; 12409 12410 udp_tunnel_nic_get_port(netdev, table, 0, &ti); 12411 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) { 12412 bp->vxlan_port = ti.port; 12413 cmd = 
TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 12414 } else { 12415 bp->nge_port = ti.port; 12416 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 12417 } 12418 12419 if (ti.port) 12420 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd); 12421 12422 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 12423 } 12424 12425 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 12426 .sync_table = bnxt_udp_tunnel_sync, 12427 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 12428 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 12429 .tables = { 12430 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 12431 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 12432 }, 12433 }; 12434 12435 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 12436 struct net_device *dev, u32 filter_mask, 12437 int nlflags) 12438 { 12439 struct bnxt *bp = netdev_priv(dev); 12440 12441 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 12442 nlflags, filter_mask, NULL); 12443 } 12444 12445 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 12446 u16 flags, struct netlink_ext_ack *extack) 12447 { 12448 struct bnxt *bp = netdev_priv(dev); 12449 struct nlattr *attr, *br_spec; 12450 int rem, rc = 0; 12451 12452 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 12453 return -EOPNOTSUPP; 12454 12455 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 12456 if (!br_spec) 12457 return -EINVAL; 12458 12459 nla_for_each_nested(attr, br_spec, rem) { 12460 u16 mode; 12461 12462 if (nla_type(attr) != IFLA_BRIDGE_MODE) 12463 continue; 12464 12465 if (nla_len(attr) < sizeof(mode)) 12466 return -EINVAL; 12467 12468 mode = nla_get_u16(attr); 12469 if (mode == bp->br_mode) 12470 break; 12471 12472 rc = bnxt_hwrm_set_br_mode(bp, mode); 12473 if (!rc) 12474 bp->br_mode = mode; 12475 break; 12476 } 12477 return rc; 12478 } 12479 12480 int bnxt_get_port_parent_id(struct net_device *dev, 12481 struct netdev_phys_item_id *ppid) 12482 { 12483 struct bnxt *bp = netdev_priv(dev); 12484 12485 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 12486 return -EOPNOTSUPP; 12487 12488 /* The PF and its VF-reps only support the switchdev framework */ 12489 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 12490 return -EOPNOTSUPP; 12491 12492 ppid->id_len = sizeof(bp->dsn); 12493 memcpy(ppid->id, bp->dsn, ppid->id_len); 12494 12495 return 0; 12496 } 12497 12498 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) 12499 { 12500 struct bnxt *bp = netdev_priv(dev); 12501 12502 return &bp->dl_port; 12503 } 12504 12505 static const struct net_device_ops bnxt_netdev_ops = { 12506 .ndo_open = bnxt_open, 12507 .ndo_start_xmit = bnxt_start_xmit, 12508 .ndo_stop = bnxt_close, 12509 .ndo_get_stats64 = bnxt_get_stats64, 12510 .ndo_set_rx_mode = bnxt_set_rx_mode, 12511 .ndo_do_ioctl = bnxt_ioctl, 12512 .ndo_validate_addr = eth_validate_addr, 12513 .ndo_set_mac_address = bnxt_change_mac_addr, 12514 .ndo_change_mtu = bnxt_change_mtu, 12515 .ndo_fix_features = bnxt_fix_features, 12516 .ndo_set_features = bnxt_set_features, 12517 .ndo_features_check = bnxt_features_check, 12518 .ndo_tx_timeout = bnxt_tx_timeout, 12519 #ifdef CONFIG_BNXT_SRIOV 12520 .ndo_get_vf_config = bnxt_get_vf_config, 12521 .ndo_set_vf_mac = bnxt_set_vf_mac, 12522 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 12523 .ndo_set_vf_rate = bnxt_set_vf_bw, 12524 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 12525 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 12526 .ndo_set_vf_trust =
bnxt_set_vf_trust, 12527 #endif 12528 .ndo_setup_tc = bnxt_setup_tc, 12529 #ifdef CONFIG_RFS_ACCEL 12530 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 12531 #endif 12532 .ndo_bpf = bnxt_xdp, 12533 .ndo_xdp_xmit = bnxt_xdp_xmit, 12534 .ndo_bridge_getlink = bnxt_bridge_getlink, 12535 .ndo_bridge_setlink = bnxt_bridge_setlink, 12536 .ndo_get_devlink_port = bnxt_get_devlink_port, 12537 }; 12538 12539 static void bnxt_remove_one(struct pci_dev *pdev) 12540 { 12541 struct net_device *dev = pci_get_drvdata(pdev); 12542 struct bnxt *bp = netdev_priv(dev); 12543 12544 if (BNXT_PF(bp)) 12545 bnxt_sriov_disable(bp); 12546 12547 if (BNXT_PF(bp)) 12548 devlink_port_type_clear(&bp->dl_port); 12549 pci_disable_pcie_error_reporting(pdev); 12550 unregister_netdev(dev); 12551 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12552 /* Flush any pending tasks */ 12553 cancel_work_sync(&bp->sp_task); 12554 cancel_delayed_work_sync(&bp->fw_reset_task); 12555 bp->sp_event = 0; 12556 12557 bnxt_dl_fw_reporters_destroy(bp, true); 12558 bnxt_dl_unregister(bp); 12559 bnxt_shutdown_tc(bp); 12560 12561 bnxt_clear_int_mode(bp); 12562 bnxt_hwrm_func_drv_unrgtr(bp); 12563 bnxt_free_hwrm_resources(bp); 12564 bnxt_free_hwrm_short_cmd_req(bp); 12565 bnxt_ethtool_free(bp); 12566 bnxt_dcb_free(bp); 12567 kfree(bp->edev); 12568 bp->edev = NULL; 12569 kfree(bp->fw_health); 12570 bp->fw_health = NULL; 12571 bnxt_cleanup_pci(bp); 12572 bnxt_free_ctx_mem(bp); 12573 kfree(bp->ctx); 12574 bp->ctx = NULL; 12575 kfree(bp->rss_indir_tbl); 12576 bp->rss_indir_tbl = NULL; 12577 bnxt_free_port_stats(bp); 12578 free_netdev(dev); 12579 } 12580 12581 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 12582 { 12583 int rc = 0; 12584 struct bnxt_link_info *link_info = &bp->link_info; 12585 12586 bp->phy_flags = 0; 12587 rc = bnxt_hwrm_phy_qcaps(bp); 12588 if (rc) { 12589 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 12590 rc); 12591 return rc; 12592 } 12593 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) 12594 bp->dev->priv_flags |= IFF_SUPP_NOFCS; 12595 else 12596 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; 12597 if (!fw_dflt) 12598 return 0; 12599 12600 rc = bnxt_update_link(bp, false); 12601 if (rc) { 12602 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 12603 rc); 12604 return rc; 12605 } 12606 12607 /* Older firmware does not have supported_auto_speeds, so assume 12608 * that all supported speeds can be autonegotiated. 
12609 */ 12610 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 12611 link_info->support_auto_speeds = link_info->support_speeds; 12612 12613 bnxt_init_ethtool_link_settings(bp); 12614 return 0; 12615 } 12616 12617 static int bnxt_get_max_irq(struct pci_dev *pdev) 12618 { 12619 u16 ctrl; 12620 12621 if (!pdev->msix_cap) 12622 return 1; 12623 12624 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 12625 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 12626 } 12627 12628 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 12629 int *max_cp) 12630 { 12631 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 12632 int max_ring_grps = 0, max_irq; 12633 12634 *max_tx = hw_resc->max_tx_rings; 12635 *max_rx = hw_resc->max_rx_rings; 12636 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 12637 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 12638 bnxt_get_ulp_msix_num(bp), 12639 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); 12640 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 12641 *max_cp = min_t(int, *max_cp, max_irq); 12642 max_ring_grps = hw_resc->max_hw_ring_grps; 12643 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 12644 *max_cp -= 1; 12645 *max_rx -= 2; 12646 } 12647 if (bp->flags & BNXT_FLAG_AGG_RINGS) 12648 *max_rx >>= 1; 12649 if (bp->flags & BNXT_FLAG_CHIP_P5) { 12650 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 12651 /* On P5 chips, max_cp output param should be available NQs */ 12652 *max_cp = max_irq; 12653 } 12654 *max_rx = min_t(int, *max_rx, max_ring_grps); 12655 } 12656 12657 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 12658 { 12659 int rx, tx, cp; 12660 12661 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 12662 *max_rx = rx; 12663 *max_tx = tx; 12664 if (!rx || !tx || !cp) 12665 return -ENOMEM; 12666 12667 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 12668 } 12669 12670 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 12671 bool shared) 12672 { 12673 int rc; 12674 12675 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 12676 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 12677 /* Not enough rings, try disabling agg rings. */ 12678 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 12679 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 12680 if (rc) { 12681 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 12682 bp->flags |= BNXT_FLAG_AGG_RINGS; 12683 return rc; 12684 } 12685 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 12686 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12687 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12688 bnxt_set_ring_params(bp); 12689 } 12690 12691 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 12692 int max_cp, max_stat, max_irq; 12693 12694 /* Reserve minimum resources for RoCE */ 12695 max_cp = bnxt_get_max_func_cp_rings(bp); 12696 max_stat = bnxt_get_max_func_stat_ctxs(bp); 12697 max_irq = bnxt_get_max_func_irqs(bp); 12698 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 12699 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 12700 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 12701 return 0; 12702 12703 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 12704 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 12705 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 12706 max_cp = min_t(int, max_cp, max_irq); 12707 max_cp = min_t(int, max_cp, max_stat); 12708 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 12709 if (rc) 12710 rc = 0; 12711 } 12712 return rc; 12713 } 12714 12715 /* In initial default shared ring setting, each shared ring must have a 12716 * RX/TX ring pair. 
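 * bnxt_trim_dflt_sh_rings() therefore clamps the completion, RX and
 * TX-per-TC ring counts to the smaller of the current RX and TX-per-TC
 * values.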
12717 */ 12718 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 12719 { 12720 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 12721 bp->rx_nr_rings = bp->cp_nr_rings; 12722 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 12723 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 12724 } 12725 12726 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 12727 { 12728 int dflt_rings, max_rx_rings, max_tx_rings, rc; 12729 12730 if (!bnxt_can_reserve_rings(bp)) 12731 return 0; 12732 12733 if (sh) 12734 bp->flags |= BNXT_FLAG_SHARED_RINGS; 12735 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 12736 /* Reduce default rings on multi-port cards so that total default 12737 * rings do not exceed CPU count. 12738 */ 12739 if (bp->port_count > 1) { 12740 int max_rings = 12741 max_t(int, num_online_cpus() / bp->port_count, 1); 12742 12743 dflt_rings = min_t(int, dflt_rings, max_rings); 12744 } 12745 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 12746 if (rc) 12747 return rc; 12748 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 12749 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 12750 if (sh) 12751 bnxt_trim_dflt_sh_rings(bp); 12752 else 12753 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 12754 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 12755 12756 rc = __bnxt_reserve_rings(bp); 12757 if (rc) 12758 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 12759 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 12760 if (sh) 12761 bnxt_trim_dflt_sh_rings(bp); 12762 12763 /* Rings may have been trimmed, re-reserve the trimmed rings. */ 12764 if (bnxt_need_reserve_rings(bp)) { 12765 rc = __bnxt_reserve_rings(bp); 12766 if (rc) 12767 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); 12768 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 12769 } 12770 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 12771 bp->rx_nr_rings++; 12772 bp->cp_nr_rings++; 12773 } 12774 if (rc) { 12775 bp->tx_nr_rings = 0; 12776 bp->rx_nr_rings = 0; 12777 } 12778 return rc; 12779 } 12780 12781 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) 12782 { 12783 int rc; 12784 12785 if (bp->tx_nr_rings) 12786 return 0; 12787 12788 bnxt_ulp_irq_stop(bp); 12789 bnxt_clear_int_mode(bp); 12790 rc = bnxt_set_dflt_rings(bp, true); 12791 if (rc) { 12792 netdev_err(bp->dev, "Not enough rings available.\n"); 12793 goto init_dflt_ring_err; 12794 } 12795 rc = bnxt_init_int_mode(bp); 12796 if (rc) 12797 goto init_dflt_ring_err; 12798 12799 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 12800 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { 12801 bp->flags |= BNXT_FLAG_RFS; 12802 bp->dev->features |= NETIF_F_NTUPLE; 12803 } 12804 init_dflt_ring_err: 12805 bnxt_ulp_irq_restart(bp, rc); 12806 return rc; 12807 } 12808 12809 int bnxt_restore_pf_fw_resources(struct bnxt *bp) 12810 { 12811 int rc; 12812 12813 ASSERT_RTNL(); 12814 bnxt_hwrm_func_qcaps(bp); 12815 12816 if (netif_running(bp->dev)) 12817 __bnxt_close_nic(bp, true, false); 12818 12819 bnxt_ulp_irq_stop(bp); 12820 bnxt_clear_int_mode(bp); 12821 rc = bnxt_init_int_mode(bp); 12822 bnxt_ulp_irq_restart(bp, rc); 12823 12824 if (netif_running(bp->dev)) { 12825 if (rc) 12826 dev_close(bp->dev); 12827 else 12828 rc = bnxt_open_nic(bp, true, false); 12829 } 12830 12831 return rc; 12832 } 12833 12834 static int bnxt_init_mac_addr(struct bnxt *bp) 12835 { 12836 int rc = 0; 12837 12838 if (BNXT_PF(bp)) { 12839 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); 12840 } else { 12841 #ifdef CONFIG_BNXT_SRIOV 12842 struct bnxt_vf_info *vf 
= &bp->vf; 12843 bool strict_approval = true; 12844 12845 if (is_valid_ether_addr(vf->mac_addr)) { 12846 /* overwrite netdev dev_addr with admin VF MAC */ 12847 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); 12848 /* Older PF driver or firmware may not approve this 12849 * correctly. 12850 */ 12851 strict_approval = false; 12852 } else { 12853 eth_hw_addr_random(bp->dev); 12854 } 12855 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); 12856 #endif 12857 } 12858 return rc; 12859 } 12860 12861 #define BNXT_VPD_LEN 512 12862 static void bnxt_vpd_read_info(struct bnxt *bp) 12863 { 12864 struct pci_dev *pdev = bp->pdev; 12865 int i, len, pos, ro_size, size; 12866 ssize_t vpd_size; 12867 u8 *vpd_data; 12868 12869 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL); 12870 if (!vpd_data) 12871 return; 12872 12873 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data); 12874 if (vpd_size <= 0) { 12875 netdev_err(bp->dev, "Unable to read VPD\n"); 12876 goto exit; 12877 } 12878 12879 i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); 12880 if (i < 0) { 12881 netdev_err(bp->dev, "VPD READ-Only not found\n"); 12882 goto exit; 12883 } 12884 12885 ro_size = pci_vpd_lrdt_size(&vpd_data[i]); 12886 i += PCI_VPD_LRDT_TAG_SIZE; 12887 if (i + ro_size > vpd_size) 12888 goto exit; 12889 12890 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, 12891 PCI_VPD_RO_KEYWORD_PARTNO); 12892 if (pos < 0) 12893 goto read_sn; 12894 12895 len = pci_vpd_info_field_size(&vpd_data[pos]); 12896 pos += PCI_VPD_INFO_FLD_HDR_SIZE; 12897 if (len + pos > vpd_size) 12898 goto read_sn; 12899 12900 size = min(len, BNXT_VPD_FLD_LEN - 1); 12901 memcpy(bp->board_partno, &vpd_data[pos], size); 12902 12903 read_sn: 12904 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, 12905 PCI_VPD_RO_KEYWORD_SERIALNO); 12906 if (pos < 0) 12907 goto exit; 12908 12909 len = pci_vpd_info_field_size(&vpd_data[pos]); 12910 pos += PCI_VPD_INFO_FLD_HDR_SIZE; 12911 if (len + pos > vpd_size) 12912 goto exit; 12913 12914 size = min(len, BNXT_VPD_FLD_LEN - 1); 12915 memcpy(bp->board_serialno, &vpd_data[pos], size); 12916 exit: 12917 kfree(vpd_data); 12918 } 12919 12920 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) 12921 { 12922 struct pci_dev *pdev = bp->pdev; 12923 u64 qword; 12924 12925 qword = pci_get_dsn(pdev); 12926 if (!qword) { 12927 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); 12928 return -EOPNOTSUPP; 12929 } 12930 12931 put_unaligned_le64(qword, dsn); 12932 12933 bp->flags |= BNXT_FLAG_DSN_VALID; 12934 return 0; 12935 } 12936 12937 static int bnxt_map_db_bar(struct bnxt *bp) 12938 { 12939 if (!bp->db_size) 12940 return -ENODEV; 12941 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); 12942 if (!bp->bar1) 12943 return -ENOMEM; 12944 return 0; 12945 } 12946 12947 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 12948 { 12949 struct net_device *dev; 12950 struct bnxt *bp; 12951 int rc, max_irqs; 12952 12953 if (pci_is_bridge(pdev)) 12954 return -ENODEV; 12955 12956 /* Clear any pending DMA transactions from crash kernel 12957 * while loading driver in capture kernel. 
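 * The device may still be mastering DMA programmed by the crashed
 * kernel, so it is quiesced with pci_clear_master() and a function
 * level reset before any new mappings are set up.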
12958 */ 12959 if (is_kdump_kernel()) { 12960 pci_clear_master(pdev); 12961 pcie_flr(pdev); 12962 } 12963 12964 max_irqs = bnxt_get_max_irq(pdev); 12965 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); 12966 if (!dev) 12967 return -ENOMEM; 12968 12969 bp = netdev_priv(dev); 12970 bp->msg_enable = BNXT_DEF_MSG_ENABLE; 12971 bnxt_set_max_func_irqs(bp, max_irqs); 12972 12973 if (bnxt_vf_pciid(ent->driver_data)) 12974 bp->flags |= BNXT_FLAG_VF; 12975 12976 if (pdev->msix_cap) 12977 bp->flags |= BNXT_FLAG_MSIX_CAP; 12978 12979 rc = bnxt_init_board(pdev, dev); 12980 if (rc < 0) 12981 goto init_err_free; 12982 12983 dev->netdev_ops = &bnxt_netdev_ops; 12984 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 12985 dev->ethtool_ops = &bnxt_ethtool_ops; 12986 pci_set_drvdata(pdev, dev); 12987 12988 rc = bnxt_alloc_hwrm_resources(bp); 12989 if (rc) 12990 goto init_err_pci_clean; 12991 12992 mutex_init(&bp->hwrm_cmd_lock); 12993 mutex_init(&bp->link_lock); 12994 12995 rc = bnxt_fw_init_one_p1(bp); 12996 if (rc) 12997 goto init_err_pci_clean; 12998 12999 if (BNXT_PF(bp)) 13000 bnxt_vpd_read_info(bp); 13001 13002 if (BNXT_CHIP_P5(bp)) { 13003 bp->flags |= BNXT_FLAG_CHIP_P5; 13004 if (BNXT_CHIP_SR2(bp)) 13005 bp->flags |= BNXT_FLAG_CHIP_SR2; 13006 } 13007 13008 rc = bnxt_alloc_rss_indir_tbl(bp); 13009 if (rc) 13010 goto init_err_pci_clean; 13011 13012 rc = bnxt_fw_init_one_p2(bp); 13013 if (rc) 13014 goto init_err_pci_clean; 13015 13016 rc = bnxt_map_db_bar(bp); 13017 if (rc) { 13018 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", 13019 rc); 13020 goto init_err_pci_clean; 13021 } 13022 13023 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 13024 NETIF_F_TSO | NETIF_F_TSO6 | 13025 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 13026 NETIF_F_GSO_IPXIP4 | 13027 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 13028 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 13029 NETIF_F_RXCSUM | NETIF_F_GRO; 13030 13031 if (BNXT_SUPPORTS_TPA(bp)) 13032 dev->hw_features |= NETIF_F_LRO; 13033 13034 dev->hw_enc_features = 13035 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 13036 NETIF_F_TSO | NETIF_F_TSO6 | 13037 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 13038 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 13039 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 13040 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; 13041 13042 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 13043 NETIF_F_GSO_GRE_CSUM; 13044 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 13045 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) 13046 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 13047 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 13048 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; 13049 if (BNXT_SUPPORTS_TPA(bp)) 13050 dev->hw_features |= NETIF_F_GRO_HW; 13051 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 13052 if (dev->features & NETIF_F_GRO_HW) 13053 dev->features &= ~NETIF_F_LRO; 13054 dev->priv_flags |= IFF_UNICAST_FLT; 13055 13056 #ifdef CONFIG_BNXT_SRIOV 13057 init_waitqueue_head(&bp->sriov_cfg_wait); 13058 mutex_init(&bp->sriov_lock); 13059 #endif 13060 if (BNXT_SUPPORTS_TPA(bp)) { 13061 bp->gro_func = bnxt_gro_func_5730x; 13062 if (BNXT_CHIP_P4(bp)) 13063 bp->gro_func = bnxt_gro_func_5731x; 13064 else if (BNXT_CHIP_P5(bp)) 13065 bp->gro_func = bnxt_gro_func_5750x; 13066 } 13067 if (!BNXT_CHIP_P4_PLUS(bp)) 13068 bp->flags |= BNXT_FLAG_DOUBLE_DB; 13069 13070 rc = bnxt_init_mac_addr(bp); 13071 if (rc) { 13072 dev_err(&pdev->dev, "Unable to initialize mac address.\n"); 13073 rc = 
-EADDRNOTAVAIL; 13074 goto init_err_pci_clean; 13075 } 13076 13077 if (BNXT_PF(bp)) { 13078 /* Read the adapter's DSN to use as the eswitch switch_id */ 13079 rc = bnxt_pcie_dsn_get(bp, bp->dsn); 13080 } 13081 13082 /* MTU range: 60 - FW defined max */ 13083 dev->min_mtu = ETH_ZLEN; 13084 dev->max_mtu = bp->max_mtu; 13085 13086 rc = bnxt_probe_phy(bp, true); 13087 if (rc) 13088 goto init_err_pci_clean; 13089 13090 bnxt_set_rx_skb_mode(bp, false); 13091 bnxt_set_tpa_flags(bp); 13092 bnxt_set_ring_params(bp); 13093 rc = bnxt_set_dflt_rings(bp, true); 13094 if (rc) { 13095 netdev_err(bp->dev, "Not enough rings available.\n"); 13096 rc = -ENOMEM; 13097 goto init_err_pci_clean; 13098 } 13099 13100 bnxt_fw_init_one_p3(bp); 13101 13102 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) 13103 bp->flags |= BNXT_FLAG_STRIP_VLAN; 13104 13105 rc = bnxt_init_int_mode(bp); 13106 if (rc) 13107 goto init_err_pci_clean; 13108 13109 /* No TC has been set yet and rings may have been trimmed due to 13110 * limited MSIX, so we re-initialize the TX rings per TC. 13111 */ 13112 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 13113 13114 if (BNXT_PF(bp)) { 13115 if (!bnxt_pf_wq) { 13116 bnxt_pf_wq = 13117 create_singlethread_workqueue("bnxt_pf_wq"); 13118 if (!bnxt_pf_wq) { 13119 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 13120 rc = -ENOMEM; 13121 goto init_err_pci_clean; 13122 } 13123 } 13124 rc = bnxt_init_tc(bp); 13125 if (rc) 13126 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", 13127 rc); 13128 } 13129 13130 bnxt_inv_fw_health_reg(bp); 13131 bnxt_dl_register(bp); 13132 13133 rc = register_netdev(dev); 13134 if (rc) 13135 goto init_err_cleanup; 13136 13137 if (BNXT_PF(bp)) 13138 devlink_port_type_eth_set(&bp->dl_port, bp->dev); 13139 bnxt_dl_fw_reporters_create(bp); 13140 13141 netdev_info(dev, "%s found at mem %lx, node addr %pM\n", 13142 board_info[ent->driver_data].name, 13143 (long)pci_resource_start(pdev, 0), dev->dev_addr); 13144 pcie_print_link_status(pdev); 13145 13146 pci_save_state(pdev); 13147 return 0; 13148 13149 init_err_cleanup: 13150 bnxt_dl_unregister(bp); 13151 bnxt_shutdown_tc(bp); 13152 bnxt_clear_int_mode(bp); 13153 13154 init_err_pci_clean: 13155 bnxt_hwrm_func_drv_unrgtr(bp); 13156 bnxt_free_hwrm_short_cmd_req(bp); 13157 bnxt_free_hwrm_resources(bp); 13158 kfree(bp->fw_health); 13159 bp->fw_health = NULL; 13160 bnxt_cleanup_pci(bp); 13161 bnxt_free_ctx_mem(bp); 13162 kfree(bp->ctx); 13163 bp->ctx = NULL; 13164 kfree(bp->rss_indir_tbl); 13165 bp->rss_indir_tbl = NULL; 13166 13167 init_err_free: 13168 free_netdev(dev); 13169 return rc; 13170 } 13171 13172 static void bnxt_shutdown(struct pci_dev *pdev) 13173 { 13174 struct net_device *dev = pci_get_drvdata(pdev); 13175 struct bnxt *bp; 13176 13177 if (!dev) 13178 return; 13179 13180 rtnl_lock(); 13181 bp = netdev_priv(dev); 13182 if (!bp) 13183 goto shutdown_exit; 13184 13185 if (netif_running(dev)) 13186 dev_close(dev); 13187 13188 bnxt_ulp_shutdown(bp); 13189 bnxt_clear_int_mode(bp); 13190 pci_disable_device(pdev); 13191 13192 if (system_state == SYSTEM_POWER_OFF) { 13193 pci_wake_from_d3(pdev, bp->wol); 13194 pci_set_power_state(pdev, PCI_D3hot); 13195 } 13196 13197 shutdown_exit: 13198 rtnl_unlock(); 13199 } 13200 13201 #ifdef CONFIG_PM_SLEEP 13202 static int bnxt_suspend(struct device *device) 13203 { 13204 struct net_device *dev = dev_get_drvdata(device); 13205 struct bnxt *bp = netdev_priv(dev); 13206 int rc = 0; 13207 13208 rtnl_lock(); 13209 bnxt_ulp_stop(bp); 13210 if (netif_running(dev)) { 13211 
netif_device_detach(dev); 13212 rc = bnxt_close(dev); 13213 } 13214 bnxt_hwrm_func_drv_unrgtr(bp); 13215 pci_disable_device(bp->pdev); 13216 bnxt_free_ctx_mem(bp); 13217 kfree(bp->ctx); 13218 bp->ctx = NULL; 13219 rtnl_unlock(); 13220 return rc; 13221 } 13222 13223 static int bnxt_resume(struct device *device) 13224 { 13225 struct net_device *dev = dev_get_drvdata(device); 13226 struct bnxt *bp = netdev_priv(dev); 13227 int rc = 0; 13228 13229 rtnl_lock(); 13230 rc = pci_enable_device(bp->pdev); 13231 if (rc) { 13232 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", 13233 rc); 13234 goto resume_exit; 13235 } 13236 pci_set_master(bp->pdev); 13237 if (bnxt_hwrm_ver_get(bp)) { 13238 rc = -ENODEV; 13239 goto resume_exit; 13240 } 13241 rc = bnxt_hwrm_func_reset(bp); 13242 if (rc) { 13243 rc = -EBUSY; 13244 goto resume_exit; 13245 } 13246 13247 rc = bnxt_hwrm_func_qcaps(bp); 13248 if (rc) 13249 goto resume_exit; 13250 13251 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { 13252 rc = -ENODEV; 13253 goto resume_exit; 13254 } 13255 13256 bnxt_get_wol_settings(bp); 13257 if (netif_running(dev)) { 13258 rc = bnxt_open(dev); 13259 if (!rc) 13260 netif_device_attach(dev); 13261 } 13262 13263 resume_exit: 13264 bnxt_ulp_start(bp, rc); 13265 if (!rc) 13266 bnxt_reenable_sriov(bp); 13267 rtnl_unlock(); 13268 return rc; 13269 } 13270 13271 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume); 13272 #define BNXT_PM_OPS (&bnxt_pm_ops) 13273 13274 #else 13275 13276 #define BNXT_PM_OPS NULL 13277 13278 #endif /* CONFIG_PM_SLEEP */ 13279 13280 /** 13281 * bnxt_io_error_detected - called when PCI error is detected 13282 * @pdev: Pointer to PCI device 13283 * @state: The current pci connection state 13284 * 13285 * This function is called after a PCI bus error affecting 13286 * this device has been detected. 13287 */ 13288 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, 13289 pci_channel_state_t state) 13290 { 13291 struct net_device *netdev = pci_get_drvdata(pdev); 13292 struct bnxt *bp = netdev_priv(netdev); 13293 13294 netdev_info(netdev, "PCI I/O error detected\n"); 13295 13296 rtnl_lock(); 13297 netif_device_detach(netdev); 13298 13299 bnxt_ulp_stop(bp); 13300 13301 if (state == pci_channel_io_perm_failure) { 13302 rtnl_unlock(); 13303 return PCI_ERS_RESULT_DISCONNECT; 13304 } 13305 13306 if (state == pci_channel_io_frozen) 13307 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); 13308 13309 if (netif_running(netdev)) 13310 bnxt_close(netdev); 13311 13312 pci_disable_device(pdev); 13313 bnxt_free_ctx_mem(bp); 13314 kfree(bp->ctx); 13315 bp->ctx = NULL; 13316 rtnl_unlock(); 13317 13318 /* Request a slot reset. */ 13319 return PCI_ERS_RESULT_NEED_RESET; 13320 } 13321 13322 /** 13323 * bnxt_io_slot_reset - called after the pci bus has been reset. 13324 * @pdev: Pointer to PCI device 13325 * 13326 * Restart the card from scratch, as if from a cold-boot. 13327 * At this point, the card has experienced a hard reset, 13328 * followed by fixups by BIOS, and has its config space 13329 * set up identically to what it was at cold boot.
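 *
 * Returns PCI_ERS_RESULT_RECOVERED if the device could be re-enabled
 * and the firmware reset succeeded, PCI_ERS_RESULT_DISCONNECT otherwise.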
13330 */ 13331 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) 13332 { 13333 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; 13334 struct net_device *netdev = pci_get_drvdata(pdev); 13335 struct bnxt *bp = netdev_priv(netdev); 13336 int err = 0, off; 13337 13338 netdev_info(bp->dev, "PCI Slot Reset\n"); 13339 13340 rtnl_lock(); 13341 13342 if (pci_enable_device(pdev)) { 13343 dev_err(&pdev->dev, 13344 "Cannot re-enable PCI device after reset.\n"); 13345 } else { 13346 pci_set_master(pdev); 13347 /* Upon a fatal error, the device's internal logic that latches 13348 * the BAR values is reset and is restored only by rewriting 13349 * the BARs. 13350 * 13351 * As pci_restore_state() does not re-write the BARs if their 13352 * value matches the previously saved value, the driver writes 13353 * the BARs to 0 to force a restore in case of a fatal error. 13354 */ 13355 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, 13356 &bp->state)) { 13357 for (off = PCI_BASE_ADDRESS_0; 13358 off <= PCI_BASE_ADDRESS_5; off += 4) 13359 pci_write_config_dword(bp->pdev, off, 0); 13360 } 13361 pci_restore_state(pdev); 13362 pci_save_state(pdev); 13363 13364 err = bnxt_hwrm_func_reset(bp); 13365 if (!err) 13366 result = PCI_ERS_RESULT_RECOVERED; 13367 } 13368 13369 rtnl_unlock(); 13370 13371 return result; 13372 } 13373 13374 /** 13375 * bnxt_io_resume - called when traffic can start flowing again. 13376 * @pdev: Pointer to PCI device 13377 * 13378 * This callback is called when the error recovery driver tells 13379 * us that it's OK to resume normal operation. 13380 */ 13381 static void bnxt_io_resume(struct pci_dev *pdev) 13382 { 13383 struct net_device *netdev = pci_get_drvdata(pdev); 13384 struct bnxt *bp = netdev_priv(netdev); 13385 int err; 13386 13387 netdev_info(bp->dev, "PCI Slot Resume\n"); 13388 rtnl_lock(); 13389 13390 err = bnxt_hwrm_func_qcaps(bp); 13391 if (!err && netif_running(netdev)) 13392 err = bnxt_open(netdev); 13393 13394 bnxt_ulp_start(bp, err); 13395 if (!err) { 13396 bnxt_reenable_sriov(bp); 13397 netif_device_attach(netdev); 13398 } 13399 13400 rtnl_unlock(); 13401 } 13402 13403 static const struct pci_error_handlers bnxt_err_handler = { 13404 .error_detected = bnxt_io_error_detected, 13405 .slot_reset = bnxt_io_slot_reset, 13406 .resume = bnxt_io_resume 13407 }; 13408 13409 static struct pci_driver bnxt_pci_driver = { 13410 .name = DRV_MODULE_NAME, 13411 .id_table = bnxt_pci_tbl, 13412 .probe = bnxt_init_one, 13413 .remove = bnxt_remove_one, 13414 .shutdown = bnxt_shutdown, 13415 .driver.pm = BNXT_PM_OPS, 13416 .err_handler = &bnxt_err_handler, 13417 #if defined(CONFIG_BNXT_SRIOV) 13418 .sriov_configure = bnxt_sriov_configure, 13419 #endif 13420 }; 13421 13422 static int __init bnxt_init(void) 13423 { 13424 bnxt_debug_init(); 13425 return pci_register_driver(&bnxt_pci_driver); 13426 } 13427 13428 static void __exit bnxt_exit(void) 13429 { 13430 pci_unregister_driver(&bnxt_pci_driver); 13431 if (bnxt_pf_wq) 13432 destroy_workqueue(bnxt_pf_wq); 13433 bnxt_debug_exit(); 13434 } 13435 13436 module_init(bnxt_init); 13437 module_exit(bnxt_exit); 13438