/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM57508_NPAR,
	BCM57504_NPAR,
	BCM57502_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" }, 139 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, 140 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, 141 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, 142 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, 143 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, 144 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, 145 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, 146 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 147 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 148 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 149 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 150 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 151 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 152 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 153 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 154 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 155 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 156 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 157 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 158 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 159 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 160 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 161 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 162 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 163 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 164 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 165 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 166 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 167 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 168 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 169 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 170 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 171 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 172 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 173 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 174 }; 175 176 static const struct pci_device_id bnxt_pci_tbl[] = { 177 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 178 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 179 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 180 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 181 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 182 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 183 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, 184 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 185 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 186 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 187 { 
PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 188 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 189 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 190 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 191 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 192 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 193 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 194 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 195 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 196 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 197 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 198 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 199 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 200 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 201 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 202 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 203 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 204 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 205 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 206 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 207 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 208 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 209 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 210 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 211 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 212 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 213 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 214 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 215 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR }, 216 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 217 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR }, 218 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR }, 219 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 220 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR }, 221 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 222 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 223 #ifdef CONFIG_BNXT_SRIOV 224 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 225 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 226 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 227 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 228 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 229 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 230 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 231 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 232 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 233 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 234 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 235 #endif 236 { 0 } 237 }; 238 239 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 240 241 static const u16 bnxt_vf_req_snif[] = { 242 HWRM_FUNC_CFG, 243 HWRM_FUNC_VF_CFG, 244 HWRM_PORT_PHY_QCFG, 245 HWRM_CFA_L2_FILTER_ALLOC, 246 }; 247 248 static const u16 bnxt_async_events_arr[] = { 249 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 250 
ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, 251 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 252 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 253 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 254 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 255 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, 256 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, 257 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, 258 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, 259 }; 260 261 static struct workqueue_struct *bnxt_pf_wq; 262 263 static bool bnxt_vf_pciid(enum board_idx idx) 264 { 265 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || 266 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF); 267 } 268 269 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) 270 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) 271 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) 272 273 #define BNXT_CP_DB_IRQ_DIS(db) \ 274 writel(DB_CP_IRQ_DIS_FLAGS, db) 275 276 #define BNXT_DB_CQ(db, idx) \ 277 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell) 278 279 #define BNXT_DB_NQ_P5(db, idx) \ 280 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell) 281 282 #define BNXT_DB_CQ_ARM(db, idx) \ 283 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell) 284 285 #define BNXT_DB_NQ_ARM_P5(db, idx) \ 286 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell) 287 288 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 289 { 290 if (bp->flags & BNXT_FLAG_CHIP_P5) 291 BNXT_DB_NQ_P5(db, idx); 292 else 293 BNXT_DB_CQ(db, idx); 294 } 295 296 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 297 { 298 if (bp->flags & BNXT_FLAG_CHIP_P5) 299 BNXT_DB_NQ_ARM_P5(db, idx); 300 else 301 BNXT_DB_CQ_ARM(db, idx); 302 } 303 304 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 305 { 306 if (bp->flags & BNXT_FLAG_CHIP_P5) 307 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx), 308 db->doorbell); 309 else 310 BNXT_DB_CQ(db, idx); 311 } 312 313 const u16 bnxt_lhint_arr[] = { 314 TX_BD_FLAGS_LHINT_512_AND_SMALLER, 315 TX_BD_FLAGS_LHINT_512_TO_1023, 316 TX_BD_FLAGS_LHINT_1024_TO_2047, 317 TX_BD_FLAGS_LHINT_1024_TO_2047, 318 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 319 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 320 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 321 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 322 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 323 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 324 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 325 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 326 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 327 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 328 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 329 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 330 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 331 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 332 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 333 }; 334 335 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) 336 { 337 struct metadata_dst *md_dst = skb_metadata_dst(skb); 338 339 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 340 return 0; 341 342 return md_dst->u.port_info.port_id; 343 } 344 345 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 346 { 347 struct bnxt *bp = netdev_priv(dev); 348 struct tx_bd *txbd; 349 struct tx_bd_ext *txbd1; 350 struct netdev_queue *txq; 351 int i; 352 dma_addr_t mapping; 353 unsigned int length, pad = 0; 354 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 355 u16 prod, last_frag; 356 struct pci_dev *pdev = bp->pdev; 357 struct bnxt_tx_ring_info *txr; 358 struct bnxt_sw_tx_bd *tx_buf; 359 360 i = 
skb_get_queue_mapping(skb); 361 if (unlikely(i >= bp->tx_nr_rings)) { 362 dev_kfree_skb_any(skb); 363 return NETDEV_TX_OK; 364 } 365 366 txq = netdev_get_tx_queue(dev, i); 367 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 368 prod = txr->tx_prod; 369 370 free_size = bnxt_tx_avail(bp, txr); 371 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 372 netif_tx_stop_queue(txq); 373 return NETDEV_TX_BUSY; 374 } 375 376 length = skb->len; 377 len = skb_headlen(skb); 378 last_frag = skb_shinfo(skb)->nr_frags; 379 380 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 381 382 txbd->tx_bd_opaque = prod; 383 384 tx_buf = &txr->tx_buf_ring[prod]; 385 tx_buf->skb = skb; 386 tx_buf->nr_frags = last_frag; 387 388 vlan_tag_flags = 0; 389 cfa_action = bnxt_xmit_get_cfa_action(skb); 390 if (skb_vlan_tag_present(skb)) { 391 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 392 skb_vlan_tag_get(skb); 393 /* Currently supports 8021Q, 8021AD vlan offloads 394 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 395 */ 396 if (skb->vlan_proto == htons(ETH_P_8021Q)) 397 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 398 } 399 400 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 401 struct tx_push_buffer *tx_push_buf = txr->tx_push; 402 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 403 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 404 void __iomem *db = txr->tx_db.doorbell; 405 void *pdata = tx_push_buf->data; 406 u64 *end; 407 int j, push_len; 408 409 /* Set COAL_NOW to be ready quickly for the next push */ 410 tx_push->tx_bd_len_flags_type = 411 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 412 TX_BD_TYPE_LONG_TX_BD | 413 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 414 TX_BD_FLAGS_COAL_NOW | 415 TX_BD_FLAGS_PACKET_END | 416 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 417 418 if (skb->ip_summed == CHECKSUM_PARTIAL) 419 tx_push1->tx_bd_hsize_lflags = 420 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 421 else 422 tx_push1->tx_bd_hsize_lflags = 0; 423 424 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 425 tx_push1->tx_bd_cfa_action = 426 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 427 428 end = pdata + length; 429 end = PTR_ALIGN(end, 8) - 1; 430 *end = 0; 431 432 skb_copy_from_linear_data(skb, pdata, len); 433 pdata += len; 434 for (j = 0; j < last_frag; j++) { 435 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 436 void *fptr; 437 438 fptr = skb_frag_address_safe(frag); 439 if (!fptr) 440 goto normal_tx; 441 442 memcpy(pdata, fptr, skb_frag_size(frag)); 443 pdata += skb_frag_size(frag); 444 } 445 446 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 447 txbd->tx_bd_haddr = txr->data_mapping; 448 prod = NEXT_TX(prod); 449 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 450 memcpy(txbd, tx_push1, sizeof(*txbd)); 451 prod = NEXT_TX(prod); 452 tx_push->doorbell = 453 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 454 txr->tx_prod = prod; 455 456 tx_buf->is_push = 1; 457 netdev_tx_sent_queue(txq, skb->len); 458 wmb(); /* Sync is_push and byte queue before pushing data */ 459 460 push_len = (length + sizeof(*tx_push) + 7) / 8; 461 if (push_len > 16) { 462 __iowrite64_copy(db, tx_push_buf, 16); 463 __iowrite32_copy(db + 4, tx_push_buf + 1, 464 (push_len - 16) << 1); 465 } else { 466 __iowrite64_copy(db, tx_push_buf, push_len); 467 } 468 469 goto tx_done; 470 } 471 472 normal_tx: 473 if (length < BNXT_MIN_PKT_SIZE) { 474 pad = BNXT_MIN_PKT_SIZE - length; 475 if (skb_pad(skb, pad)) { 476 /* SKB already freed. 
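			 * skb_pad() frees the skb itself when it fails, so
			 * only the ring entry needs to be cleared here.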
*/ 477 tx_buf->skb = NULL; 478 return NETDEV_TX_OK; 479 } 480 length = BNXT_MIN_PKT_SIZE; 481 } 482 483 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 484 485 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { 486 dev_kfree_skb_any(skb); 487 tx_buf->skb = NULL; 488 return NETDEV_TX_OK; 489 } 490 491 dma_unmap_addr_set(tx_buf, mapping, mapping); 492 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 493 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 494 495 txbd->tx_bd_haddr = cpu_to_le64(mapping); 496 497 prod = NEXT_TX(prod); 498 txbd1 = (struct tx_bd_ext *) 499 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 500 501 txbd1->tx_bd_hsize_lflags = 0; 502 if (skb_is_gso(skb)) { 503 u32 hdr_len; 504 505 if (skb->encapsulation) 506 hdr_len = skb_inner_network_offset(skb) + 507 skb_inner_network_header_len(skb) + 508 inner_tcp_hdrlen(skb); 509 else 510 hdr_len = skb_transport_offset(skb) + 511 tcp_hdrlen(skb); 512 513 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | 514 TX_BD_FLAGS_T_IPID | 515 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 516 length = skb_shinfo(skb)->gso_size; 517 txbd1->tx_bd_mss = cpu_to_le32(length); 518 length += hdr_len; 519 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 520 txbd1->tx_bd_hsize_lflags = 521 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 522 txbd1->tx_bd_mss = 0; 523 } 524 525 length >>= 9; 526 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 527 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 528 skb->len); 529 i = 0; 530 goto tx_dma_error; 531 } 532 flags |= bnxt_lhint_arr[length]; 533 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 534 535 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 536 txbd1->tx_bd_cfa_action = 537 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 538 for (i = 0; i < last_frag; i++) { 539 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 540 541 prod = NEXT_TX(prod); 542 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 543 544 len = skb_frag_size(frag); 545 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 546 DMA_TO_DEVICE); 547 548 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 549 goto tx_dma_error; 550 551 tx_buf = &txr->tx_buf_ring[prod]; 552 dma_unmap_addr_set(tx_buf, mapping, mapping); 553 554 txbd->tx_bd_haddr = cpu_to_le64(mapping); 555 556 flags = len << TX_BD_LEN_SHIFT; 557 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 558 } 559 560 flags &= ~TX_BD_LEN; 561 txbd->tx_bd_len_flags_type = 562 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 563 TX_BD_FLAGS_PACKET_END); 564 565 netdev_tx_sent_queue(txq, skb->len); 566 567 /* Sync BD data before updating doorbell */ 568 wmb(); 569 570 prod = NEXT_TX(prod); 571 txr->tx_prod = prod; 572 573 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) 574 bnxt_db_write(bp, &txr->tx_db, prod); 575 576 tx_done: 577 578 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 579 if (netdev_xmit_more() && !tx_buf->is_push) 580 bnxt_db_write(bp, &txr->tx_db, prod); 581 582 netif_tx_stop_queue(txq); 583 584 /* netif_tx_stop_queue() must be done before checking 585 * tx index in bnxt_tx_avail() below, because in 586 * bnxt_tx_int(), we update tx index before checking for 587 * netif_tx_queue_stopped(). 
588 */ 589 smp_mb(); 590 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) 591 netif_tx_wake_queue(txq); 592 } 593 return NETDEV_TX_OK; 594 595 tx_dma_error: 596 last_frag = i; 597 598 /* start back at beginning and unmap skb */ 599 prod = txr->tx_prod; 600 tx_buf = &txr->tx_buf_ring[prod]; 601 tx_buf->skb = NULL; 602 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 603 skb_headlen(skb), PCI_DMA_TODEVICE); 604 prod = NEXT_TX(prod); 605 606 /* unmap remaining mapped pages */ 607 for (i = 0; i < last_frag; i++) { 608 prod = NEXT_TX(prod); 609 tx_buf = &txr->tx_buf_ring[prod]; 610 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 611 skb_frag_size(&skb_shinfo(skb)->frags[i]), 612 PCI_DMA_TODEVICE); 613 } 614 615 dev_kfree_skb_any(skb); 616 return NETDEV_TX_OK; 617 } 618 619 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) 620 { 621 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 622 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 623 u16 cons = txr->tx_cons; 624 struct pci_dev *pdev = bp->pdev; 625 int i; 626 unsigned int tx_bytes = 0; 627 628 for (i = 0; i < nr_pkts; i++) { 629 struct bnxt_sw_tx_bd *tx_buf; 630 struct sk_buff *skb; 631 int j, last; 632 633 tx_buf = &txr->tx_buf_ring[cons]; 634 cons = NEXT_TX(cons); 635 skb = tx_buf->skb; 636 tx_buf->skb = NULL; 637 638 if (tx_buf->is_push) { 639 tx_buf->is_push = 0; 640 goto next_tx_int; 641 } 642 643 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 644 skb_headlen(skb), PCI_DMA_TODEVICE); 645 last = tx_buf->nr_frags; 646 647 for (j = 0; j < last; j++) { 648 cons = NEXT_TX(cons); 649 tx_buf = &txr->tx_buf_ring[cons]; 650 dma_unmap_page( 651 &pdev->dev, 652 dma_unmap_addr(tx_buf, mapping), 653 skb_frag_size(&skb_shinfo(skb)->frags[j]), 654 PCI_DMA_TODEVICE); 655 } 656 657 next_tx_int: 658 cons = NEXT_TX(cons); 659 660 tx_bytes += skb->len; 661 dev_kfree_skb_any(skb); 662 } 663 664 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); 665 txr->tx_cons = cons; 666 667 /* Need to make the tx_cons update visible to bnxt_start_xmit() 668 * before checking for netif_tx_queue_stopped(). Without the 669 * memory barrier, there is a small possibility that bnxt_start_xmit() 670 * will miss it and cause the queue to be stopped forever. 
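	 * The smp_mb() below pairs with the smp_mb() in bnxt_start_xmit()
	 * that follows netif_tx_stop_queue().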
671 */ 672 smp_mb(); 673 674 if (unlikely(netif_tx_queue_stopped(txq)) && 675 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { 676 __netif_tx_lock(txq, smp_processor_id()); 677 if (netif_tx_queue_stopped(txq) && 678 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 679 txr->dev_state != BNXT_DEV_STATE_CLOSING) 680 netif_tx_wake_queue(txq); 681 __netif_tx_unlock(txq); 682 } 683 } 684 685 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 686 struct bnxt_rx_ring_info *rxr, 687 gfp_t gfp) 688 { 689 struct device *dev = &bp->pdev->dev; 690 struct page *page; 691 692 page = page_pool_dev_alloc_pages(rxr->page_pool); 693 if (!page) 694 return NULL; 695 696 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, 697 DMA_ATTR_WEAK_ORDERING); 698 if (dma_mapping_error(dev, *mapping)) { 699 page_pool_recycle_direct(rxr->page_pool, page); 700 return NULL; 701 } 702 *mapping += bp->rx_dma_offset; 703 return page; 704 } 705 706 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, 707 gfp_t gfp) 708 { 709 u8 *data; 710 struct pci_dev *pdev = bp->pdev; 711 712 data = kmalloc(bp->rx_buf_size, gfp); 713 if (!data) 714 return NULL; 715 716 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 717 bp->rx_buf_use_size, bp->rx_dir, 718 DMA_ATTR_WEAK_ORDERING); 719 720 if (dma_mapping_error(&pdev->dev, *mapping)) { 721 kfree(data); 722 data = NULL; 723 } 724 return data; 725 } 726 727 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 728 u16 prod, gfp_t gfp) 729 { 730 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 731 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; 732 dma_addr_t mapping; 733 734 if (BNXT_RX_PAGE_MODE(bp)) { 735 struct page *page = 736 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); 737 738 if (!page) 739 return -ENOMEM; 740 741 rx_buf->data = page; 742 rx_buf->data_ptr = page_address(page) + bp->rx_offset; 743 } else { 744 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); 745 746 if (!data) 747 return -ENOMEM; 748 749 rx_buf->data = data; 750 rx_buf->data_ptr = data + bp->rx_offset; 751 } 752 rx_buf->mapping = mapping; 753 754 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 755 return 0; 756 } 757 758 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 759 { 760 u16 prod = rxr->rx_prod; 761 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 762 struct rx_bd *cons_bd, *prod_bd; 763 764 prod_rx_buf = &rxr->rx_buf_ring[prod]; 765 cons_rx_buf = &rxr->rx_buf_ring[cons]; 766 767 prod_rx_buf->data = data; 768 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 769 770 prod_rx_buf->mapping = cons_rx_buf->mapping; 771 772 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 773 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 774 775 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 776 } 777 778 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 779 { 780 u16 next, max = rxr->rx_agg_bmap_size; 781 782 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 783 if (next >= max) 784 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 785 return next; 786 } 787 788 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 789 struct bnxt_rx_ring_info *rxr, 790 u16 prod, gfp_t gfp) 791 { 792 struct rx_bd *rxbd = 793 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 794 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 795 struct pci_dev *pdev = bp->pdev; 796 struct page *page; 797 dma_addr_t mapping; 798 u16 sw_prod = rxr->rx_sw_agg_prod; 799 
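	/* Note: when the system PAGE_SIZE is larger than BNXT_RX_PAGE_SIZE,
	 * the block below carves one page into BNXT_RX_PAGE_SIZE chunks and
	 * takes an extra page reference for each chunk handed out until the
	 * page is fully consumed.
	 */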
unsigned int offset = 0; 800 801 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 802 page = rxr->rx_page; 803 if (!page) { 804 page = alloc_page(gfp); 805 if (!page) 806 return -ENOMEM; 807 rxr->rx_page = page; 808 rxr->rx_page_offset = 0; 809 } 810 offset = rxr->rx_page_offset; 811 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; 812 if (rxr->rx_page_offset == PAGE_SIZE) 813 rxr->rx_page = NULL; 814 else 815 get_page(page); 816 } else { 817 page = alloc_page(gfp); 818 if (!page) 819 return -ENOMEM; 820 } 821 822 mapping = dma_map_page_attrs(&pdev->dev, page, offset, 823 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, 824 DMA_ATTR_WEAK_ORDERING); 825 if (dma_mapping_error(&pdev->dev, mapping)) { 826 __free_page(page); 827 return -EIO; 828 } 829 830 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 831 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 832 833 __set_bit(sw_prod, rxr->rx_agg_bmap); 834 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 835 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 836 837 rx_agg_buf->page = page; 838 rx_agg_buf->offset = offset; 839 rx_agg_buf->mapping = mapping; 840 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 841 rxbd->rx_bd_opaque = sw_prod; 842 return 0; 843 } 844 845 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 846 struct bnxt_cp_ring_info *cpr, 847 u16 cp_cons, u16 curr) 848 { 849 struct rx_agg_cmp *agg; 850 851 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 852 agg = (struct rx_agg_cmp *) 853 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 854 return agg; 855 } 856 857 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 858 struct bnxt_rx_ring_info *rxr, 859 u16 agg_id, u16 curr) 860 { 861 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 862 863 return &tpa_info->agg_arr[curr]; 864 } 865 866 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 867 u16 start, u32 agg_bufs, bool tpa) 868 { 869 struct bnxt_napi *bnapi = cpr->bnapi; 870 struct bnxt *bp = bnapi->bp; 871 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 872 u16 prod = rxr->rx_agg_prod; 873 u16 sw_prod = rxr->rx_sw_agg_prod; 874 bool p5_tpa = false; 875 u32 i; 876 877 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 878 p5_tpa = true; 879 880 for (i = 0; i < agg_bufs; i++) { 881 u16 cons; 882 struct rx_agg_cmp *agg; 883 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 884 struct rx_bd *prod_bd; 885 struct page *page; 886 887 if (p5_tpa) 888 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 889 else 890 agg = bnxt_get_agg(bp, cpr, idx, start + i); 891 cons = agg->rx_agg_cmp_opaque; 892 __clear_bit(cons, rxr->rx_agg_bmap); 893 894 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 895 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 896 897 __set_bit(sw_prod, rxr->rx_agg_bmap); 898 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 899 cons_rx_buf = &rxr->rx_agg_ring[cons]; 900 901 /* It is possible for sw_prod to be equal to cons, so 902 * set cons_rx_buf->page to NULL first. 
903 */ 904 page = cons_rx_buf->page; 905 cons_rx_buf->page = NULL; 906 prod_rx_buf->page = page; 907 prod_rx_buf->offset = cons_rx_buf->offset; 908 909 prod_rx_buf->mapping = cons_rx_buf->mapping; 910 911 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 912 913 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 914 prod_bd->rx_bd_opaque = sw_prod; 915 916 prod = NEXT_RX_AGG(prod); 917 sw_prod = NEXT_RX_AGG(sw_prod); 918 } 919 rxr->rx_agg_prod = prod; 920 rxr->rx_sw_agg_prod = sw_prod; 921 } 922 923 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 924 struct bnxt_rx_ring_info *rxr, 925 u16 cons, void *data, u8 *data_ptr, 926 dma_addr_t dma_addr, 927 unsigned int offset_and_len) 928 { 929 unsigned int payload = offset_and_len >> 16; 930 unsigned int len = offset_and_len & 0xffff; 931 skb_frag_t *frag; 932 struct page *page = data; 933 u16 prod = rxr->rx_prod; 934 struct sk_buff *skb; 935 int off, err; 936 937 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 938 if (unlikely(err)) { 939 bnxt_reuse_rx_data(rxr, cons, data); 940 return NULL; 941 } 942 dma_addr -= bp->rx_dma_offset; 943 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, 944 DMA_ATTR_WEAK_ORDERING); 945 page_pool_release_page(rxr->page_pool, page); 946 947 if (unlikely(!payload)) 948 payload = eth_get_headlen(bp->dev, data_ptr, len); 949 950 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 951 if (!skb) { 952 __free_page(page); 953 return NULL; 954 } 955 956 off = (void *)data_ptr - page_address(page); 957 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); 958 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 959 payload + NET_IP_ALIGN); 960 961 frag = &skb_shinfo(skb)->frags[0]; 962 skb_frag_size_sub(frag, payload); 963 skb_frag_off_add(frag, payload); 964 skb->data_len -= payload; 965 skb->tail += payload; 966 967 return skb; 968 } 969 970 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 971 struct bnxt_rx_ring_info *rxr, u16 cons, 972 void *data, u8 *data_ptr, 973 dma_addr_t dma_addr, 974 unsigned int offset_and_len) 975 { 976 u16 prod = rxr->rx_prod; 977 struct sk_buff *skb; 978 int err; 979 980 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 981 if (unlikely(err)) { 982 bnxt_reuse_rx_data(rxr, cons, data); 983 return NULL; 984 } 985 986 skb = build_skb(data, 0); 987 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 988 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 989 if (!skb) { 990 kfree(data); 991 return NULL; 992 } 993 994 skb_reserve(skb, bp->rx_offset); 995 skb_put(skb, offset_and_len & 0xffff); 996 return skb; 997 } 998 999 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, 1000 struct bnxt_cp_ring_info *cpr, 1001 struct sk_buff *skb, u16 idx, 1002 u32 agg_bufs, bool tpa) 1003 { 1004 struct bnxt_napi *bnapi = cpr->bnapi; 1005 struct pci_dev *pdev = bp->pdev; 1006 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1007 u16 prod = rxr->rx_agg_prod; 1008 bool p5_tpa = false; 1009 u32 i; 1010 1011 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 1012 p5_tpa = true; 1013 1014 for (i = 0; i < agg_bufs; i++) { 1015 u16 cons, frag_len; 1016 struct rx_agg_cmp *agg; 1017 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1018 struct page *page; 1019 dma_addr_t mapping; 1020 1021 if (p5_tpa) 1022 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1023 else 1024 agg = bnxt_get_agg(bp, cpr, idx, i); 1025 cons = agg->rx_agg_cmp_opaque; 1026 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1027 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1028 1029 cons_rx_buf = 
&rxr->rx_agg_ring[cons]; 1030 skb_fill_page_desc(skb, i, cons_rx_buf->page, 1031 cons_rx_buf->offset, frag_len); 1032 __clear_bit(cons, rxr->rx_agg_bmap); 1033 1034 /* It is possible for bnxt_alloc_rx_page() to allocate 1035 * a sw_prod index that equals the cons index, so we 1036 * need to clear the cons entry now. 1037 */ 1038 mapping = cons_rx_buf->mapping; 1039 page = cons_rx_buf->page; 1040 cons_rx_buf->page = NULL; 1041 1042 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1043 struct skb_shared_info *shinfo; 1044 unsigned int nr_frags; 1045 1046 shinfo = skb_shinfo(skb); 1047 nr_frags = --shinfo->nr_frags; 1048 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); 1049 1050 dev_kfree_skb(skb); 1051 1052 cons_rx_buf->page = page; 1053 1054 /* Update prod since possibly some pages have been 1055 * allocated already. 1056 */ 1057 rxr->rx_agg_prod = prod; 1058 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1059 return NULL; 1060 } 1061 1062 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1063 PCI_DMA_FROMDEVICE, 1064 DMA_ATTR_WEAK_ORDERING); 1065 1066 skb->data_len += frag_len; 1067 skb->len += frag_len; 1068 skb->truesize += PAGE_SIZE; 1069 1070 prod = NEXT_RX_AGG(prod); 1071 } 1072 rxr->rx_agg_prod = prod; 1073 return skb; 1074 } 1075 1076 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1077 u8 agg_bufs, u32 *raw_cons) 1078 { 1079 u16 last; 1080 struct rx_agg_cmp *agg; 1081 1082 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1083 last = RING_CMP(*raw_cons); 1084 agg = (struct rx_agg_cmp *) 1085 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1086 return RX_AGG_CMP_VALID(agg, *raw_cons); 1087 } 1088 1089 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1090 unsigned int len, 1091 dma_addr_t mapping) 1092 { 1093 struct bnxt *bp = bnapi->bp; 1094 struct pci_dev *pdev = bp->pdev; 1095 struct sk_buff *skb; 1096 1097 skb = napi_alloc_skb(&bnapi->napi, len); 1098 if (!skb) 1099 return NULL; 1100 1101 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 1102 bp->rx_dir); 1103 1104 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1105 len + NET_IP_ALIGN); 1106 1107 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1108 bp->rx_dir); 1109 1110 skb_put(skb, len); 1111 return skb; 1112 } 1113 1114 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1115 u32 *raw_cons, void *cmp) 1116 { 1117 struct rx_cmp *rxcmp = cmp; 1118 u32 tmp_raw_cons = *raw_cons; 1119 u8 cmp_type, agg_bufs = 0; 1120 1121 cmp_type = RX_CMP_TYPE(rxcmp); 1122 1123 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1124 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1125 RX_CMP_AGG_BUFS) >> 1126 RX_CMP_AGG_BUFS_SHIFT; 1127 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1128 struct rx_tpa_end_cmp *tpa_end = cmp; 1129 1130 if (bp->flags & BNXT_FLAG_CHIP_P5) 1131 return 0; 1132 1133 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1134 } 1135 1136 if (agg_bufs) { 1137 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1138 return -EBUSY; 1139 } 1140 *raw_cons = tmp_raw_cons; 1141 return 0; 1142 } 1143 1144 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) 1145 { 1146 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) 1147 return; 1148 1149 if (BNXT_PF(bp)) 1150 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); 1151 else 1152 schedule_delayed_work(&bp->fw_reset_task, delay); 1153 } 1154 1155 static void bnxt_queue_sp_work(struct bnxt *bp) 1156 { 1157 if 
(BNXT_PF(bp)) 1158 queue_work(bnxt_pf_wq, &bp->sp_task); 1159 else 1160 schedule_work(&bp->sp_task); 1161 } 1162 1163 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1164 { 1165 if (!rxr->bnapi->in_reset) { 1166 rxr->bnapi->in_reset = true; 1167 if (bp->flags & BNXT_FLAG_CHIP_P5) 1168 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1169 else 1170 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); 1171 bnxt_queue_sp_work(bp); 1172 } 1173 rxr->rx_next_cons = 0xffff; 1174 } 1175 1176 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1177 { 1178 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1179 u16 idx = agg_id & MAX_TPA_P5_MASK; 1180 1181 if (test_bit(idx, map->agg_idx_bmap)) 1182 idx = find_first_zero_bit(map->agg_idx_bmap, 1183 BNXT_AGG_IDX_BMAP_SIZE); 1184 __set_bit(idx, map->agg_idx_bmap); 1185 map->agg_id_tbl[agg_id] = idx; 1186 return idx; 1187 } 1188 1189 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1190 { 1191 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1192 1193 __clear_bit(idx, map->agg_idx_bmap); 1194 } 1195 1196 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1197 { 1198 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1199 1200 return map->agg_id_tbl[agg_id]; 1201 } 1202 1203 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1204 struct rx_tpa_start_cmp *tpa_start, 1205 struct rx_tpa_start_cmp_ext *tpa_start1) 1206 { 1207 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1208 struct bnxt_tpa_info *tpa_info; 1209 u16 cons, prod, agg_id; 1210 struct rx_bd *prod_bd; 1211 dma_addr_t mapping; 1212 1213 if (bp->flags & BNXT_FLAG_CHIP_P5) { 1214 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1215 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1216 } else { 1217 agg_id = TPA_START_AGG_ID(tpa_start); 1218 } 1219 cons = tpa_start->rx_tpa_start_cmp_opaque; 1220 prod = rxr->rx_prod; 1221 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1222 prod_rx_buf = &rxr->rx_buf_ring[prod]; 1223 tpa_info = &rxr->rx_tpa[agg_id]; 1224 1225 if (unlikely(cons != rxr->rx_next_cons || 1226 TPA_START_ERROR(tpa_start))) { 1227 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1228 cons, rxr->rx_next_cons, 1229 TPA_START_ERROR_CODE(tpa_start1)); 1230 bnxt_sched_reset(bp, rxr); 1231 return; 1232 } 1233 /* Store cfa_code in tpa_info to use in tpa_end 1234 * completion processing. 
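	 * bnxt_tpa_end() passes it to bnxt_get_pkt_dev() to steer the
	 * aggregated skb to the PF netdev or the matching VF representor.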
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.
If we don't see the 1342 * correct protocol ID, it must be a loopback packet where 1343 * the offsets are off by 4. 1344 */ 1345 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1346 loopback = true; 1347 } 1348 if (loopback) { 1349 /* internal loopback packet, subtract all offsets by 4 */ 1350 inner_ip_off -= 4; 1351 inner_mac_off -= 4; 1352 outer_ip_off -= 4; 1353 } 1354 1355 nw_off = inner_ip_off - ETH_HLEN; 1356 skb_set_network_header(skb, nw_off); 1357 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1358 struct ipv6hdr *iph = ipv6_hdr(skb); 1359 1360 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1361 len = skb->len - skb_transport_offset(skb); 1362 th = tcp_hdr(skb); 1363 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1364 } else { 1365 struct iphdr *iph = ip_hdr(skb); 1366 1367 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1368 len = skb->len - skb_transport_offset(skb); 1369 th = tcp_hdr(skb); 1370 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1371 } 1372 1373 if (inner_mac_off) { /* tunnel */ 1374 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1375 ETH_HLEN - 2)); 1376 1377 bnxt_gro_tunnel(skb, proto); 1378 } 1379 #endif 1380 return skb; 1381 } 1382 1383 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, 1384 int payload_off, int tcp_ts, 1385 struct sk_buff *skb) 1386 { 1387 #ifdef CONFIG_INET 1388 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1389 u32 hdr_info = tpa_info->hdr_info; 1390 int iphdr_len, nw_off; 1391 1392 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1393 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1394 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1395 1396 nw_off = inner_ip_off - ETH_HLEN; 1397 skb_set_network_header(skb, nw_off); 1398 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
				sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ?
dev : bp->dev; 1497 } 1498 1499 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1500 struct bnxt_cp_ring_info *cpr, 1501 u32 *raw_cons, 1502 struct rx_tpa_end_cmp *tpa_end, 1503 struct rx_tpa_end_cmp_ext *tpa_end1, 1504 u8 *event) 1505 { 1506 struct bnxt_napi *bnapi = cpr->bnapi; 1507 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1508 u8 *data_ptr, agg_bufs; 1509 unsigned int len; 1510 struct bnxt_tpa_info *tpa_info; 1511 dma_addr_t mapping; 1512 struct sk_buff *skb; 1513 u16 idx = 0, agg_id; 1514 void *data; 1515 bool gro; 1516 1517 if (unlikely(bnapi->in_reset)) { 1518 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1519 1520 if (rc < 0) 1521 return ERR_PTR(-EBUSY); 1522 return NULL; 1523 } 1524 1525 if (bp->flags & BNXT_FLAG_CHIP_P5) { 1526 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1527 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1528 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1529 tpa_info = &rxr->rx_tpa[agg_id]; 1530 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1531 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1532 agg_bufs, tpa_info->agg_count); 1533 agg_bufs = tpa_info->agg_count; 1534 } 1535 tpa_info->agg_count = 0; 1536 *event |= BNXT_AGG_EVENT; 1537 bnxt_free_agg_idx(rxr, agg_id); 1538 idx = agg_id; 1539 gro = !!(bp->flags & BNXT_FLAG_GRO); 1540 } else { 1541 agg_id = TPA_END_AGG_ID(tpa_end); 1542 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1543 tpa_info = &rxr->rx_tpa[agg_id]; 1544 idx = RING_CMP(*raw_cons); 1545 if (agg_bufs) { 1546 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1547 return ERR_PTR(-EBUSY); 1548 1549 *event |= BNXT_AGG_EVENT; 1550 idx = NEXT_CMP(idx); 1551 } 1552 gro = !!TPA_END_GRO(tpa_end); 1553 } 1554 data = tpa_info->data; 1555 data_ptr = tpa_info->data_ptr; 1556 prefetch(data_ptr); 1557 len = tpa_info->len; 1558 mapping = tpa_info->mapping; 1559 1560 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1561 bnxt_abort_tpa(cpr, idx, agg_bufs); 1562 if (agg_bufs > MAX_SKB_FRAGS) 1563 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1564 agg_bufs, (int)MAX_SKB_FRAGS); 1565 return NULL; 1566 } 1567 1568 if (len <= bp->rx_copy_thresh) { 1569 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1570 if (!skb) { 1571 bnxt_abort_tpa(cpr, idx, agg_bufs); 1572 return NULL; 1573 } 1574 } else { 1575 u8 *new_data; 1576 dma_addr_t new_mapping; 1577 1578 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); 1579 if (!new_data) { 1580 bnxt_abort_tpa(cpr, idx, agg_bufs); 1581 return NULL; 1582 } 1583 1584 tpa_info->data = new_data; 1585 tpa_info->data_ptr = new_data + bp->rx_offset; 1586 tpa_info->mapping = new_mapping; 1587 1588 skb = build_skb(data, 0); 1589 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1590 bp->rx_buf_use_size, bp->rx_dir, 1591 DMA_ATTR_WEAK_ORDERING); 1592 1593 if (!skb) { 1594 kfree(data); 1595 bnxt_abort_tpa(cpr, idx, agg_bufs); 1596 return NULL; 1597 } 1598 skb_reserve(skb, bp->rx_offset); 1599 skb_put(skb, len); 1600 } 1601 1602 if (agg_bufs) { 1603 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); 1604 if (!skb) { 1605 /* Page reuse already handled by bnxt_rx_pages(). 
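			 * bnxt_rx_pages() has already recycled the remaining
			 * aggregation buffers on failure, so bnxt_abort_tpa()
			 * must not be called again here.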
*/ 1606 return NULL; 1607 } 1608 } 1609 1610 skb->protocol = 1611 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); 1612 1613 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1614 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1615 1616 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1617 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1618 u16 vlan_proto = tpa_info->metadata >> 1619 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1620 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1621 1622 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1623 } 1624 1625 skb_checksum_none_assert(skb); 1626 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1627 skb->ip_summed = CHECKSUM_UNNECESSARY; 1628 skb->csum_level = 1629 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1630 } 1631 1632 if (gro) 1633 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1634 1635 return skb; 1636 } 1637 1638 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1639 struct rx_agg_cmp *rx_agg) 1640 { 1641 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1642 struct bnxt_tpa_info *tpa_info; 1643 1644 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1645 tpa_info = &rxr->rx_tpa[agg_id]; 1646 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1647 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1648 } 1649 1650 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1651 struct sk_buff *skb) 1652 { 1653 if (skb->dev != bp->dev) { 1654 /* this packet belongs to a vf-rep */ 1655 bnxt_vf_rep_rx(bp, skb); 1656 return; 1657 } 1658 skb_record_rx_queue(skb, bnapi->index); 1659 napi_gro_receive(&bnapi->napi, skb); 1660 } 1661 1662 /* returns the following: 1663 * 1 - 1 packet successfully received 1664 * 0 - successful TPA_START, packet not completed yet 1665 * -EBUSY - completion ring does not have all the agg buffers yet 1666 * -ENOMEM - packet aborted due to out of memory 1667 * -EIO - packet aborted due to hw error indicated in BD 1668 */ 1669 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1670 u32 *raw_cons, u8 *event) 1671 { 1672 struct bnxt_napi *bnapi = cpr->bnapi; 1673 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1674 struct net_device *dev = bp->dev; 1675 struct rx_cmp *rxcmp; 1676 struct rx_cmp_ext *rxcmp1; 1677 u32 tmp_raw_cons = *raw_cons; 1678 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1679 struct bnxt_sw_rx_bd *rx_buf; 1680 unsigned int len; 1681 u8 *data_ptr, agg_bufs, cmp_type; 1682 dma_addr_t dma_addr; 1683 struct sk_buff *skb; 1684 void *data; 1685 int rc = 0; 1686 u32 misc; 1687 1688 rxcmp = (struct rx_cmp *) 1689 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1690 1691 cmp_type = RX_CMP_TYPE(rxcmp); 1692 1693 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 1694 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 1695 goto next_rx_no_prod_no_len; 1696 } 1697 1698 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1699 cp_cons = RING_CMP(tmp_raw_cons); 1700 rxcmp1 = (struct rx_cmp_ext *) 1701 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1702 1703 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1704 return -EBUSY; 1705 1706 prod = rxr->rx_prod; 1707 1708 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { 1709 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, 1710 (struct rx_tpa_start_cmp_ext *)rxcmp1); 1711 1712 *event |= BNXT_RX_EVENT; 1713 goto next_rx_no_prod_no_len; 1714 1715 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1716 skb = bnxt_tpa_end(bp, cpr, 
&tmp_raw_cons, 1717 (struct rx_tpa_end_cmp *)rxcmp, 1718 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1719 1720 if (IS_ERR(skb)) 1721 return -EBUSY; 1722 1723 rc = -ENOMEM; 1724 if (likely(skb)) { 1725 bnxt_deliver_skb(bp, bnapi, skb); 1726 rc = 1; 1727 } 1728 *event |= BNXT_RX_EVENT; 1729 goto next_rx_no_prod_no_len; 1730 } 1731 1732 cons = rxcmp->rx_cmp_opaque; 1733 if (unlikely(cons != rxr->rx_next_cons)) { 1734 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); 1735 1736 /* 0xffff is forced error, don't print it */ 1737 if (rxr->rx_next_cons != 0xffff) 1738 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 1739 cons, rxr->rx_next_cons); 1740 bnxt_sched_reset(bp, rxr); 1741 return rc1; 1742 } 1743 rx_buf = &rxr->rx_buf_ring[cons]; 1744 data = rx_buf->data; 1745 data_ptr = rx_buf->data_ptr; 1746 prefetch(data_ptr); 1747 1748 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1749 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1750 1751 if (agg_bufs) { 1752 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1753 return -EBUSY; 1754 1755 cp_cons = NEXT_CMP(cp_cons); 1756 *event |= BNXT_AGG_EVENT; 1757 } 1758 *event |= BNXT_RX_EVENT; 1759 1760 rx_buf->data = NULL; 1761 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1762 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 1763 1764 bnxt_reuse_rx_data(rxr, cons, data); 1765 if (agg_bufs) 1766 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 1767 false); 1768 1769 rc = -EIO; 1770 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 1771 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; 1772 if (!(bp->flags & BNXT_FLAG_CHIP_P5) && 1773 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 1774 netdev_warn_once(bp->dev, "RX buffer error %x\n", 1775 rx_err); 1776 bnxt_sched_reset(bp, rxr); 1777 } 1778 } 1779 goto next_rx_no_len; 1780 } 1781 1782 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 1783 dma_addr = rx_buf->mapping; 1784 1785 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { 1786 rc = 1; 1787 goto next_rx; 1788 } 1789 1790 if (len <= bp->rx_copy_thresh) { 1791 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 1792 bnxt_reuse_rx_data(rxr, cons, data); 1793 if (!skb) { 1794 if (agg_bufs) 1795 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 1796 agg_bufs, false); 1797 rc = -ENOMEM; 1798 goto next_rx; 1799 } 1800 } else { 1801 u32 payload; 1802 1803 if (rx_buf->data_ptr == data_ptr) 1804 payload = misc & RX_CMP_PAYLOAD_OFFSET; 1805 else 1806 payload = 0; 1807 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 1808 payload | len); 1809 if (!skb) { 1810 rc = -ENOMEM; 1811 goto next_rx; 1812 } 1813 } 1814 1815 if (agg_bufs) { 1816 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); 1817 if (!skb) { 1818 rc = -ENOMEM; 1819 goto next_rx; 1820 } 1821 } 1822 1823 if (RX_CMP_HASH_VALID(rxcmp)) { 1824 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 1825 enum pkt_hash_types type = PKT_HASH_TYPE_L4; 1826 1827 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1828 if (hash_type != 1 && hash_type != 3) 1829 type = PKT_HASH_TYPE_L3; 1830 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 1831 } 1832 1833 cfa_code = RX_CMP_CFA_CODE(rxcmp1); 1834 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); 1835 1836 if ((rxcmp1->rx_cmp_flags2 & 1837 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1838 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1839 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1840 u16 vtag = meta_data & 
RX_CMP_FLAGS2_METADATA_TCI_MASK; 1841 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1842 1843 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1844 } 1845 1846 skb_checksum_none_assert(skb); 1847 if (RX_CMP_L4_CS_OK(rxcmp1)) { 1848 if (dev->features & NETIF_F_RXCSUM) { 1849 skb->ip_summed = CHECKSUM_UNNECESSARY; 1850 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 1851 } 1852 } else { 1853 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1854 if (dev->features & NETIF_F_RXCSUM) 1855 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; 1856 } 1857 } 1858 1859 bnxt_deliver_skb(bp, bnapi, skb); 1860 rc = 1; 1861 1862 next_rx: 1863 cpr->rx_packets += 1; 1864 cpr->rx_bytes += len; 1865 1866 next_rx_no_len: 1867 rxr->rx_prod = NEXT_RX(prod); 1868 rxr->rx_next_cons = NEXT_RX(cons); 1869 1870 next_rx_no_prod_no_len: 1871 *raw_cons = tmp_raw_cons; 1872 1873 return rc; 1874 } 1875 1876 /* In netpoll mode, if we are using a combined completion ring, we need to 1877 * discard the rx packets and recycle the buffers. 1878 */ 1879 static int bnxt_force_rx_discard(struct bnxt *bp, 1880 struct bnxt_cp_ring_info *cpr, 1881 u32 *raw_cons, u8 *event) 1882 { 1883 u32 tmp_raw_cons = *raw_cons; 1884 struct rx_cmp_ext *rxcmp1; 1885 struct rx_cmp *rxcmp; 1886 u16 cp_cons; 1887 u8 cmp_type; 1888 1889 cp_cons = RING_CMP(tmp_raw_cons); 1890 rxcmp = (struct rx_cmp *) 1891 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1892 1893 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1894 cp_cons = RING_CMP(tmp_raw_cons); 1895 rxcmp1 = (struct rx_cmp_ext *) 1896 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1897 1898 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1899 return -EBUSY; 1900 1901 cmp_type = RX_CMP_TYPE(rxcmp); 1902 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1903 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1904 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1905 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1906 struct rx_tpa_end_cmp_ext *tpa_end1; 1907 1908 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 1909 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 1910 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 1911 } 1912 return bnxt_rx_pkt(bp, cpr, raw_cons, event); 1913 } 1914 1915 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 1916 { 1917 struct bnxt_fw_health *fw_health = bp->fw_health; 1918 u32 reg = fw_health->regs[reg_idx]; 1919 u32 reg_type, reg_off, val = 0; 1920 1921 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 1922 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 1923 switch (reg_type) { 1924 case BNXT_FW_HEALTH_REG_TYPE_CFG: 1925 pci_read_config_dword(bp->pdev, reg_off, &val); 1926 break; 1927 case BNXT_FW_HEALTH_REG_TYPE_GRC: 1928 reg_off = fw_health->mapped_regs[reg_idx]; 1929 fallthrough; 1930 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 1931 val = readl(bp->bar0 + reg_off); 1932 break; 1933 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 1934 val = readl(bp->bar1 + reg_off); 1935 break; 1936 } 1937 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 1938 val &= fw_health->fw_reset_inprog_reg_mask; 1939 return val; 1940 } 1941 1942 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 1943 { 1944 int i; 1945 1946 for (i = 0; i < bp->rx_nr_rings; i++) { 1947 u16 grp_idx = bp->rx_ring[i].bnapi->index; 1948 struct bnxt_ring_grp_info *grp_info; 1949 1950 grp_info = &bp->grp_info[grp_idx]; 1951 if (grp_info->agg_fw_ring_id == ring_id) 1952 return grp_idx; 1953 } 1954 return INVALID_HW_RING_ID; 1955 } 1956 1957 #define BNXT_GET_EVENT_PORT(data) \ 1958 ((data) & \ 1959 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 
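/* Helpers to decode the data2 field of a RING_MONITOR_MSG async event:
 * BNXT_EVENT_RING_TYPE() extracts the type of the ring being disabled and
 * BNXT_EVENT_RING_TYPE_RX() tests whether that ring is an RX ring, as used
 * by the RING_MONITOR_MSG case in bnxt_async_event_process() below.
 */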
1960 1961 #define BNXT_EVENT_RING_TYPE(data2) \ 1962 ((data2) & \ 1963 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 1964 1965 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 1966 (BNXT_EVENT_RING_TYPE(data2) == \ 1967 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 1968 1969 static int bnxt_async_event_process(struct bnxt *bp, 1970 struct hwrm_async_event_cmpl *cmpl) 1971 { 1972 u16 event_id = le16_to_cpu(cmpl->event_id); 1973 u32 data1 = le32_to_cpu(cmpl->event_data1); 1974 u32 data2 = le32_to_cpu(cmpl->event_data2); 1975 1976 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 1977 switch (event_id) { 1978 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 1979 struct bnxt_link_info *link_info = &bp->link_info; 1980 1981 if (BNXT_VF(bp)) 1982 goto async_event_process_exit; 1983 1984 /* print unsupported speed warning in forced speed mode only */ 1985 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 1986 (data1 & 0x20000)) { 1987 u16 fw_speed = link_info->force_link_speed; 1988 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 1989 1990 if (speed != SPEED_UNKNOWN) 1991 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 1992 speed); 1993 } 1994 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 1995 } 1996 fallthrough; 1997 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 1998 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 1999 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2000 fallthrough; 2001 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2002 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2003 break; 2004 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2005 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2006 break; 2007 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2008 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2009 2010 if (BNXT_VF(bp)) 2011 break; 2012 2013 if (bp->pf.port_id != port_id) 2014 break; 2015 2016 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2017 break; 2018 } 2019 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2020 if (BNXT_PF(bp)) 2021 goto async_event_process_exit; 2022 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2023 break; 2024 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: 2025 if (netif_msg_hw(bp)) 2026 netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n", 2027 data1, data2); 2028 if (!bp->fw_health) 2029 goto async_event_process_exit; 2030 2031 bp->fw_reset_timestamp = jiffies; 2032 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2033 if (!bp->fw_reset_min_dsecs) 2034 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2035 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2036 if (!bp->fw_reset_max_dsecs) 2037 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2038 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2039 netdev_warn(bp->dev, "Firmware fatal reset event received\n"); 2040 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2041 } else { 2042 netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n", 2043 bp->fw_reset_max_dsecs * 100); 2044 } 2045 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2046 break; 2047 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2048 struct bnxt_fw_health *fw_health = bp->fw_health; 2049 2050 if (!fw_health) 2051 goto async_event_process_exit; 2052 2053 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1); 2054 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2055 if (!fw_health->enabled) 2056 break; 2057 2058 if 
(netif_msg_drv(bp)) 2059 netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n", 2060 fw_health->enabled, fw_health->master, 2061 bnxt_fw_health_readl(bp, 2062 BNXT_FW_RESET_CNT_REG), 2063 bnxt_fw_health_readl(bp, 2064 BNXT_FW_HEALTH_REG)); 2065 fw_health->tmr_multiplier = 2066 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2067 bp->current_interval * 10); 2068 fw_health->tmr_counter = fw_health->tmr_multiplier; 2069 fw_health->last_fw_heartbeat = 2070 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2071 fw_health->last_fw_reset_cnt = 2072 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2073 goto async_event_process_exit; 2074 } 2075 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2076 struct bnxt_rx_ring_info *rxr; 2077 u16 grp_idx; 2078 2079 if (bp->flags & BNXT_FLAG_CHIP_P5) 2080 goto async_event_process_exit; 2081 2082 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2083 BNXT_EVENT_RING_TYPE(data2), data1); 2084 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2085 goto async_event_process_exit; 2086 2087 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2088 if (grp_idx == INVALID_HW_RING_ID) { 2089 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2090 data1); 2091 goto async_event_process_exit; 2092 } 2093 rxr = bp->bnapi[grp_idx]->rx_ring; 2094 bnxt_sched_reset(bp, rxr); 2095 goto async_event_process_exit; 2096 } 2097 default: 2098 goto async_event_process_exit; 2099 } 2100 bnxt_queue_sp_work(bp); 2101 async_event_process_exit: 2102 bnxt_ulp_async_events(bp, cmpl); 2103 return 0; 2104 } 2105 2106 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2107 { 2108 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2109 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2110 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2111 (struct hwrm_fwd_req_cmpl *)txcmp; 2112 2113 switch (cmpl_type) { 2114 case CMPL_BASE_TYPE_HWRM_DONE: 2115 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2116 if (seq_id == bp->hwrm_intr_seq_id) 2117 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; 2118 else 2119 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); 2120 break; 2121 2122 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2123 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2124 2125 if ((vf_id < bp->pf.first_vf_id) || 2126 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2127 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2128 vf_id); 2129 return -EINVAL; 2130 } 2131 2132 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2133 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 2134 bnxt_queue_sp_work(bp); 2135 break; 2136 2137 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2138 bnxt_async_event_process(bp, 2139 (struct hwrm_async_event_cmpl *)txcmp); 2140 2141 default: 2142 break; 2143 } 2144 2145 return 0; 2146 } 2147 2148 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2149 { 2150 struct bnxt_napi *bnapi = dev_instance; 2151 struct bnxt *bp = bnapi->bp; 2152 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2153 u32 cons = RING_CMP(cpr->cp_raw_cons); 2154 2155 cpr->event_ctr++; 2156 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2157 napi_schedule(&bnapi->napi); 2158 return IRQ_HANDLED; 2159 } 2160 2161 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2162 { 2163 u32 raw_cons = cpr->cp_raw_cons; 2164 u16 cons = RING_CMP(raw_cons); 2165 struct tx_cmp *txcmp; 2166 2167 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2168 2169 return TX_CMP_VALID(txcmp, 
raw_cons); 2170 } 2171 2172 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 2173 { 2174 struct bnxt_napi *bnapi = dev_instance; 2175 struct bnxt *bp = bnapi->bp; 2176 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2177 u32 cons = RING_CMP(cpr->cp_raw_cons); 2178 u32 int_status; 2179 2180 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2181 2182 if (!bnxt_has_work(bp, cpr)) { 2183 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 2184 /* return if erroneous interrupt */ 2185 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 2186 return IRQ_NONE; 2187 } 2188 2189 /* disable ring IRQ */ 2190 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); 2191 2192 /* Return here if interrupt is shared and is disabled. */ 2193 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2194 return IRQ_HANDLED; 2195 2196 napi_schedule(&bnapi->napi); 2197 return IRQ_HANDLED; 2198 } 2199 2200 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2201 int budget) 2202 { 2203 struct bnxt_napi *bnapi = cpr->bnapi; 2204 u32 raw_cons = cpr->cp_raw_cons; 2205 u32 cons; 2206 int tx_pkts = 0; 2207 int rx_pkts = 0; 2208 u8 event = 0; 2209 struct tx_cmp *txcmp; 2210 2211 cpr->has_more_work = 0; 2212 cpr->had_work_done = 1; 2213 while (1) { 2214 int rc; 2215 2216 cons = RING_CMP(raw_cons); 2217 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2218 2219 if (!TX_CMP_VALID(txcmp, raw_cons)) 2220 break; 2221 2222 /* The valid test of the entry must be done first before 2223 * reading any further. 2224 */ 2225 dma_rmb(); 2226 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 2227 tx_pkts++; 2228 /* return full budget so NAPI will complete. */ 2229 if (unlikely(tx_pkts > bp->tx_wake_thresh)) { 2230 rx_pkts = budget; 2231 raw_cons = NEXT_RAW_CMP(raw_cons); 2232 if (budget) 2233 cpr->has_more_work = 1; 2234 break; 2235 } 2236 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2237 if (likely(budget)) 2238 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2239 else 2240 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2241 &event); 2242 if (likely(rc >= 0)) 2243 rx_pkts += rc; 2244 /* Increment rx_pkts when rc is -ENOMEM to count towards 2245 * the NAPI budget. Otherwise, we may potentially loop 2246 * here forever if we consistently cannot allocate 2247 * buffers. 
2248 */ 2249 else if (rc == -ENOMEM && budget) 2250 rx_pkts++; 2251 else if (rc == -EBUSY) /* partial completion */ 2252 break; 2253 } else if (unlikely((TX_CMP_TYPE(txcmp) == 2254 CMPL_BASE_TYPE_HWRM_DONE) || 2255 (TX_CMP_TYPE(txcmp) == 2256 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 2257 (TX_CMP_TYPE(txcmp) == 2258 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 2259 bnxt_hwrm_handler(bp, txcmp); 2260 } 2261 raw_cons = NEXT_RAW_CMP(raw_cons); 2262 2263 if (rx_pkts && rx_pkts == budget) { 2264 cpr->has_more_work = 1; 2265 break; 2266 } 2267 } 2268 2269 if (event & BNXT_REDIRECT_EVENT) 2270 xdp_do_flush_map(); 2271 2272 if (event & BNXT_TX_EVENT) { 2273 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 2274 u16 prod = txr->tx_prod; 2275 2276 /* Sync BD data before updating doorbell */ 2277 wmb(); 2278 2279 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2280 } 2281 2282 cpr->cp_raw_cons = raw_cons; 2283 bnapi->tx_pkts += tx_pkts; 2284 bnapi->events |= event; 2285 return rx_pkts; 2286 } 2287 2288 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) 2289 { 2290 if (bnapi->tx_pkts) { 2291 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); 2292 bnapi->tx_pkts = 0; 2293 } 2294 2295 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { 2296 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2297 2298 if (bnapi->events & BNXT_AGG_EVENT) 2299 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2300 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2301 } 2302 bnapi->events = 0; 2303 } 2304 2305 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2306 int budget) 2307 { 2308 struct bnxt_napi *bnapi = cpr->bnapi; 2309 int rx_pkts; 2310 2311 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2312 2313 /* ACK completion ring before freeing tx ring and producing new 2314 * buffers in rx/agg rings to prevent overflowing the completion 2315 * ring. 
2316 */ 2317 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2318 2319 __bnxt_poll_work_done(bp, bnapi); 2320 return rx_pkts; 2321 } 2322 2323 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2324 { 2325 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2326 struct bnxt *bp = bnapi->bp; 2327 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2328 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2329 struct tx_cmp *txcmp; 2330 struct rx_cmp_ext *rxcmp1; 2331 u32 cp_cons, tmp_raw_cons; 2332 u32 raw_cons = cpr->cp_raw_cons; 2333 u32 rx_pkts = 0; 2334 u8 event = 0; 2335 2336 while (1) { 2337 int rc; 2338 2339 cp_cons = RING_CMP(raw_cons); 2340 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2341 2342 if (!TX_CMP_VALID(txcmp, raw_cons)) 2343 break; 2344 2345 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2346 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2347 cp_cons = RING_CMP(tmp_raw_cons); 2348 rxcmp1 = (struct rx_cmp_ext *) 2349 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2350 2351 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2352 break; 2353 2354 /* force an error to recycle the buffer */ 2355 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2356 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2357 2358 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2359 if (likely(rc == -EIO) && budget) 2360 rx_pkts++; 2361 else if (rc == -EBUSY) /* partial completion */ 2362 break; 2363 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2364 CMPL_BASE_TYPE_HWRM_DONE)) { 2365 bnxt_hwrm_handler(bp, txcmp); 2366 } else { 2367 netdev_err(bp->dev, 2368 "Invalid completion received on special ring\n"); 2369 } 2370 raw_cons = NEXT_RAW_CMP(raw_cons); 2371 2372 if (rx_pkts == budget) 2373 break; 2374 } 2375 2376 cpr->cp_raw_cons = raw_cons; 2377 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2378 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2379 2380 if (event & BNXT_AGG_EVENT) 2381 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2382 2383 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2384 napi_complete_done(napi, rx_pkts); 2385 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2386 } 2387 return rx_pkts; 2388 } 2389 2390 static int bnxt_poll(struct napi_struct *napi, int budget) 2391 { 2392 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2393 struct bnxt *bp = bnapi->bp; 2394 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2395 int work_done = 0; 2396 2397 while (1) { 2398 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 2399 2400 if (work_done >= budget) { 2401 if (!budget) 2402 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2403 break; 2404 } 2405 2406 if (!bnxt_has_work(bp, cpr)) { 2407 if (napi_complete_done(napi, work_done)) 2408 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2409 break; 2410 } 2411 } 2412 if (bp->flags & BNXT_FLAG_DIM) { 2413 struct dim_sample dim_sample = {}; 2414 2415 dim_update_sample(cpr->event_ctr, 2416 cpr->rx_packets, 2417 cpr->rx_bytes, 2418 &dim_sample); 2419 net_dim(&cpr->dim, dim_sample); 2420 } 2421 return work_done; 2422 } 2423 2424 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 2425 { 2426 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2427 int i, work_done = 0; 2428 2429 for (i = 0; i < 2; i++) { 2430 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2431 2432 if (cpr2) { 2433 work_done += __bnxt_poll_work(bp, cpr2, 2434 budget - work_done); 2435 cpr->has_more_work |= cpr2->has_more_work; 2436 } 2437 } 2438 return work_done; 2439 } 2440 2441 static void __bnxt_poll_cqs_done(struct bnxt 
*bp, struct bnxt_napi *bnapi, 2442 u64 dbr_type) 2443 { 2444 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2445 int i; 2446 2447 for (i = 0; i < 2; i++) { 2448 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2449 struct bnxt_db_info *db; 2450 2451 if (cpr2 && cpr2->had_work_done) { 2452 db = &cpr2->cp_db; 2453 writeq(db->db_key64 | dbr_type | 2454 RING_CMP(cpr2->cp_raw_cons), db->doorbell); 2455 cpr2->had_work_done = 0; 2456 } 2457 } 2458 __bnxt_poll_work_done(bp, bnapi); 2459 } 2460 2461 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 2462 { 2463 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2464 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2465 u32 raw_cons = cpr->cp_raw_cons; 2466 struct bnxt *bp = bnapi->bp; 2467 struct nqe_cn *nqcmp; 2468 int work_done = 0; 2469 u32 cons; 2470 2471 if (cpr->has_more_work) { 2472 cpr->has_more_work = 0; 2473 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 2474 } 2475 while (1) { 2476 cons = RING_CMP(raw_cons); 2477 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2478 2479 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 2480 if (cpr->has_more_work) 2481 break; 2482 2483 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); 2484 cpr->cp_raw_cons = raw_cons; 2485 if (napi_complete_done(napi, work_done)) 2486 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 2487 cpr->cp_raw_cons); 2488 return work_done; 2489 } 2490 2491 /* The valid test of the entry must be done first before 2492 * reading any further. 2493 */ 2494 dma_rmb(); 2495 2496 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { 2497 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 2498 struct bnxt_cp_ring_info *cpr2; 2499 2500 cpr2 = cpr->cp_ring_arr[idx]; 2501 work_done += __bnxt_poll_work(bp, cpr2, 2502 budget - work_done); 2503 cpr->has_more_work |= cpr2->has_more_work; 2504 } else { 2505 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 2506 } 2507 raw_cons = NEXT_RAW_CMP(raw_cons); 2508 } 2509 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); 2510 if (raw_cons != cpr->cp_raw_cons) { 2511 cpr->cp_raw_cons = raw_cons; 2512 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 2513 } 2514 return work_done; 2515 } 2516 2517 static void bnxt_free_tx_skbs(struct bnxt *bp) 2518 { 2519 int i, max_idx; 2520 struct pci_dev *pdev = bp->pdev; 2521 2522 if (!bp->tx_ring) 2523 return; 2524 2525 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2526 for (i = 0; i < bp->tx_nr_rings; i++) { 2527 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2528 int j; 2529 2530 for (j = 0; j < max_idx;) { 2531 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2532 struct sk_buff *skb; 2533 int k, last; 2534 2535 if (i < bp->tx_nr_rings_xdp && 2536 tx_buf->action == XDP_REDIRECT) { 2537 dma_unmap_single(&pdev->dev, 2538 dma_unmap_addr(tx_buf, mapping), 2539 dma_unmap_len(tx_buf, len), 2540 PCI_DMA_TODEVICE); 2541 xdp_return_frame(tx_buf->xdpf); 2542 tx_buf->action = 0; 2543 tx_buf->xdpf = NULL; 2544 j++; 2545 continue; 2546 } 2547 2548 skb = tx_buf->skb; 2549 if (!skb) { 2550 j++; 2551 continue; 2552 } 2553 2554 tx_buf->skb = NULL; 2555 2556 if (tx_buf->is_push) { 2557 dev_kfree_skb(skb); 2558 j += 2; 2559 continue; 2560 } 2561 2562 dma_unmap_single(&pdev->dev, 2563 dma_unmap_addr(tx_buf, mapping), 2564 skb_headlen(skb), 2565 PCI_DMA_TODEVICE); 2566 2567 last = tx_buf->nr_frags; 2568 j += 2; 2569 for (k = 0; k < last; k++, j++) { 2570 int ring_idx = j & bp->tx_ring_mask; 2571 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2572 2573 tx_buf = &txr->tx_buf_ring[ring_idx]; 2574 dma_unmap_page( 2575 &pdev->dev, 2576 
dma_unmap_addr(tx_buf, mapping), 2577 skb_frag_size(frag), PCI_DMA_TODEVICE); 2578 } 2579 dev_kfree_skb(skb); 2580 } 2581 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 2582 } 2583 } 2584 2585 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) 2586 { 2587 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 2588 struct pci_dev *pdev = bp->pdev; 2589 struct bnxt_tpa_idx_map *map; 2590 int i, max_idx, max_agg_idx; 2591 2592 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 2593 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 2594 if (!rxr->rx_tpa) 2595 goto skip_rx_tpa_free; 2596 2597 for (i = 0; i < bp->max_tpa; i++) { 2598 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 2599 u8 *data = tpa_info->data; 2600 2601 if (!data) 2602 continue; 2603 2604 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, 2605 bp->rx_buf_use_size, bp->rx_dir, 2606 DMA_ATTR_WEAK_ORDERING); 2607 2608 tpa_info->data = NULL; 2609 2610 kfree(data); 2611 } 2612 2613 skip_rx_tpa_free: 2614 for (i = 0; i < max_idx; i++) { 2615 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 2616 dma_addr_t mapping = rx_buf->mapping; 2617 void *data = rx_buf->data; 2618 2619 if (!data) 2620 continue; 2621 2622 rx_buf->data = NULL; 2623 if (BNXT_RX_PAGE_MODE(bp)) { 2624 mapping -= bp->rx_dma_offset; 2625 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, 2626 bp->rx_dir, 2627 DMA_ATTR_WEAK_ORDERING); 2628 page_pool_recycle_direct(rxr->page_pool, data); 2629 } else { 2630 dma_unmap_single_attrs(&pdev->dev, mapping, 2631 bp->rx_buf_use_size, bp->rx_dir, 2632 DMA_ATTR_WEAK_ORDERING); 2633 kfree(data); 2634 } 2635 } 2636 for (i = 0; i < max_agg_idx; i++) { 2637 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 2638 struct page *page = rx_agg_buf->page; 2639 2640 if (!page) 2641 continue; 2642 2643 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, 2644 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, 2645 DMA_ATTR_WEAK_ORDERING); 2646 2647 rx_agg_buf->page = NULL; 2648 __clear_bit(i, rxr->rx_agg_bmap); 2649 2650 __free_page(page); 2651 } 2652 if (rxr->rx_page) { 2653 __free_page(rxr->rx_page); 2654 rxr->rx_page = NULL; 2655 } 2656 map = rxr->rx_tpa_idx_map; 2657 if (map) 2658 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 2659 } 2660 2661 static void bnxt_free_rx_skbs(struct bnxt *bp) 2662 { 2663 int i; 2664 2665 if (!bp->rx_ring) 2666 return; 2667 2668 for (i = 0; i < bp->rx_nr_rings; i++) 2669 bnxt_free_one_rx_ring_skbs(bp, i); 2670 } 2671 2672 static void bnxt_free_skbs(struct bnxt *bp) 2673 { 2674 bnxt_free_tx_skbs(bp); 2675 bnxt_free_rx_skbs(bp); 2676 } 2677 2678 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2679 { 2680 struct pci_dev *pdev = bp->pdev; 2681 int i; 2682 2683 for (i = 0; i < rmem->nr_pages; i++) { 2684 if (!rmem->pg_arr[i]) 2685 continue; 2686 2687 dma_free_coherent(&pdev->dev, rmem->page_size, 2688 rmem->pg_arr[i], rmem->dma_arr[i]); 2689 2690 rmem->pg_arr[i] = NULL; 2691 } 2692 if (rmem->pg_tbl) { 2693 size_t pg_tbl_size = rmem->nr_pages * 8; 2694 2695 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2696 pg_tbl_size = rmem->page_size; 2697 dma_free_coherent(&pdev->dev, pg_tbl_size, 2698 rmem->pg_tbl, rmem->pg_tbl_map); 2699 rmem->pg_tbl = NULL; 2700 } 2701 if (rmem->vmem_size && *rmem->vmem) { 2702 vfree(*rmem->vmem); 2703 *rmem->vmem = NULL; 2704 } 2705 } 2706 2707 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2708 { 2709 struct pci_dev *pdev = bp->pdev; 2710 u64 valid_bit = 0; 2711 int i; 2712 2713 if (rmem->flags 
& (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 2714 valid_bit = PTU_PTE_VALID; 2715 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 2716 size_t pg_tbl_size = rmem->nr_pages * 8; 2717 2718 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2719 pg_tbl_size = rmem->page_size; 2720 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 2721 &rmem->pg_tbl_map, 2722 GFP_KERNEL); 2723 if (!rmem->pg_tbl) 2724 return -ENOMEM; 2725 } 2726 2727 for (i = 0; i < rmem->nr_pages; i++) { 2728 u64 extra_bits = valid_bit; 2729 2730 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2731 rmem->page_size, 2732 &rmem->dma_arr[i], 2733 GFP_KERNEL); 2734 if (!rmem->pg_arr[i]) 2735 return -ENOMEM; 2736 2737 if (rmem->init_val) 2738 memset(rmem->pg_arr[i], rmem->init_val, 2739 rmem->page_size); 2740 if (rmem->nr_pages > 1 || rmem->depth > 0) { 2741 if (i == rmem->nr_pages - 2 && 2742 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2743 extra_bits |= PTU_PTE_NEXT_TO_LAST; 2744 else if (i == rmem->nr_pages - 1 && 2745 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2746 extra_bits |= PTU_PTE_LAST; 2747 rmem->pg_tbl[i] = 2748 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 2749 } 2750 } 2751 2752 if (rmem->vmem_size) { 2753 *rmem->vmem = vzalloc(rmem->vmem_size); 2754 if (!(*rmem->vmem)) 2755 return -ENOMEM; 2756 } 2757 return 0; 2758 } 2759 2760 static void bnxt_free_tpa_info(struct bnxt *bp) 2761 { 2762 int i; 2763 2764 for (i = 0; i < bp->rx_nr_rings; i++) { 2765 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2766 2767 kfree(rxr->rx_tpa_idx_map); 2768 rxr->rx_tpa_idx_map = NULL; 2769 if (rxr->rx_tpa) { 2770 kfree(rxr->rx_tpa[0].agg_arr); 2771 rxr->rx_tpa[0].agg_arr = NULL; 2772 } 2773 kfree(rxr->rx_tpa); 2774 rxr->rx_tpa = NULL; 2775 } 2776 } 2777 2778 static int bnxt_alloc_tpa_info(struct bnxt *bp) 2779 { 2780 int i, j, total_aggs = 0; 2781 2782 bp->max_tpa = MAX_TPA; 2783 if (bp->flags & BNXT_FLAG_CHIP_P5) { 2784 if (!bp->max_tpa_v2) 2785 return 0; 2786 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 2787 total_aggs = bp->max_tpa * MAX_SKB_FRAGS; 2788 } 2789 2790 for (i = 0; i < bp->rx_nr_rings; i++) { 2791 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2792 struct rx_agg_cmp *agg; 2793 2794 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 2795 GFP_KERNEL); 2796 if (!rxr->rx_tpa) 2797 return -ENOMEM; 2798 2799 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 2800 continue; 2801 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); 2802 rxr->rx_tpa[0].agg_arr = agg; 2803 if (!agg) 2804 return -ENOMEM; 2805 for (j = 1; j < bp->max_tpa; j++) 2806 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; 2807 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 2808 GFP_KERNEL); 2809 if (!rxr->rx_tpa_idx_map) 2810 return -ENOMEM; 2811 } 2812 return 0; 2813 } 2814 2815 static void bnxt_free_rx_rings(struct bnxt *bp) 2816 { 2817 int i; 2818 2819 if (!bp->rx_ring) 2820 return; 2821 2822 bnxt_free_tpa_info(bp); 2823 for (i = 0; i < bp->rx_nr_rings; i++) { 2824 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2825 struct bnxt_ring_struct *ring; 2826 2827 if (rxr->xdp_prog) 2828 bpf_prog_put(rxr->xdp_prog); 2829 2830 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 2831 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2832 2833 page_pool_destroy(rxr->page_pool); 2834 rxr->page_pool = NULL; 2835 2836 kfree(rxr->rx_agg_bmap); 2837 rxr->rx_agg_bmap = NULL; 2838 2839 ring = &rxr->rx_ring_struct; 2840 bnxt_free_ring(bp, &ring->ring_mem); 2841 2842 ring = &rxr->rx_agg_ring_struct; 2843 bnxt_free_ring(bp, &ring->ring_mem); 2844 } 
2845 } 2846 2847 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 2848 struct bnxt_rx_ring_info *rxr) 2849 { 2850 struct page_pool_params pp = { 0 }; 2851 2852 pp.pool_size = bp->rx_ring_size; 2853 pp.nid = dev_to_node(&bp->pdev->dev); 2854 pp.dev = &bp->pdev->dev; 2855 pp.dma_dir = DMA_BIDIRECTIONAL; 2856 2857 rxr->page_pool = page_pool_create(&pp); 2858 if (IS_ERR(rxr->page_pool)) { 2859 int err = PTR_ERR(rxr->page_pool); 2860 2861 rxr->page_pool = NULL; 2862 return err; 2863 } 2864 return 0; 2865 } 2866 2867 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2868 { 2869 int i, rc = 0, agg_rings = 0; 2870 2871 if (!bp->rx_ring) 2872 return -ENOMEM; 2873 2874 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2875 agg_rings = 1; 2876 2877 for (i = 0; i < bp->rx_nr_rings; i++) { 2878 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2879 struct bnxt_ring_struct *ring; 2880 2881 ring = &rxr->rx_ring_struct; 2882 2883 rc = bnxt_alloc_rx_page_pool(bp, rxr); 2884 if (rc) 2885 return rc; 2886 2887 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 2888 if (rc < 0) 2889 return rc; 2890 2891 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 2892 MEM_TYPE_PAGE_POOL, 2893 rxr->page_pool); 2894 if (rc) { 2895 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2896 return rc; 2897 } 2898 2899 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2900 if (rc) 2901 return rc; 2902 2903 ring->grp_idx = i; 2904 if (agg_rings) { 2905 u16 mem_size; 2906 2907 ring = &rxr->rx_agg_ring_struct; 2908 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2909 if (rc) 2910 return rc; 2911 2912 ring->grp_idx = i; 2913 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2914 mem_size = rxr->rx_agg_bmap_size / 8; 2915 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2916 if (!rxr->rx_agg_bmap) 2917 return -ENOMEM; 2918 } 2919 } 2920 if (bp->flags & BNXT_FLAG_TPA) 2921 rc = bnxt_alloc_tpa_info(bp); 2922 return rc; 2923 } 2924 2925 static void bnxt_free_tx_rings(struct bnxt *bp) 2926 { 2927 int i; 2928 struct pci_dev *pdev = bp->pdev; 2929 2930 if (!bp->tx_ring) 2931 return; 2932 2933 for (i = 0; i < bp->tx_nr_rings; i++) { 2934 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2935 struct bnxt_ring_struct *ring; 2936 2937 if (txr->tx_push) { 2938 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2939 txr->tx_push, txr->tx_push_mapping); 2940 txr->tx_push = NULL; 2941 } 2942 2943 ring = &txr->tx_ring_struct; 2944 2945 bnxt_free_ring(bp, &ring->ring_mem); 2946 } 2947 } 2948 2949 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2950 { 2951 int i, j, rc; 2952 struct pci_dev *pdev = bp->pdev; 2953 2954 bp->tx_push_size = 0; 2955 if (bp->tx_push_thresh) { 2956 int push_size; 2957 2958 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2959 bp->tx_push_thresh); 2960 2961 if (push_size > 256) { 2962 push_size = 0; 2963 bp->tx_push_thresh = 0; 2964 } 2965 2966 bp->tx_push_size = push_size; 2967 } 2968 2969 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2970 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2971 struct bnxt_ring_struct *ring; 2972 u8 qidx; 2973 2974 ring = &txr->tx_ring_struct; 2975 2976 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2977 if (rc) 2978 return rc; 2979 2980 ring->grp_idx = txr->bnapi->index; 2981 if (bp->tx_push_size) { 2982 dma_addr_t mapping; 2983 2984 /* One pre-allocated DMA buffer to backup 2985 * TX push operation 2986 */ 2987 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2988 bp->tx_push_size, 2989 &txr->tx_push_mapping, 2990 GFP_KERNEL); 2991 2992 if (!txr->tx_push) 2993 return -ENOMEM; 2994 2995 mapping = txr->tx_push_mapping + 2996 
sizeof(struct tx_push_bd); 2997 txr->data_mapping = cpu_to_le64(mapping); 2998 } 2999 qidx = bp->tc_to_qidx[j]; 3000 ring->queue_id = bp->q_info[qidx].queue_id; 3001 if (i < bp->tx_nr_rings_xdp) 3002 continue; 3003 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 3004 j++; 3005 } 3006 return 0; 3007 } 3008 3009 static void bnxt_free_cp_rings(struct bnxt *bp) 3010 { 3011 int i; 3012 3013 if (!bp->bnapi) 3014 return; 3015 3016 for (i = 0; i < bp->cp_nr_rings; i++) { 3017 struct bnxt_napi *bnapi = bp->bnapi[i]; 3018 struct bnxt_cp_ring_info *cpr; 3019 struct bnxt_ring_struct *ring; 3020 int j; 3021 3022 if (!bnapi) 3023 continue; 3024 3025 cpr = &bnapi->cp_ring; 3026 ring = &cpr->cp_ring_struct; 3027 3028 bnxt_free_ring(bp, &ring->ring_mem); 3029 3030 for (j = 0; j < 2; j++) { 3031 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 3032 3033 if (cpr2) { 3034 ring = &cpr2->cp_ring_struct; 3035 bnxt_free_ring(bp, &ring->ring_mem); 3036 kfree(cpr2); 3037 cpr->cp_ring_arr[j] = NULL; 3038 } 3039 } 3040 } 3041 } 3042 3043 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) 3044 { 3045 struct bnxt_ring_mem_info *rmem; 3046 struct bnxt_ring_struct *ring; 3047 struct bnxt_cp_ring_info *cpr; 3048 int rc; 3049 3050 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); 3051 if (!cpr) 3052 return NULL; 3053 3054 ring = &cpr->cp_ring_struct; 3055 rmem = &ring->ring_mem; 3056 rmem->nr_pages = bp->cp_nr_pages; 3057 rmem->page_size = HW_CMPD_RING_SIZE; 3058 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3059 rmem->dma_arr = cpr->cp_desc_mapping; 3060 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3061 rc = bnxt_alloc_ring(bp, rmem); 3062 if (rc) { 3063 bnxt_free_ring(bp, rmem); 3064 kfree(cpr); 3065 cpr = NULL; 3066 } 3067 return cpr; 3068 } 3069 3070 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3071 { 3072 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3073 int i, rc, ulp_base_vec, ulp_msix; 3074 3075 ulp_msix = bnxt_get_ulp_msix_num(bp); 3076 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 3077 for (i = 0; i < bp->cp_nr_rings; i++) { 3078 struct bnxt_napi *bnapi = bp->bnapi[i]; 3079 struct bnxt_cp_ring_info *cpr; 3080 struct bnxt_ring_struct *ring; 3081 3082 if (!bnapi) 3083 continue; 3084 3085 cpr = &bnapi->cp_ring; 3086 cpr->bnapi = bnapi; 3087 ring = &cpr->cp_ring_struct; 3088 3089 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3090 if (rc) 3091 return rc; 3092 3093 if (ulp_msix && i >= ulp_base_vec) 3094 ring->map_idx = i + ulp_msix; 3095 else 3096 ring->map_idx = i; 3097 3098 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 3099 continue; 3100 3101 if (i < bp->rx_nr_rings) { 3102 struct bnxt_cp_ring_info *cpr2 = 3103 bnxt_alloc_cp_sub_ring(bp); 3104 3105 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; 3106 if (!cpr2) 3107 return -ENOMEM; 3108 cpr2->bnapi = bnapi; 3109 } 3110 if ((sh && i < bp->tx_nr_rings) || 3111 (!sh && i >= bp->rx_nr_rings)) { 3112 struct bnxt_cp_ring_info *cpr2 = 3113 bnxt_alloc_cp_sub_ring(bp); 3114 3115 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; 3116 if (!cpr2) 3117 return -ENOMEM; 3118 cpr2->bnapi = bnapi; 3119 } 3120 } 3121 return 0; 3122 } 3123 3124 static void bnxt_init_ring_struct(struct bnxt *bp) 3125 { 3126 int i; 3127 3128 for (i = 0; i < bp->cp_nr_rings; i++) { 3129 struct bnxt_napi *bnapi = bp->bnapi[i]; 3130 struct bnxt_ring_mem_info *rmem; 3131 struct bnxt_cp_ring_info *cpr; 3132 struct bnxt_rx_ring_info *rxr; 3133 struct bnxt_tx_ring_info *txr; 3134 struct bnxt_ring_struct *ring; 3135 3136 if (!bnapi) 3137 continue; 3138 3139 cpr = &bnapi->cp_ring; 3140 ring = &cpr->cp_ring_struct; 
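/* Point the generic ring_mem descriptor at the completion ring's
 * descriptor pages and their DMA mappings; bnxt_alloc_ring() consumes
 * this later.  The completion ring needs no software vmem area.
 */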
3141 rmem = &ring->ring_mem; 3142 rmem->nr_pages = bp->cp_nr_pages; 3143 rmem->page_size = HW_CMPD_RING_SIZE; 3144 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3145 rmem->dma_arr = cpr->cp_desc_mapping; 3146 rmem->vmem_size = 0; 3147 3148 rxr = bnapi->rx_ring; 3149 if (!rxr) 3150 goto skip_rx; 3151 3152 ring = &rxr->rx_ring_struct; 3153 rmem = &ring->ring_mem; 3154 rmem->nr_pages = bp->rx_nr_pages; 3155 rmem->page_size = HW_RXBD_RING_SIZE; 3156 rmem->pg_arr = (void **)rxr->rx_desc_ring; 3157 rmem->dma_arr = rxr->rx_desc_mapping; 3158 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 3159 rmem->vmem = (void **)&rxr->rx_buf_ring; 3160 3161 ring = &rxr->rx_agg_ring_struct; 3162 rmem = &ring->ring_mem; 3163 rmem->nr_pages = bp->rx_agg_nr_pages; 3164 rmem->page_size = HW_RXBD_RING_SIZE; 3165 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 3166 rmem->dma_arr = rxr->rx_agg_desc_mapping; 3167 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 3168 rmem->vmem = (void **)&rxr->rx_agg_ring; 3169 3170 skip_rx: 3171 txr = bnapi->tx_ring; 3172 if (!txr) 3173 continue; 3174 3175 ring = &txr->tx_ring_struct; 3176 rmem = &ring->ring_mem; 3177 rmem->nr_pages = bp->tx_nr_pages; 3178 rmem->page_size = HW_RXBD_RING_SIZE; 3179 rmem->pg_arr = (void **)txr->tx_desc_ring; 3180 rmem->dma_arr = txr->tx_desc_mapping; 3181 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 3182 rmem->vmem = (void **)&txr->tx_buf_ring; 3183 } 3184 } 3185 3186 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 3187 { 3188 int i; 3189 u32 prod; 3190 struct rx_bd **rx_buf_ring; 3191 3192 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 3193 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 3194 int j; 3195 struct rx_bd *rxbd; 3196 3197 rxbd = rx_buf_ring[i]; 3198 if (!rxbd) 3199 continue; 3200 3201 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 3202 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 3203 rxbd->rx_bd_opaque = prod; 3204 } 3205 } 3206 } 3207 3208 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) 3209 { 3210 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 3211 struct net_device *dev = bp->dev; 3212 u32 prod; 3213 int i; 3214 3215 prod = rxr->rx_prod; 3216 for (i = 0; i < bp->rx_ring_size; i++) { 3217 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 3218 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 3219 ring_nr, i, bp->rx_ring_size); 3220 break; 3221 } 3222 prod = NEXT_RX(prod); 3223 } 3224 rxr->rx_prod = prod; 3225 3226 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 3227 return 0; 3228 3229 prod = rxr->rx_agg_prod; 3230 for (i = 0; i < bp->rx_agg_ring_size; i++) { 3231 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { 3232 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 3233 ring_nr, i, bp->rx_ring_size); 3234 break; 3235 } 3236 prod = NEXT_RX_AGG(prod); 3237 } 3238 rxr->rx_agg_prod = prod; 3239 3240 if (rxr->rx_tpa) { 3241 dma_addr_t mapping; 3242 u8 *data; 3243 3244 for (i = 0; i < bp->max_tpa; i++) { 3245 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL); 3246 if (!data) 3247 return -ENOMEM; 3248 3249 rxr->rx_tpa[i].data = data; 3250 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 3251 rxr->rx_tpa[i].mapping = mapping; 3252 } 3253 } 3254 return 0; 3255 } 3256 3257 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 3258 { 3259 struct bnxt_rx_ring_info *rxr; 3260 struct bnxt_ring_struct *ring; 3261 u32 type; 3262 3263 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 3264 RX_BD_TYPE_RX_PACKET_BD | 
RX_BD_FLAGS_EOP; 3265 3266 if (NET_IP_ALIGN == 2) 3267 type |= RX_BD_FLAGS_SOP; 3268 3269 rxr = &bp->rx_ring[ring_nr]; 3270 ring = &rxr->rx_ring_struct; 3271 bnxt_init_rxbd_pages(ring, type); 3272 3273 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 3274 bpf_prog_add(bp->xdp_prog, 1); 3275 rxr->xdp_prog = bp->xdp_prog; 3276 } 3277 ring->fw_ring_id = INVALID_HW_RING_ID; 3278 3279 ring = &rxr->rx_agg_ring_struct; 3280 ring->fw_ring_id = INVALID_HW_RING_ID; 3281 3282 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { 3283 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 3284 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 3285 3286 bnxt_init_rxbd_pages(ring, type); 3287 } 3288 3289 return bnxt_alloc_one_rx_ring(bp, ring_nr); 3290 } 3291 3292 static void bnxt_init_cp_rings(struct bnxt *bp) 3293 { 3294 int i, j; 3295 3296 for (i = 0; i < bp->cp_nr_rings; i++) { 3297 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 3298 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3299 3300 ring->fw_ring_id = INVALID_HW_RING_ID; 3301 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3302 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3303 for (j = 0; j < 2; j++) { 3304 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 3305 3306 if (!cpr2) 3307 continue; 3308 3309 ring = &cpr2->cp_ring_struct; 3310 ring->fw_ring_id = INVALID_HW_RING_ID; 3311 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3312 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3313 } 3314 } 3315 } 3316 3317 static int bnxt_init_rx_rings(struct bnxt *bp) 3318 { 3319 int i, rc = 0; 3320 3321 if (BNXT_RX_PAGE_MODE(bp)) { 3322 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 3323 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 3324 } else { 3325 bp->rx_offset = BNXT_RX_OFFSET; 3326 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 3327 } 3328 3329 for (i = 0; i < bp->rx_nr_rings; i++) { 3330 rc = bnxt_init_one_rx_ring(bp, i); 3331 if (rc) 3332 break; 3333 } 3334 3335 return rc; 3336 } 3337 3338 static int bnxt_init_tx_rings(struct bnxt *bp) 3339 { 3340 u16 i; 3341 3342 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 3343 MAX_SKB_FRAGS + 1); 3344 3345 for (i = 0; i < bp->tx_nr_rings; i++) { 3346 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3347 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 3348 3349 ring->fw_ring_id = INVALID_HW_RING_ID; 3350 } 3351 3352 return 0; 3353 } 3354 3355 static void bnxt_free_ring_grps(struct bnxt *bp) 3356 { 3357 kfree(bp->grp_info); 3358 bp->grp_info = NULL; 3359 } 3360 3361 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 3362 { 3363 int i; 3364 3365 if (irq_re_init) { 3366 bp->grp_info = kcalloc(bp->cp_nr_rings, 3367 sizeof(struct bnxt_ring_grp_info), 3368 GFP_KERNEL); 3369 if (!bp->grp_info) 3370 return -ENOMEM; 3371 } 3372 for (i = 0; i < bp->cp_nr_rings; i++) { 3373 if (irq_re_init) 3374 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 3375 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 3376 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 3377 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 3378 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 3379 } 3380 return 0; 3381 } 3382 3383 static void bnxt_free_vnics(struct bnxt *bp) 3384 { 3385 kfree(bp->vnic_info); 3386 bp->vnic_info = NULL; 3387 bp->nr_vnics = 0; 3388 } 3389 3390 static int bnxt_alloc_vnics(struct bnxt *bp) 3391 { 3392 int num_vnics = 1; 3393 3394 #ifdef CONFIG_RFS_ACCEL 3395 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 3396 num_vnics += bp->rx_nr_rings; 3397 #endif 
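/* Nitro A0 needs one additional VNIC beyond the default (and optional
 * per-RX-ring RFS) VNICs.
 */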
3398 3399 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3400 num_vnics++; 3401 3402 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 3403 GFP_KERNEL); 3404 if (!bp->vnic_info) 3405 return -ENOMEM; 3406 3407 bp->nr_vnics = num_vnics; 3408 return 0; 3409 } 3410 3411 static void bnxt_init_vnics(struct bnxt *bp) 3412 { 3413 int i; 3414 3415 for (i = 0; i < bp->nr_vnics; i++) { 3416 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3417 int j; 3418 3419 vnic->fw_vnic_id = INVALID_HW_RING_ID; 3420 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 3421 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 3422 3423 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 3424 3425 if (bp->vnic_info[i].rss_hash_key) { 3426 if (i == 0) 3427 prandom_bytes(vnic->rss_hash_key, 3428 HW_HASH_KEY_SIZE); 3429 else 3430 memcpy(vnic->rss_hash_key, 3431 bp->vnic_info[0].rss_hash_key, 3432 HW_HASH_KEY_SIZE); 3433 } 3434 } 3435 } 3436 3437 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 3438 { 3439 int pages; 3440 3441 pages = ring_size / desc_per_pg; 3442 3443 if (!pages) 3444 return 1; 3445 3446 pages++; 3447 3448 while (pages & (pages - 1)) 3449 pages++; 3450 3451 return pages; 3452 } 3453 3454 void bnxt_set_tpa_flags(struct bnxt *bp) 3455 { 3456 bp->flags &= ~BNXT_FLAG_TPA; 3457 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 3458 return; 3459 if (bp->dev->features & NETIF_F_LRO) 3460 bp->flags |= BNXT_FLAG_LRO; 3461 else if (bp->dev->features & NETIF_F_GRO_HW) 3462 bp->flags |= BNXT_FLAG_GRO; 3463 } 3464 3465 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 3466 * be set on entry. 3467 */ 3468 void bnxt_set_ring_params(struct bnxt *bp) 3469 { 3470 u32 ring_size, rx_size, rx_space, max_rx_cmpl; 3471 u32 agg_factor = 0, agg_ring_size = 0; 3472 3473 /* 8 for CRC and VLAN */ 3474 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 3475 3476 rx_space = rx_size + NET_SKB_PAD + 3477 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3478 3479 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 3480 ring_size = bp->rx_ring_size; 3481 bp->rx_agg_ring_size = 0; 3482 bp->rx_agg_nr_pages = 0; 3483 3484 if (bp->flags & BNXT_FLAG_TPA) 3485 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 3486 3487 bp->flags &= ~BNXT_FLAG_JUMBO; 3488 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 3489 u32 jumbo_factor; 3490 3491 bp->flags |= BNXT_FLAG_JUMBO; 3492 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 3493 if (jumbo_factor > agg_factor) 3494 agg_factor = jumbo_factor; 3495 } 3496 agg_ring_size = ring_size * agg_factor; 3497 3498 if (agg_ring_size) { 3499 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 3500 RX_DESC_CNT); 3501 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 3502 u32 tmp = agg_ring_size; 3503 3504 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 3505 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 3506 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 3507 tmp, agg_ring_size); 3508 } 3509 bp->rx_agg_ring_size = agg_ring_size; 3510 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 3511 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 3512 rx_space = rx_size + NET_SKB_PAD + 3513 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3514 } 3515 3516 bp->rx_buf_use_size = rx_size; 3517 bp->rx_buf_size = rx_space; 3518 3519 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 3520 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 3521 3522 ring_size = bp->tx_ring_size; 3523 bp->tx_nr_pages = 
bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 3524 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 3525 3526 max_rx_cmpl = bp->rx_ring_size; 3527 /* MAX TPA needs to be added because TPA_START completions are 3528 * immediately recycled, so the TPA completions are not bound by 3529 * the RX ring size. 3530 */ 3531 if (bp->flags & BNXT_FLAG_TPA) 3532 max_rx_cmpl += bp->max_tpa; 3533 /* RX and TPA completions are 32-byte, all others are 16-byte */ 3534 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 3535 bp->cp_ring_size = ring_size; 3536 3537 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 3538 if (bp->cp_nr_pages > MAX_CP_PAGES) { 3539 bp->cp_nr_pages = MAX_CP_PAGES; 3540 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 3541 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 3542 ring_size, bp->cp_ring_size); 3543 } 3544 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 3545 bp->cp_ring_mask = bp->cp_bit - 1; 3546 } 3547 3548 /* Changing allocation mode of RX rings. 3549 * TODO: Update when extending xdp_rxq_info to support allocation modes. 3550 */ 3551 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 3552 { 3553 if (page_mode) { 3554 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 3555 return -EOPNOTSUPP; 3556 bp->dev->max_mtu = 3557 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 3558 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 3559 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 3560 bp->rx_dir = DMA_BIDIRECTIONAL; 3561 bp->rx_skb_func = bnxt_rx_page_skb; 3562 /* Disable LRO or GRO_HW */ 3563 netdev_update_features(bp->dev); 3564 } else { 3565 bp->dev->max_mtu = bp->max_mtu; 3566 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 3567 bp->rx_dir = DMA_FROM_DEVICE; 3568 bp->rx_skb_func = bnxt_rx_skb; 3569 } 3570 return 0; 3571 } 3572 3573 static void bnxt_free_vnic_attributes(struct bnxt *bp) 3574 { 3575 int i; 3576 struct bnxt_vnic_info *vnic; 3577 struct pci_dev *pdev = bp->pdev; 3578 3579 if (!bp->vnic_info) 3580 return; 3581 3582 for (i = 0; i < bp->nr_vnics; i++) { 3583 vnic = &bp->vnic_info[i]; 3584 3585 kfree(vnic->fw_grp_ids); 3586 vnic->fw_grp_ids = NULL; 3587 3588 kfree(vnic->uc_list); 3589 vnic->uc_list = NULL; 3590 3591 if (vnic->mc_list) { 3592 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 3593 vnic->mc_list, vnic->mc_list_mapping); 3594 vnic->mc_list = NULL; 3595 } 3596 3597 if (vnic->rss_table) { 3598 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 3599 vnic->rss_table, 3600 vnic->rss_table_dma_addr); 3601 vnic->rss_table = NULL; 3602 } 3603 3604 vnic->rss_hash_key = NULL; 3605 vnic->flags = 0; 3606 } 3607 } 3608 3609 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 3610 { 3611 int i, rc = 0, size; 3612 struct bnxt_vnic_info *vnic; 3613 struct pci_dev *pdev = bp->pdev; 3614 int max_rings; 3615 3616 for (i = 0; i < bp->nr_vnics; i++) { 3617 vnic = &bp->vnic_info[i]; 3618 3619 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 3620 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 3621 3622 if (mem_size > 0) { 3623 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 3624 if (!vnic->uc_list) { 3625 rc = -ENOMEM; 3626 goto out; 3627 } 3628 } 3629 } 3630 3631 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 3632 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 3633 vnic->mc_list = 3634 dma_alloc_coherent(&pdev->dev, 3635 vnic->mc_list_size, 3636 &vnic->mc_list_mapping, 3637 GFP_KERNEL); 3638 if (!vnic->mc_list) { 3639 rc = -ENOMEM; 3640 goto out; 3641 } 3642 } 3643 3644 if (bp->flags & BNXT_FLAG_CHIP_P5) 3645 goto 
vnic_skip_grps; 3646 3647 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3648 max_rings = bp->rx_nr_rings; 3649 else 3650 max_rings = 1; 3651 3652 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 3653 if (!vnic->fw_grp_ids) { 3654 rc = -ENOMEM; 3655 goto out; 3656 } 3657 vnic_skip_grps: 3658 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 3659 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 3660 continue; 3661 3662 /* Allocate rss table and hash key */ 3663 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 3664 if (bp->flags & BNXT_FLAG_CHIP_P5) 3665 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 3666 3667 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 3668 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 3669 vnic->rss_table_size, 3670 &vnic->rss_table_dma_addr, 3671 GFP_KERNEL); 3672 if (!vnic->rss_table) { 3673 rc = -ENOMEM; 3674 goto out; 3675 } 3676 3677 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 3678 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 3679 } 3680 return 0; 3681 3682 out: 3683 return rc; 3684 } 3685 3686 static void bnxt_free_hwrm_resources(struct bnxt *bp) 3687 { 3688 struct pci_dev *pdev = bp->pdev; 3689 3690 if (bp->hwrm_cmd_resp_addr) { 3691 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3692 bp->hwrm_cmd_resp_dma_addr); 3693 bp->hwrm_cmd_resp_addr = NULL; 3694 } 3695 3696 if (bp->hwrm_cmd_kong_resp_addr) { 3697 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3698 bp->hwrm_cmd_kong_resp_addr, 3699 bp->hwrm_cmd_kong_resp_dma_addr); 3700 bp->hwrm_cmd_kong_resp_addr = NULL; 3701 } 3702 } 3703 3704 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) 3705 { 3706 struct pci_dev *pdev = bp->pdev; 3707 3708 if (bp->hwrm_cmd_kong_resp_addr) 3709 return 0; 3710 3711 bp->hwrm_cmd_kong_resp_addr = 3712 dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3713 &bp->hwrm_cmd_kong_resp_dma_addr, 3714 GFP_KERNEL); 3715 if (!bp->hwrm_cmd_kong_resp_addr) 3716 return -ENOMEM; 3717 3718 return 0; 3719 } 3720 3721 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 3722 { 3723 struct pci_dev *pdev = bp->pdev; 3724 3725 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3726 &bp->hwrm_cmd_resp_dma_addr, 3727 GFP_KERNEL); 3728 if (!bp->hwrm_cmd_resp_addr) 3729 return -ENOMEM; 3730 3731 return 0; 3732 } 3733 3734 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) 3735 { 3736 if (bp->hwrm_short_cmd_req_addr) { 3737 struct pci_dev *pdev = bp->pdev; 3738 3739 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3740 bp->hwrm_short_cmd_req_addr, 3741 bp->hwrm_short_cmd_req_dma_addr); 3742 bp->hwrm_short_cmd_req_addr = NULL; 3743 } 3744 } 3745 3746 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) 3747 { 3748 struct pci_dev *pdev = bp->pdev; 3749 3750 if (bp->hwrm_short_cmd_req_addr) 3751 return 0; 3752 3753 bp->hwrm_short_cmd_req_addr = 3754 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3755 &bp->hwrm_short_cmd_req_dma_addr, 3756 GFP_KERNEL); 3757 if (!bp->hwrm_short_cmd_req_addr) 3758 return -ENOMEM; 3759 3760 return 0; 3761 } 3762 3763 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 3764 { 3765 kfree(stats->hw_masks); 3766 stats->hw_masks = NULL; 3767 kfree(stats->sw_stats); 3768 stats->sw_stats = NULL; 3769 if (stats->hw_stats) { 3770 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 3771 stats->hw_stats_map); 3772 stats->hw_stats = NULL; 3773 } 3774 } 3775 3776 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 3777 bool alloc_masks) 3778 
{ 3779 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 3780 &stats->hw_stats_map, GFP_KERNEL); 3781 if (!stats->hw_stats) 3782 return -ENOMEM; 3783 3784 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 3785 if (!stats->sw_stats) 3786 goto stats_mem_err; 3787 3788 if (alloc_masks) { 3789 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 3790 if (!stats->hw_masks) 3791 goto stats_mem_err; 3792 } 3793 return 0; 3794 3795 stats_mem_err: 3796 bnxt_free_stats_mem(bp, stats); 3797 return -ENOMEM; 3798 } 3799 3800 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 3801 { 3802 int i; 3803 3804 for (i = 0; i < count; i++) 3805 mask_arr[i] = mask; 3806 } 3807 3808 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 3809 { 3810 int i; 3811 3812 for (i = 0; i < count; i++) 3813 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 3814 } 3815 3816 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 3817 struct bnxt_stats_mem *stats) 3818 { 3819 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 3820 struct hwrm_func_qstats_ext_input req = {0}; 3821 __le64 *hw_masks; 3822 int rc; 3823 3824 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 3825 !(bp->flags & BNXT_FLAG_CHIP_P5)) 3826 return -EOPNOTSUPP; 3827 3828 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); 3829 req.fid = cpu_to_le16(0xffff); 3830 req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 3831 mutex_lock(&bp->hwrm_cmd_lock); 3832 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3833 if (rc) 3834 goto qstat_exit; 3835 3836 hw_masks = &resp->rx_ucast_pkts; 3837 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 3838 3839 qstat_exit: 3840 mutex_unlock(&bp->hwrm_cmd_lock); 3841 return rc; 3842 } 3843 3844 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 3845 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 3846 3847 static void bnxt_init_stats(struct bnxt *bp) 3848 { 3849 struct bnxt_napi *bnapi = bp->bnapi[0]; 3850 struct bnxt_cp_ring_info *cpr; 3851 struct bnxt_stats_mem *stats; 3852 __le64 *rx_stats, *tx_stats; 3853 int rc, rx_count, tx_count; 3854 u64 *rx_masks, *tx_masks; 3855 u64 mask; 3856 u8 flags; 3857 3858 cpr = &bnapi->cp_ring; 3859 stats = &cpr->stats; 3860 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 3861 if (rc) { 3862 if (bp->flags & BNXT_FLAG_CHIP_P5) 3863 mask = (1ULL << 48) - 1; 3864 else 3865 mask = -1ULL; 3866 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 3867 } 3868 if (bp->flags & BNXT_FLAG_PORT_STATS) { 3869 stats = &bp->port_stats; 3870 rx_stats = stats->hw_stats; 3871 rx_masks = stats->hw_masks; 3872 rx_count = sizeof(struct rx_port_stats) / 8; 3873 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 3874 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 3875 tx_count = sizeof(struct tx_port_stats) / 8; 3876 3877 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 3878 rc = bnxt_hwrm_port_qstats(bp, flags); 3879 if (rc) { 3880 mask = (1ULL << 40) - 1; 3881 3882 bnxt_fill_masks(rx_masks, mask, rx_count); 3883 bnxt_fill_masks(tx_masks, mask, tx_count); 3884 } else { 3885 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 3886 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 3887 bnxt_hwrm_port_qstats(bp, 0); 3888 } 3889 } 3890 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 3891 stats = &bp->rx_port_stats_ext; 3892 rx_stats = stats->hw_stats; 3893 rx_masks = stats->hw_masks; 3894 rx_count = sizeof(struct rx_port_stats_ext) / 8; 3895 stats = &bp->tx_port_stats_ext; 3896 
tx_stats = stats->hw_stats; 3897 tx_masks = stats->hw_masks; 3898 tx_count = sizeof(struct tx_port_stats_ext) / 8; 3899 3900 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 3901 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 3902 if (rc) { 3903 mask = (1ULL << 40) - 1; 3904 3905 bnxt_fill_masks(rx_masks, mask, rx_count); 3906 if (tx_stats) 3907 bnxt_fill_masks(tx_masks, mask, tx_count); 3908 } else { 3909 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 3910 if (tx_stats) 3911 bnxt_copy_hw_masks(tx_masks, tx_stats, 3912 tx_count); 3913 bnxt_hwrm_port_qstats_ext(bp, 0); 3914 } 3915 } 3916 } 3917 3918 static void bnxt_free_port_stats(struct bnxt *bp) 3919 { 3920 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3921 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 3922 3923 bnxt_free_stats_mem(bp, &bp->port_stats); 3924 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 3925 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 3926 } 3927 3928 static void bnxt_free_ring_stats(struct bnxt *bp) 3929 { 3930 int i; 3931 3932 if (!bp->bnapi) 3933 return; 3934 3935 for (i = 0; i < bp->cp_nr_rings; i++) { 3936 struct bnxt_napi *bnapi = bp->bnapi[i]; 3937 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3938 3939 bnxt_free_stats_mem(bp, &cpr->stats); 3940 } 3941 } 3942 3943 static int bnxt_alloc_stats(struct bnxt *bp) 3944 { 3945 u32 size, i; 3946 int rc; 3947 3948 size = bp->hw_ring_stats_size; 3949 3950 for (i = 0; i < bp->cp_nr_rings; i++) { 3951 struct bnxt_napi *bnapi = bp->bnapi[i]; 3952 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3953 3954 cpr->stats.len = size; 3955 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 3956 if (rc) 3957 return rc; 3958 3959 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 3960 } 3961 3962 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 3963 return 0; 3964 3965 if (bp->port_stats.hw_stats) 3966 goto alloc_ext_stats; 3967 3968 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 3969 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 3970 if (rc) 3971 return rc; 3972 3973 bp->flags |= BNXT_FLAG_PORT_STATS; 3974 3975 alloc_ext_stats: 3976 /* Display extended statistics only if FW supports it */ 3977 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 3978 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 3979 return 0; 3980 3981 if (bp->rx_port_stats_ext.hw_stats) 3982 goto alloc_tx_ext_stats; 3983 3984 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 3985 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 3986 /* Extended stats are optional */ 3987 if (rc) 3988 return 0; 3989 3990 alloc_tx_ext_stats: 3991 if (bp->tx_port_stats_ext.hw_stats) 3992 return 0; 3993 3994 if (bp->hwrm_spec_code >= 0x10902 || 3995 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 3996 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 3997 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 3998 /* Extended stats are optional */ 3999 if (rc) 4000 return 0; 4001 } 4002 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 4003 return 0; 4004 } 4005 4006 static void bnxt_clear_ring_indices(struct bnxt *bp) 4007 { 4008 int i; 4009 4010 if (!bp->bnapi) 4011 return; 4012 4013 for (i = 0; i < bp->cp_nr_rings; i++) { 4014 struct bnxt_napi *bnapi = bp->bnapi[i]; 4015 struct bnxt_cp_ring_info *cpr; 4016 struct bnxt_rx_ring_info *rxr; 4017 struct bnxt_tx_ring_info *txr; 4018 4019 if (!bnapi) 4020 continue; 4021 4022 cpr = &bnapi->cp_ring; 4023 cpr->cp_raw_cons = 0; 4024 4025 txr = bnapi->tx_ring; 4026 if (txr) { 4027 txr->tx_prod = 0; 4028 txr->tx_cons = 0; 4029 } 4030 4031 rxr = 
bnapi->rx_ring; 4032 if (rxr) { 4033 rxr->rx_prod = 0; 4034 rxr->rx_agg_prod = 0; 4035 rxr->rx_sw_agg_prod = 0; 4036 rxr->rx_next_cons = 0; 4037 } 4038 } 4039 } 4040 4041 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 4042 { 4043 #ifdef CONFIG_RFS_ACCEL 4044 int i; 4045 4046 /* Under rtnl_lock and all our NAPIs have been disabled. It's 4047 * safe to delete the hash table. 4048 */ 4049 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 4050 struct hlist_head *head; 4051 struct hlist_node *tmp; 4052 struct bnxt_ntuple_filter *fltr; 4053 4054 head = &bp->ntp_fltr_hash_tbl[i]; 4055 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 4056 hlist_del(&fltr->hash); 4057 kfree(fltr); 4058 } 4059 } 4060 if (irq_reinit) { 4061 kfree(bp->ntp_fltr_bmap); 4062 bp->ntp_fltr_bmap = NULL; 4063 } 4064 bp->ntp_fltr_count = 0; 4065 #endif 4066 } 4067 4068 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 4069 { 4070 #ifdef CONFIG_RFS_ACCEL 4071 int i, rc = 0; 4072 4073 if (!(bp->flags & BNXT_FLAG_RFS)) 4074 return 0; 4075 4076 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 4077 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 4078 4079 bp->ntp_fltr_count = 0; 4080 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 4081 sizeof(long), 4082 GFP_KERNEL); 4083 4084 if (!bp->ntp_fltr_bmap) 4085 rc = -ENOMEM; 4086 4087 return rc; 4088 #else 4089 return 0; 4090 #endif 4091 } 4092 4093 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 4094 { 4095 bnxt_free_vnic_attributes(bp); 4096 bnxt_free_tx_rings(bp); 4097 bnxt_free_rx_rings(bp); 4098 bnxt_free_cp_rings(bp); 4099 bnxt_free_ntp_fltrs(bp, irq_re_init); 4100 if (irq_re_init) { 4101 bnxt_free_ring_stats(bp); 4102 if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) || 4103 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 4104 bnxt_free_port_stats(bp); 4105 bnxt_free_ring_grps(bp); 4106 bnxt_free_vnics(bp); 4107 kfree(bp->tx_ring_map); 4108 bp->tx_ring_map = NULL; 4109 kfree(bp->tx_ring); 4110 bp->tx_ring = NULL; 4111 kfree(bp->rx_ring); 4112 bp->rx_ring = NULL; 4113 kfree(bp->bnapi); 4114 bp->bnapi = NULL; 4115 } else { 4116 bnxt_clear_ring_indices(bp); 4117 } 4118 } 4119 4120 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 4121 { 4122 int i, j, rc, size, arr_size; 4123 void *bnapi; 4124 4125 if (irq_re_init) { 4126 /* Allocate bnapi mem pointer array and mem block for 4127 * all queues 4128 */ 4129 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 4130 bp->cp_nr_rings); 4131 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 4132 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 4133 if (!bnapi) 4134 return -ENOMEM; 4135 4136 bp->bnapi = bnapi; 4137 bnapi += arr_size; 4138 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 4139 bp->bnapi[i] = bnapi; 4140 bp->bnapi[i]->index = i; 4141 bp->bnapi[i]->bp = bp; 4142 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4143 struct bnxt_cp_ring_info *cpr = 4144 &bp->bnapi[i]->cp_ring; 4145 4146 cpr->cp_ring_struct.ring_mem.flags = 4147 BNXT_RMEM_RING_PTE_FLAG; 4148 } 4149 } 4150 4151 bp->rx_ring = kcalloc(bp->rx_nr_rings, 4152 sizeof(struct bnxt_rx_ring_info), 4153 GFP_KERNEL); 4154 if (!bp->rx_ring) 4155 return -ENOMEM; 4156 4157 for (i = 0; i < bp->rx_nr_rings; i++) { 4158 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4159 4160 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4161 rxr->rx_ring_struct.ring_mem.flags = 4162 BNXT_RMEM_RING_PTE_FLAG; 4163 rxr->rx_agg_ring_struct.ring_mem.flags = 4164 BNXT_RMEM_RING_PTE_FLAG; 4165 } 4166 rxr->bnapi = bp->bnapi[i]; 4167 
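			/* Link the RX ring and its NAPI context both ways so
			 * either side can find the other.
			 */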
bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 4168 } 4169 4170 bp->tx_ring = kcalloc(bp->tx_nr_rings, 4171 sizeof(struct bnxt_tx_ring_info), 4172 GFP_KERNEL); 4173 if (!bp->tx_ring) 4174 return -ENOMEM; 4175 4176 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 4177 GFP_KERNEL); 4178 4179 if (!bp->tx_ring_map) 4180 return -ENOMEM; 4181 4182 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4183 j = 0; 4184 else 4185 j = bp->rx_nr_rings; 4186 4187 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 4188 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4189 4190 if (bp->flags & BNXT_FLAG_CHIP_P5) 4191 txr->tx_ring_struct.ring_mem.flags = 4192 BNXT_RMEM_RING_PTE_FLAG; 4193 txr->bnapi = bp->bnapi[j]; 4194 bp->bnapi[j]->tx_ring = txr; 4195 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 4196 if (i >= bp->tx_nr_rings_xdp) { 4197 txr->txq_index = i - bp->tx_nr_rings_xdp; 4198 bp->bnapi[j]->tx_int = bnxt_tx_int; 4199 } else { 4200 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 4201 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 4202 } 4203 } 4204 4205 rc = bnxt_alloc_stats(bp); 4206 if (rc) 4207 goto alloc_mem_err; 4208 bnxt_init_stats(bp); 4209 4210 rc = bnxt_alloc_ntp_fltrs(bp); 4211 if (rc) 4212 goto alloc_mem_err; 4213 4214 rc = bnxt_alloc_vnics(bp); 4215 if (rc) 4216 goto alloc_mem_err; 4217 } 4218 4219 bnxt_init_ring_struct(bp); 4220 4221 rc = bnxt_alloc_rx_rings(bp); 4222 if (rc) 4223 goto alloc_mem_err; 4224 4225 rc = bnxt_alloc_tx_rings(bp); 4226 if (rc) 4227 goto alloc_mem_err; 4228 4229 rc = bnxt_alloc_cp_rings(bp); 4230 if (rc) 4231 goto alloc_mem_err; 4232 4233 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 4234 BNXT_VNIC_UCAST_FLAG; 4235 rc = bnxt_alloc_vnic_attributes(bp); 4236 if (rc) 4237 goto alloc_mem_err; 4238 return 0; 4239 4240 alloc_mem_err: 4241 bnxt_free_mem(bp, true); 4242 return rc; 4243 } 4244 4245 static void bnxt_disable_int(struct bnxt *bp) 4246 { 4247 int i; 4248 4249 if (!bp->bnapi) 4250 return; 4251 4252 for (i = 0; i < bp->cp_nr_rings; i++) { 4253 struct bnxt_napi *bnapi = bp->bnapi[i]; 4254 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4255 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4256 4257 if (ring->fw_ring_id != INVALID_HW_RING_ID) 4258 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4259 } 4260 } 4261 4262 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 4263 { 4264 struct bnxt_napi *bnapi = bp->bnapi[n]; 4265 struct bnxt_cp_ring_info *cpr; 4266 4267 cpr = &bnapi->cp_ring; 4268 return cpr->cp_ring_struct.map_idx; 4269 } 4270 4271 static void bnxt_disable_int_sync(struct bnxt *bp) 4272 { 4273 int i; 4274 4275 atomic_inc(&bp->intr_sem); 4276 4277 bnxt_disable_int(bp); 4278 for (i = 0; i < bp->cp_nr_rings; i++) { 4279 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 4280 4281 synchronize_irq(bp->irq_tbl[map_idx].vector); 4282 } 4283 } 4284 4285 static void bnxt_enable_int(struct bnxt *bp) 4286 { 4287 int i; 4288 4289 atomic_set(&bp->intr_sem, 0); 4290 for (i = 0; i < bp->cp_nr_rings; i++) { 4291 struct bnxt_napi *bnapi = bp->bnapi[i]; 4292 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4293 4294 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 4295 } 4296 } 4297 4298 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 4299 u16 cmpl_ring, u16 target_id) 4300 { 4301 struct input *req = request; 4302 4303 req->req_type = cpu_to_le16(req_type); 4304 req->cmpl_ring = cpu_to_le16(cmpl_ring); 4305 req->target_id = cpu_to_le16(target_id); 4306 if (bnxt_kong_hwrm_message(bp, req)) 4307 req->resp_addr = 
cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 4308 else 4309 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 4310 } 4311 4312 static int bnxt_hwrm_to_stderr(u32 hwrm_err) 4313 { 4314 switch (hwrm_err) { 4315 case HWRM_ERR_CODE_SUCCESS: 4316 return 0; 4317 case HWRM_ERR_CODE_RESOURCE_LOCKED: 4318 return -EROFS; 4319 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: 4320 return -EACCES; 4321 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: 4322 return -ENOSPC; 4323 case HWRM_ERR_CODE_INVALID_PARAMS: 4324 case HWRM_ERR_CODE_INVALID_FLAGS: 4325 case HWRM_ERR_CODE_INVALID_ENABLES: 4326 case HWRM_ERR_CODE_UNSUPPORTED_TLV: 4327 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: 4328 return -EINVAL; 4329 case HWRM_ERR_CODE_NO_BUFFER: 4330 return -ENOMEM; 4331 case HWRM_ERR_CODE_HOT_RESET_PROGRESS: 4332 case HWRM_ERR_CODE_BUSY: 4333 return -EAGAIN; 4334 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: 4335 return -EOPNOTSUPP; 4336 default: 4337 return -EIO; 4338 } 4339 } 4340 4341 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 4342 int timeout, bool silent) 4343 { 4344 int i, intr_process, rc, tmo_count; 4345 struct input *req = msg; 4346 u32 *data = msg; 4347 u8 *valid; 4348 u16 cp_ring_id, len = 0; 4349 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4350 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4351 struct hwrm_short_input short_input = {0}; 4352 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4353 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4354 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4355 4356 if (BNXT_NO_FW_ACCESS(bp) && 4357 le16_to_cpu(req->req_type) != HWRM_FUNC_RESET) 4358 return -EBUSY; 4359 4360 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4361 if (msg_len > bp->hwrm_max_ext_req_len || 4362 !bp->hwrm_short_cmd_req_addr) 4363 return -EINVAL; 4364 } 4365 4366 if (bnxt_hwrm_kong_chnl(bp, req)) { 4367 dst = BNXT_HWRM_CHNL_KONG; 4368 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4369 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4370 resp = bp->hwrm_cmd_kong_resp_addr; 4371 } 4372 4373 memset(resp, 0, PAGE_SIZE); 4374 cp_ring_id = le16_to_cpu(req->cmpl_ring); 4375 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; 4376 4377 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); 4378 /* currently supports only one outstanding message */ 4379 if (intr_process) 4380 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 4381 4382 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 4383 msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4384 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 4385 u16 max_msg_len; 4386 4387 /* Set boundary for maximum extended request length for short 4388 * cmd format. If passed up from device use the max supported 4389 * internal req length. 
4390 */ 4391 max_msg_len = bp->hwrm_max_ext_req_len; 4392 4393 memcpy(short_cmd_req, req, msg_len); 4394 if (msg_len < max_msg_len) 4395 memset(short_cmd_req + msg_len, 0, 4396 max_msg_len - msg_len); 4397 4398 short_input.req_type = req->req_type; 4399 short_input.signature = 4400 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); 4401 short_input.size = cpu_to_le16(msg_len); 4402 short_input.req_addr = 4403 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); 4404 4405 data = (u32 *)&short_input; 4406 msg_len = sizeof(short_input); 4407 4408 /* Sync memory write before updating doorbell */ 4409 wmb(); 4410 4411 max_req_len = BNXT_HWRM_SHORT_REQ_LEN; 4412 } 4413 4414 /* Write request msg to hwrm channel */ 4415 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); 4416 4417 for (i = msg_len; i < max_req_len; i += 4) 4418 writel(0, bp->bar0 + bar_offset + i); 4419 4420 /* Ring channel doorbell */ 4421 writel(1, bp->bar0 + doorbell_offset); 4422 4423 if (!pci_is_enabled(bp->pdev)) 4424 return 0; 4425 4426 if (!timeout) 4427 timeout = DFLT_HWRM_CMD_TIMEOUT; 4428 /* convert timeout to usec */ 4429 timeout *= 1000; 4430 4431 i = 0; 4432 /* Short timeout for the first few iterations: 4433 * number of loops = number of loops for short timeout + 4434 * number of loops for standard timeout. 4435 */ 4436 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4437 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4438 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4439 4440 if (intr_process) { 4441 u16 seq_id = bp->hwrm_intr_seq_id; 4442 4443 /* Wait until hwrm response cmpl interrupt is processed */ 4444 while (bp->hwrm_intr_seq_id != (u16)~seq_id && 4445 i++ < tmo_count) { 4446 /* Abort the wait for completion if the FW health 4447 * check has failed. 4448 */ 4449 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4450 return -EBUSY; 4451 /* on first few passes, just barely sleep */ 4452 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4453 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4454 HWRM_SHORT_MAX_TIMEOUT); 4455 else 4456 usleep_range(HWRM_MIN_TIMEOUT, 4457 HWRM_MAX_TIMEOUT); 4458 } 4459 4460 if (bp->hwrm_intr_seq_id != (u16)~seq_id) { 4461 if (!silent) 4462 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 4463 le16_to_cpu(req->req_type)); 4464 return -EBUSY; 4465 } 4466 len = le16_to_cpu(resp->resp_len); 4467 valid = ((u8 *)resp) + len - 1; 4468 } else { 4469 int j; 4470 4471 /* Check if response len is updated */ 4472 for (i = 0; i < tmo_count; i++) { 4473 /* Abort the wait for completion if the FW health 4474 * check has failed. 
4475 */ 4476 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4477 return -EBUSY; 4478 len = le16_to_cpu(resp->resp_len); 4479 if (len) 4480 break; 4481 /* on first few passes, just barely sleep */ 4482 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4483 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4484 HWRM_SHORT_MAX_TIMEOUT); 4485 else 4486 usleep_range(HWRM_MIN_TIMEOUT, 4487 HWRM_MAX_TIMEOUT); 4488 } 4489 4490 if (i >= tmo_count) { 4491 if (!silent) 4492 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 4493 HWRM_TOTAL_TIMEOUT(i), 4494 le16_to_cpu(req->req_type), 4495 le16_to_cpu(req->seq_id), len); 4496 return -EBUSY; 4497 } 4498 4499 /* Last byte of resp contains valid bit */ 4500 valid = ((u8 *)resp) + len - 1; 4501 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4502 /* make sure we read from updated DMA memory */ 4503 dma_rmb(); 4504 if (*valid) 4505 break; 4506 usleep_range(1, 5); 4507 } 4508 4509 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 4510 if (!silent) 4511 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 4512 HWRM_TOTAL_TIMEOUT(i), 4513 le16_to_cpu(req->req_type), 4514 le16_to_cpu(req->seq_id), len, 4515 *valid); 4516 return -EBUSY; 4517 } 4518 } 4519 4520 /* Zero valid bit for compatibility. Valid bit in an older spec 4521 * may become a new field in a newer spec. We must make sure that 4522 * a new field not implemented by old spec will read zero. 4523 */ 4524 *valid = 0; 4525 rc = le16_to_cpu(resp->error_code); 4526 if (rc && !silent) 4527 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 4528 le16_to_cpu(resp->req_type), 4529 le16_to_cpu(resp->seq_id), rc); 4530 return bnxt_hwrm_to_stderr(rc); 4531 } 4532 4533 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4534 { 4535 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 4536 } 4537 4538 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4539 int timeout) 4540 { 4541 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4542 } 4543 4544 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4545 { 4546 int rc; 4547 4548 mutex_lock(&bp->hwrm_cmd_lock); 4549 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 4550 mutex_unlock(&bp->hwrm_cmd_lock); 4551 return rc; 4552 } 4553 4554 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4555 int timeout) 4556 { 4557 int rc; 4558 4559 mutex_lock(&bp->hwrm_cmd_lock); 4560 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4561 mutex_unlock(&bp->hwrm_cmd_lock); 4562 return rc; 4563 } 4564 4565 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 4566 bool async_only) 4567 { 4568 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; 4569 struct hwrm_func_drv_rgtr_input req = {0}; 4570 DECLARE_BITMAP(async_events_bmap, 256); 4571 u32 *events = (u32 *)async_events_bmap; 4572 u32 flags; 4573 int rc, i; 4574 4575 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 4576 4577 req.enables = 4578 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 4579 FUNC_DRV_RGTR_REQ_ENABLES_VER | 4580 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4581 4582 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 4583 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 4584 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 4585 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 4586 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 4587 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 4588 
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 4589 req.flags = cpu_to_le32(flags); 4590 req.ver_maj_8b = DRV_VER_MAJ; 4591 req.ver_min_8b = DRV_VER_MIN; 4592 req.ver_upd_8b = DRV_VER_UPD; 4593 req.ver_maj = cpu_to_le16(DRV_VER_MAJ); 4594 req.ver_min = cpu_to_le16(DRV_VER_MIN); 4595 req.ver_upd = cpu_to_le16(DRV_VER_UPD); 4596 4597 if (BNXT_PF(bp)) { 4598 u32 data[8]; 4599 int i; 4600 4601 memset(data, 0, sizeof(data)); 4602 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 4603 u16 cmd = bnxt_vf_req_snif[i]; 4604 unsigned int bit, idx; 4605 4606 idx = cmd / 32; 4607 bit = cmd % 32; 4608 data[idx] |= 1 << bit; 4609 } 4610 4611 for (i = 0; i < 8; i++) 4612 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 4613 4614 req.enables |= 4615 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 4616 } 4617 4618 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 4619 req.flags |= cpu_to_le32( 4620 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 4621 4622 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 4623 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 4624 u16 event_id = bnxt_async_events_arr[i]; 4625 4626 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 4627 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4628 continue; 4629 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 4630 } 4631 if (bmap && bmap_size) { 4632 for (i = 0; i < bmap_size; i++) { 4633 if (test_bit(i, bmap)) 4634 __set_bit(i, async_events_bmap); 4635 } 4636 } 4637 for (i = 0; i < 8; i++) 4638 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 4639 4640 if (async_only) 4641 req.enables = 4642 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4643 4644 mutex_lock(&bp->hwrm_cmd_lock); 4645 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4646 if (!rc) { 4647 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 4648 if (resp->flags & 4649 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 4650 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 4651 } 4652 mutex_unlock(&bp->hwrm_cmd_lock); 4653 return rc; 4654 } 4655 4656 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 4657 { 4658 struct hwrm_func_drv_unrgtr_input req = {0}; 4659 4660 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 4661 return 0; 4662 4663 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 4664 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4665 } 4666 4667 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 4668 { 4669 u32 rc = 0; 4670 struct hwrm_tunnel_dst_port_free_input req = {0}; 4671 4672 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 4673 req.tunnel_type = tunnel_type; 4674 4675 switch (tunnel_type) { 4676 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 4677 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 4678 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 4679 break; 4680 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 4681 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 4682 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 4683 break; 4684 default: 4685 break; 4686 } 4687 4688 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4689 if (rc) 4690 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", 4691 rc); 4692 return rc; 4693 } 4694 4695 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 4696 u8 tunnel_type) 4697 { 4698 u32 rc = 0; 4699 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 4700 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4701 4702 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 4703 4704 req.tunnel_type = tunnel_type; 4705 req.tunnel_dst_port_val = port; 4706 4707 mutex_lock(&bp->hwrm_cmd_lock); 4708 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4709 if (rc) { 4710 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 4711 rc); 4712 goto err_out; 4713 } 4714 4715 switch (tunnel_type) { 4716 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 4717 bp->vxlan_fw_dst_port_id = 4718 le16_to_cpu(resp->tunnel_dst_port_id); 4719 break; 4720 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 4721 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 4722 break; 4723 default: 4724 break; 4725 } 4726 4727 err_out: 4728 mutex_unlock(&bp->hwrm_cmd_lock); 4729 return rc; 4730 } 4731 4732 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 4733 { 4734 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 4735 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4736 4737 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 4738 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4739 4740 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 4741 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 4742 req.mask = cpu_to_le32(vnic->rx_mask); 4743 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4744 } 4745 4746 #ifdef CONFIG_RFS_ACCEL 4747 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 4748 struct bnxt_ntuple_filter *fltr) 4749 { 4750 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 4751 4752 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 4753 req.ntuple_filter_id = fltr->filter_id; 4754 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4755 } 4756 4757 #define BNXT_NTP_FLTR_FLAGS \ 4758 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 4759 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 4760 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 4761 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 4762 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 4763 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 4764 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 4765 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 4766 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 4767 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 4768 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 4769 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 4770 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 4771 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 4772 4773 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 4774 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 4775 4776 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 4777 struct bnxt_ntuple_filter *fltr) 4778 { 4779 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 4780 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 4781 struct flow_keys *keys = &fltr->fkeys; 4782 struct bnxt_vnic_info *vnic; 4783 u32 flags = 0; 4784 int rc = 0; 4785 4786 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 4787 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 4788 4789 
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 4790 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 4791 req.dst_id = cpu_to_le16(fltr->rxq); 4792 } else { 4793 vnic = &bp->vnic_info[fltr->rxq + 1]; 4794 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 4795 } 4796 req.flags = cpu_to_le32(flags); 4797 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 4798 4799 req.ethertype = htons(ETH_P_IP); 4800 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 4801 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 4802 req.ip_protocol = keys->basic.ip_proto; 4803 4804 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 4805 int i; 4806 4807 req.ethertype = htons(ETH_P_IPV6); 4808 req.ip_addr_type = 4809 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 4810 *(struct in6_addr *)&req.src_ipaddr[0] = 4811 keys->addrs.v6addrs.src; 4812 *(struct in6_addr *)&req.dst_ipaddr[0] = 4813 keys->addrs.v6addrs.dst; 4814 for (i = 0; i < 4; i++) { 4815 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4816 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4817 } 4818 } else { 4819 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 4820 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4821 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 4822 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4823 } 4824 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 4825 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 4826 req.tunnel_type = 4827 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 4828 } 4829 4830 req.src_port = keys->ports.src; 4831 req.src_port_mask = cpu_to_be16(0xffff); 4832 req.dst_port = keys->ports.dst; 4833 req.dst_port_mask = cpu_to_be16(0xffff); 4834 4835 mutex_lock(&bp->hwrm_cmd_lock); 4836 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4837 if (!rc) { 4838 resp = bnxt_get_hwrm_resp_addr(bp, &req); 4839 fltr->filter_id = resp->ntuple_filter_id; 4840 } 4841 mutex_unlock(&bp->hwrm_cmd_lock); 4842 return rc; 4843 } 4844 #endif 4845 4846 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 4847 u8 *mac_addr) 4848 { 4849 u32 rc = 0; 4850 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 4851 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4852 4853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 4854 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 4855 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 4856 req.flags |= 4857 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 4858 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 4859 req.enables = 4860 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 4861 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 4862 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 4863 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 4864 req.l2_addr_mask[0] = 0xff; 4865 req.l2_addr_mask[1] = 0xff; 4866 req.l2_addr_mask[2] = 0xff; 4867 req.l2_addr_mask[3] = 0xff; 4868 req.l2_addr_mask[4] = 0xff; 4869 req.l2_addr_mask[5] = 0xff; 4870 4871 mutex_lock(&bp->hwrm_cmd_lock); 4872 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4873 if (!rc) 4874 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 4875 resp->l2_filter_id; 4876 mutex_unlock(&bp->hwrm_cmd_lock); 4877 return rc; 4878 } 4879 4880 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 4881 { 4882 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 4883 int rc = 0; 4884 4885 /* Any associated ntuple filters will also be cleared by firmware. 
*/ 4886 mutex_lock(&bp->hwrm_cmd_lock); 4887 for (i = 0; i < num_of_vnics; i++) { 4888 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4889 4890 for (j = 0; j < vnic->uc_filter_count; j++) { 4891 struct hwrm_cfa_l2_filter_free_input req = {0}; 4892 4893 bnxt_hwrm_cmd_hdr_init(bp, &req, 4894 HWRM_CFA_L2_FILTER_FREE, -1, -1); 4895 4896 req.l2_filter_id = vnic->fw_l2_filter_id[j]; 4897 4898 rc = _hwrm_send_message(bp, &req, sizeof(req), 4899 HWRM_CMD_TIMEOUT); 4900 } 4901 vnic->uc_filter_count = 0; 4902 } 4903 mutex_unlock(&bp->hwrm_cmd_lock); 4904 4905 return rc; 4906 } 4907 4908 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 4909 { 4910 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4911 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 4912 struct hwrm_vnic_tpa_cfg_input req = {0}; 4913 4914 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 4915 return 0; 4916 4917 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 4918 4919 if (tpa_flags) { 4920 u16 mss = bp->dev->mtu - 40; 4921 u32 nsegs, n, segs = 0, flags; 4922 4923 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 4924 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 4925 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 4926 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 4927 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 4928 if (tpa_flags & BNXT_FLAG_GRO) 4929 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 4930 4931 req.flags = cpu_to_le32(flags); 4932 4933 req.enables = 4934 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 4935 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 4936 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 4937 4938 /* Number of segs are log2 units, and first packet is not 4939 * included as part of this units. 4940 */ 4941 if (mss <= BNXT_RX_PAGE_SIZE) { 4942 n = BNXT_RX_PAGE_SIZE / mss; 4943 nsegs = (MAX_SKB_FRAGS - 1) * n; 4944 } else { 4945 n = mss / BNXT_RX_PAGE_SIZE; 4946 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 4947 n++; 4948 nsegs = (MAX_SKB_FRAGS - n) / n; 4949 } 4950 4951 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4952 segs = MAX_TPA_SEGS_P5; 4953 max_aggs = bp->max_tpa; 4954 } else { 4955 segs = ilog2(nsegs); 4956 } 4957 req.max_agg_segs = cpu_to_le16(segs); 4958 req.max_aggs = cpu_to_le16(max_aggs); 4959 4960 req.min_agg_len = cpu_to_le32(512); 4961 } 4962 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4963 4964 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4965 } 4966 4967 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 4968 { 4969 struct bnxt_ring_grp_info *grp_info; 4970 4971 grp_info = &bp->grp_info[ring->grp_idx]; 4972 return grp_info->cp_fw_ring_id; 4973 } 4974 4975 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 4976 { 4977 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4978 struct bnxt_napi *bnapi = rxr->bnapi; 4979 struct bnxt_cp_ring_info *cpr; 4980 4981 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL]; 4982 return cpr->cp_ring_struct.fw_ring_id; 4983 } else { 4984 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 4985 } 4986 } 4987 4988 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 4989 { 4990 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4991 struct bnxt_napi *bnapi = txr->bnapi; 4992 struct bnxt_cp_ring_info *cpr; 4993 4994 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL]; 4995 return cpr->cp_ring_struct.fw_ring_id; 4996 } else { 4997 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 4998 } 4999 } 5000 5001 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) 5002 { 5003 int entries; 5004 5005 if (bp->flags & BNXT_FLAG_CHIP_P5) 
5006 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 5007 else 5008 entries = HW_HASH_INDEX_SIZE; 5009 5010 bp->rss_indir_tbl_entries = entries; 5011 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), 5012 GFP_KERNEL); 5013 if (!bp->rss_indir_tbl) 5014 return -ENOMEM; 5015 return 0; 5016 } 5017 5018 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) 5019 { 5020 u16 max_rings, max_entries, pad, i; 5021 5022 if (!bp->rx_nr_rings) 5023 return; 5024 5025 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5026 max_rings = bp->rx_nr_rings - 1; 5027 else 5028 max_rings = bp->rx_nr_rings; 5029 5030 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 5031 5032 for (i = 0; i < max_entries; i++) 5033 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 5034 5035 pad = bp->rss_indir_tbl_entries - max_entries; 5036 if (pad) 5037 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); 5038 } 5039 5040 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 5041 { 5042 u16 i, tbl_size, max_ring = 0; 5043 5044 if (!bp->rss_indir_tbl) 5045 return 0; 5046 5047 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5048 for (i = 0; i < tbl_size; i++) 5049 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 5050 return max_ring; 5051 } 5052 5053 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 5054 { 5055 if (bp->flags & BNXT_FLAG_CHIP_P5) 5056 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); 5057 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5058 return 2; 5059 return 1; 5060 } 5061 5062 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 5063 { 5064 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); 5065 u16 i, j; 5066 5067 /* Fill the RSS indirection table with ring group ids */ 5068 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 5069 if (!no_rss) 5070 j = bp->rss_indir_tbl[i]; 5071 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 5072 } 5073 } 5074 5075 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 5076 struct bnxt_vnic_info *vnic) 5077 { 5078 __le16 *ring_tbl = vnic->rss_table; 5079 struct bnxt_rx_ring_info *rxr; 5080 u16 tbl_size, i; 5081 5082 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 5083 5084 for (i = 0; i < tbl_size; i++) { 5085 u16 ring_id, j; 5086 5087 j = bp->rss_indir_tbl[i]; 5088 rxr = &bp->rx_ring[j]; 5089 5090 ring_id = rxr->rx_ring_struct.fw_ring_id; 5091 *ring_tbl++ = cpu_to_le16(ring_id); 5092 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5093 *ring_tbl++ = cpu_to_le16(ring_id); 5094 } 5095 } 5096 5097 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 5098 { 5099 if (bp->flags & BNXT_FLAG_CHIP_P5) 5100 __bnxt_fill_hw_rss_tbl_p5(bp, vnic); 5101 else 5102 __bnxt_fill_hw_rss_tbl(bp, vnic); 5103 } 5104 5105 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 5106 { 5107 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5108 struct hwrm_vnic_rss_cfg_input req = {0}; 5109 5110 if ((bp->flags & BNXT_FLAG_CHIP_P5) || 5111 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 5112 return 0; 5113 5114 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 5115 if (set_rss) { 5116 bnxt_fill_hw_rss_tbl(bp, vnic); 5117 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 5118 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 5119 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 5120 req.hash_key_tbl_addr = 5121 cpu_to_le64(vnic->rss_hash_key_dma_addr); 5122 } 5123 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5124 return hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); 5125 } 5126 5127 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) 5128 { 5129 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5130 struct hwrm_vnic_rss_cfg_input req = {0}; 5131 dma_addr_t ring_tbl_map; 5132 u32 i, nr_ctxs; 5133 5134 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 5135 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5136 if (!set_rss) { 5137 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5138 return 0; 5139 } 5140 bnxt_fill_hw_rss_tbl(bp, vnic); 5141 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 5142 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 5143 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 5144 ring_tbl_map = vnic->rss_table_dma_addr; 5145 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 5146 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 5147 int rc; 5148 5149 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 5150 req.ring_table_pair_index = i; 5151 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 5152 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5153 if (rc) 5154 return rc; 5155 } 5156 return 0; 5157 } 5158 5159 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 5160 { 5161 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5162 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 5163 5164 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 5165 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 5166 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 5167 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 5168 req.enables = 5169 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 5170 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 5171 /* thresholds not implemented in firmware yet */ 5172 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 5173 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 5174 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5175 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5176 } 5177 5178 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 5179 u16 ctx_idx) 5180 { 5181 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 5182 5183 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 5184 req.rss_cos_lb_ctx_id = 5185 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 5186 5187 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5188 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 5189 } 5190 5191 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 5192 { 5193 int i, j; 5194 5195 for (i = 0; i < bp->nr_vnics; i++) { 5196 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 5197 5198 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 5199 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 5200 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 5201 } 5202 } 5203 bp->rsscos_nr_ctxs = 0; 5204 } 5205 5206 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 5207 { 5208 int rc; 5209 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 5210 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 5211 bp->hwrm_cmd_resp_addr; 5212 5213 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 5214 -1); 5215 5216 mutex_lock(&bp->hwrm_cmd_lock); 5217 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5218 if (!rc) 5219 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 5220 
le16_to_cpu(resp->rss_cos_lb_ctx_id); 5221 mutex_unlock(&bp->hwrm_cmd_lock); 5222 5223 return rc; 5224 } 5225 5226 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 5227 { 5228 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 5229 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 5230 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 5231 } 5232 5233 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 5234 { 5235 unsigned int ring = 0, grp_idx; 5236 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5237 struct hwrm_vnic_cfg_input req = {0}; 5238 u16 def_vlan = 0; 5239 5240 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 5241 5242 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5243 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5244 5245 req.default_rx_ring_id = 5246 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 5247 req.default_cmpl_ring_id = 5248 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 5249 req.enables = 5250 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 5251 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 5252 goto vnic_mru; 5253 } 5254 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 5255 /* Only RSS support for now TBD: COS & LB */ 5256 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 5257 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5258 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5259 VNIC_CFG_REQ_ENABLES_MRU); 5260 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 5261 req.rss_rule = 5262 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 5263 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5264 VNIC_CFG_REQ_ENABLES_MRU); 5265 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 5266 } else { 5267 req.rss_rule = cpu_to_le16(0xffff); 5268 } 5269 5270 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 5271 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 5272 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 5273 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 5274 } else { 5275 req.cos_rule = cpu_to_le16(0xffff); 5276 } 5277 5278 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 5279 ring = 0; 5280 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 5281 ring = vnic_id - 1; 5282 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 5283 ring = bp->rx_nr_rings - 1; 5284 5285 grp_idx = bp->rx_ring[ring].bnapi->index; 5286 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 5287 req.lb_rule = cpu_to_le16(0xffff); 5288 vnic_mru: 5289 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); 5290 5291 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5292 #ifdef CONFIG_BNXT_SRIOV 5293 if (BNXT_VF(bp)) 5294 def_vlan = bp->vf.vlan; 5295 #endif 5296 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 5297 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 5298 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 5299 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 5300 5301 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5302 } 5303 5304 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 5305 { 5306 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 5307 struct hwrm_vnic_free_input req = {0}; 5308 5309 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 5310 req.vnic_id = 5311 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 5312 5313 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5314 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 5315 } 5316 } 5317 5318 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 
5319 { 5320 u16 i; 5321 5322 for (i = 0; i < bp->nr_vnics; i++) 5323 bnxt_hwrm_vnic_free_one(bp, i); 5324 } 5325 5326 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 5327 unsigned int start_rx_ring_idx, 5328 unsigned int nr_rings) 5329 { 5330 int rc = 0; 5331 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 5332 struct hwrm_vnic_alloc_input req = {0}; 5333 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5334 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5335 5336 if (bp->flags & BNXT_FLAG_CHIP_P5) 5337 goto vnic_no_ring_grps; 5338 5339 /* map ring groups to this vnic */ 5340 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 5341 grp_idx = bp->rx_ring[i].bnapi->index; 5342 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 5343 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 5344 j, nr_rings); 5345 break; 5346 } 5347 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 5348 } 5349 5350 vnic_no_ring_grps: 5351 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 5352 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 5353 if (vnic_id == 0) 5354 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 5355 5356 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 5357 5358 mutex_lock(&bp->hwrm_cmd_lock); 5359 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5360 if (!rc) 5361 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 5362 mutex_unlock(&bp->hwrm_cmd_lock); 5363 return rc; 5364 } 5365 5366 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 5367 { 5368 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5369 struct hwrm_vnic_qcaps_input req = {0}; 5370 int rc; 5371 5372 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 5373 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); 5374 if (bp->hwrm_spec_code < 0x10600) 5375 return 0; 5376 5377 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 5378 mutex_lock(&bp->hwrm_cmd_lock); 5379 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5380 if (!rc) { 5381 u32 flags = le32_to_cpu(resp->flags); 5382 5383 if (!(bp->flags & BNXT_FLAG_CHIP_P5) && 5384 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 5385 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 5386 if (flags & 5387 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 5388 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 5389 5390 /* Older P5 fw before EXT_HW_STATS support did not set 5391 * VLAN_STRIP_CAP properly. 
5392 */ 5393 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 5394 (BNXT_CHIP_P5_THOR(bp) && 5395 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 5396 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 5397 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 5398 if (bp->max_tpa_v2) { 5399 if (BNXT_CHIP_P5_THOR(bp)) 5400 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 5401 else 5402 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; 5403 } 5404 } 5405 mutex_unlock(&bp->hwrm_cmd_lock); 5406 return rc; 5407 } 5408 5409 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 5410 { 5411 u16 i; 5412 u32 rc = 0; 5413 5414 if (bp->flags & BNXT_FLAG_CHIP_P5) 5415 return 0; 5416 5417 mutex_lock(&bp->hwrm_cmd_lock); 5418 for (i = 0; i < bp->rx_nr_rings; i++) { 5419 struct hwrm_ring_grp_alloc_input req = {0}; 5420 struct hwrm_ring_grp_alloc_output *resp = 5421 bp->hwrm_cmd_resp_addr; 5422 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 5423 5424 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 5425 5426 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 5427 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 5428 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 5429 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 5430 5431 rc = _hwrm_send_message(bp, &req, sizeof(req), 5432 HWRM_CMD_TIMEOUT); 5433 if (rc) 5434 break; 5435 5436 bp->grp_info[grp_idx].fw_grp_id = 5437 le32_to_cpu(resp->ring_group_id); 5438 } 5439 mutex_unlock(&bp->hwrm_cmd_lock); 5440 return rc; 5441 } 5442 5443 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 5444 { 5445 u16 i; 5446 struct hwrm_ring_grp_free_input req = {0}; 5447 5448 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) 5449 return; 5450 5451 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 5452 5453 mutex_lock(&bp->hwrm_cmd_lock); 5454 for (i = 0; i < bp->cp_nr_rings; i++) { 5455 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 5456 continue; 5457 req.ring_group_id = 5458 cpu_to_le32(bp->grp_info[i].fw_grp_id); 5459 5460 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5461 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 5462 } 5463 mutex_unlock(&bp->hwrm_cmd_lock); 5464 } 5465 5466 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 5467 struct bnxt_ring_struct *ring, 5468 u32 ring_type, u32 map_index) 5469 { 5470 int rc = 0, err = 0; 5471 struct hwrm_ring_alloc_input req = {0}; 5472 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5473 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 5474 struct bnxt_ring_grp_info *grp_info; 5475 u16 ring_id; 5476 5477 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 5478 5479 req.enables = 0; 5480 if (rmem->nr_pages > 1) { 5481 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 5482 /* Page size is in log2 units */ 5483 req.page_size = BNXT_PAGE_SHIFT; 5484 req.page_tbl_depth = 1; 5485 } else { 5486 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 5487 } 5488 req.fbo = 0; 5489 /* Association of ring index with doorbell index and MSIX number */ 5490 req.logical_id = cpu_to_le16(map_index); 5491 5492 switch (ring_type) { 5493 case HWRM_RING_ALLOC_TX: { 5494 struct bnxt_tx_ring_info *txr; 5495 5496 txr = container_of(ring, struct bnxt_tx_ring_info, 5497 tx_ring_struct); 5498 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 5499 /* Association of transmit ring with completion ring */ 5500 grp_info = &bp->grp_info[ring->grp_idx]; 5501 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 5502 req.length = 
cpu_to_le32(bp->tx_ring_mask + 1); 5503 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5504 req.queue_id = cpu_to_le16(ring->queue_id); 5505 break; 5506 } 5507 case HWRM_RING_ALLOC_RX: 5508 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5509 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 5510 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5511 u16 flags = 0; 5512 5513 /* Association of rx ring with stats context */ 5514 grp_info = &bp->grp_info[ring->grp_idx]; 5515 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 5516 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5517 req.enables |= cpu_to_le32( 5518 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5519 if (NET_IP_ALIGN == 2) 5520 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 5521 req.flags = cpu_to_le16(flags); 5522 } 5523 break; 5524 case HWRM_RING_ALLOC_AGG: 5525 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5526 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 5527 /* Association of agg ring with rx ring */ 5528 grp_info = &bp->grp_info[ring->grp_idx]; 5529 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 5530 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 5531 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5532 req.enables |= cpu_to_le32( 5533 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 5534 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5535 } else { 5536 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5537 } 5538 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 5539 break; 5540 case HWRM_RING_ALLOC_CMPL: 5541 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 5542 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5543 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5544 /* Association of cp ring with nq */ 5545 grp_info = &bp->grp_info[map_index]; 5546 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 5547 req.cq_handle = cpu_to_le64(ring->handle); 5548 req.enables |= cpu_to_le32( 5549 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 5550 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 5551 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5552 } 5553 break; 5554 case HWRM_RING_ALLOC_NQ: 5555 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 5556 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5557 if (bp->flags & BNXT_FLAG_USING_MSIX) 5558 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5559 break; 5560 default: 5561 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 5562 ring_type); 5563 return -1; 5564 } 5565 5566 mutex_lock(&bp->hwrm_cmd_lock); 5567 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5568 err = le16_to_cpu(resp->error_code); 5569 ring_id = le16_to_cpu(resp->ring_id); 5570 mutex_unlock(&bp->hwrm_cmd_lock); 5571 5572 if (rc || err) { 5573 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. 
rc:%x err:%x\n", 5574 ring_type, rc, err); 5575 return -EIO; 5576 } 5577 ring->fw_ring_id = ring_id; 5578 return rc; 5579 } 5580 5581 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 5582 { 5583 int rc; 5584 5585 if (BNXT_PF(bp)) { 5586 struct hwrm_func_cfg_input req = {0}; 5587 5588 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 5589 req.fid = cpu_to_le16(0xffff); 5590 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5591 req.async_event_cr = cpu_to_le16(idx); 5592 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5593 } else { 5594 struct hwrm_func_vf_cfg_input req = {0}; 5595 5596 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 5597 req.enables = 5598 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5599 req.async_event_cr = cpu_to_le16(idx); 5600 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5601 } 5602 return rc; 5603 } 5604 5605 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 5606 u32 map_idx, u32 xid) 5607 { 5608 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5609 if (BNXT_PF(bp)) 5610 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5; 5611 else 5612 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5; 5613 switch (ring_type) { 5614 case HWRM_RING_ALLOC_TX: 5615 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 5616 break; 5617 case HWRM_RING_ALLOC_RX: 5618 case HWRM_RING_ALLOC_AGG: 5619 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 5620 break; 5621 case HWRM_RING_ALLOC_CMPL: 5622 db->db_key64 = DBR_PATH_L2; 5623 break; 5624 case HWRM_RING_ALLOC_NQ: 5625 db->db_key64 = DBR_PATH_L2; 5626 break; 5627 } 5628 db->db_key64 |= (u64)xid << DBR_XID_SFT; 5629 } else { 5630 db->doorbell = bp->bar1 + map_idx * 0x80; 5631 switch (ring_type) { 5632 case HWRM_RING_ALLOC_TX: 5633 db->db_key32 = DB_KEY_TX; 5634 break; 5635 case HWRM_RING_ALLOC_RX: 5636 case HWRM_RING_ALLOC_AGG: 5637 db->db_key32 = DB_KEY_RX; 5638 break; 5639 case HWRM_RING_ALLOC_CMPL: 5640 db->db_key32 = DB_KEY_CP; 5641 break; 5642 } 5643 } 5644 } 5645 5646 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 5647 { 5648 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 5649 int i, rc = 0; 5650 u32 type; 5651 5652 if (bp->flags & BNXT_FLAG_CHIP_P5) 5653 type = HWRM_RING_ALLOC_NQ; 5654 else 5655 type = HWRM_RING_ALLOC_CMPL; 5656 for (i = 0; i < bp->cp_nr_rings; i++) { 5657 struct bnxt_napi *bnapi = bp->bnapi[i]; 5658 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5659 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5660 u32 map_idx = ring->map_idx; 5661 unsigned int vector; 5662 5663 vector = bp->irq_tbl[map_idx].vector; 5664 disable_irq_nosync(vector); 5665 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5666 if (rc) { 5667 enable_irq(vector); 5668 goto err_out; 5669 } 5670 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 5671 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5672 enable_irq(vector); 5673 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 5674 5675 if (!i) { 5676 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 5677 if (rc) 5678 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 5679 } 5680 } 5681 5682 type = HWRM_RING_ALLOC_TX; 5683 for (i = 0; i < bp->tx_nr_rings; i++) { 5684 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5685 struct bnxt_ring_struct *ring; 5686 u32 map_idx; 5687 5688 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5689 struct bnxt_napi *bnapi = txr->bnapi; 5690 struct bnxt_cp_ring_info *cpr, *cpr2; 5691 u32 type2 = HWRM_RING_ALLOC_CMPL; 5692 5693 cpr = 
&bnapi->cp_ring; 5694 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL]; 5695 ring = &cpr2->cp_ring_struct; 5696 ring->handle = BNXT_TX_HDL; 5697 map_idx = bnapi->index; 5698 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5699 if (rc) 5700 goto err_out; 5701 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5702 ring->fw_ring_id); 5703 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5704 } 5705 ring = &txr->tx_ring_struct; 5706 map_idx = i; 5707 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5708 if (rc) 5709 goto err_out; 5710 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 5711 } 5712 5713 type = HWRM_RING_ALLOC_RX; 5714 for (i = 0; i < bp->rx_nr_rings; i++) { 5715 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5716 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5717 struct bnxt_napi *bnapi = rxr->bnapi; 5718 u32 map_idx = bnapi->index; 5719 5720 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5721 if (rc) 5722 goto err_out; 5723 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 5724 /* If we have agg rings, post agg buffers first. */ 5725 if (!agg_rings) 5726 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5727 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 5728 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5729 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5730 u32 type2 = HWRM_RING_ALLOC_CMPL; 5731 struct bnxt_cp_ring_info *cpr2; 5732 5733 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; 5734 ring = &cpr2->cp_ring_struct; 5735 ring->handle = BNXT_RX_HDL; 5736 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5737 if (rc) 5738 goto err_out; 5739 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5740 ring->fw_ring_id); 5741 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5742 } 5743 } 5744 5745 if (agg_rings) { 5746 type = HWRM_RING_ALLOC_AGG; 5747 for (i = 0; i < bp->rx_nr_rings; i++) { 5748 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5749 struct bnxt_ring_struct *ring = 5750 &rxr->rx_agg_ring_struct; 5751 u32 grp_idx = ring->grp_idx; 5752 u32 map_idx = grp_idx + bp->rx_nr_rings; 5753 5754 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5755 if (rc) 5756 goto err_out; 5757 5758 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 5759 ring->fw_ring_id); 5760 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 5761 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5762 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 5763 } 5764 } 5765 err_out: 5766 return rc; 5767 } 5768 5769 static int hwrm_ring_free_send_msg(struct bnxt *bp, 5770 struct bnxt_ring_struct *ring, 5771 u32 ring_type, int cmpl_ring_id) 5772 { 5773 int rc; 5774 struct hwrm_ring_free_input req = {0}; 5775 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 5776 u16 error_code; 5777 5778 if (BNXT_NO_FW_ACCESS(bp)) 5779 return 0; 5780 5781 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 5782 req.ring_type = ring_type; 5783 req.ring_id = cpu_to_le16(ring->fw_ring_id); 5784 5785 mutex_lock(&bp->hwrm_cmd_lock); 5786 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5787 error_code = le16_to_cpu(resp->error_code); 5788 mutex_unlock(&bp->hwrm_cmd_lock); 5789 5790 if (rc || error_code) { 5791 netdev_err(bp->dev, "hwrm_ring_free type %d failed. 
rc:%x err:%x\n", 5792 ring_type, rc, error_code); 5793 return -EIO; 5794 } 5795 return 0; 5796 } 5797 5798 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 5799 { 5800 u32 type; 5801 int i; 5802 5803 if (!bp->bnapi) 5804 return; 5805 5806 for (i = 0; i < bp->tx_nr_rings; i++) { 5807 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5808 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 5809 5810 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5811 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 5812 5813 hwrm_ring_free_send_msg(bp, ring, 5814 RING_FREE_REQ_RING_TYPE_TX, 5815 close_path ? cmpl_ring_id : 5816 INVALID_HW_RING_ID); 5817 ring->fw_ring_id = INVALID_HW_RING_ID; 5818 } 5819 } 5820 5821 for (i = 0; i < bp->rx_nr_rings; i++) { 5822 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5823 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5824 u32 grp_idx = rxr->bnapi->index; 5825 5826 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5827 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5828 5829 hwrm_ring_free_send_msg(bp, ring, 5830 RING_FREE_REQ_RING_TYPE_RX, 5831 close_path ? cmpl_ring_id : 5832 INVALID_HW_RING_ID); 5833 ring->fw_ring_id = INVALID_HW_RING_ID; 5834 bp->grp_info[grp_idx].rx_fw_ring_id = 5835 INVALID_HW_RING_ID; 5836 } 5837 } 5838 5839 if (bp->flags & BNXT_FLAG_CHIP_P5) 5840 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 5841 else 5842 type = RING_FREE_REQ_RING_TYPE_RX; 5843 for (i = 0; i < bp->rx_nr_rings; i++) { 5844 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5845 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 5846 u32 grp_idx = rxr->bnapi->index; 5847 5848 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5849 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5850 5851 hwrm_ring_free_send_msg(bp, ring, type, 5852 close_path ? cmpl_ring_id : 5853 INVALID_HW_RING_ID); 5854 ring->fw_ring_id = INVALID_HW_RING_ID; 5855 bp->grp_info[grp_idx].agg_fw_ring_id = 5856 INVALID_HW_RING_ID; 5857 } 5858 } 5859 5860 /* The completion rings are about to be freed. After that the 5861 * IRQ doorbell will not work anymore. So we need to disable 5862 * IRQ here. 
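 * bnxt_disable_int_sync() below also waits for any in-flight IRQ
 * handlers to finish before the rings are torn down.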
5863 */ 5864 bnxt_disable_int_sync(bp); 5865 5866 if (bp->flags & BNXT_FLAG_CHIP_P5) 5867 type = RING_FREE_REQ_RING_TYPE_NQ; 5868 else 5869 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 5870 for (i = 0; i < bp->cp_nr_rings; i++) { 5871 struct bnxt_napi *bnapi = bp->bnapi[i]; 5872 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5873 struct bnxt_ring_struct *ring; 5874 int j; 5875 5876 for (j = 0; j < 2; j++) { 5877 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 5878 5879 if (cpr2) { 5880 ring = &cpr2->cp_ring_struct; 5881 if (ring->fw_ring_id == INVALID_HW_RING_ID) 5882 continue; 5883 hwrm_ring_free_send_msg(bp, ring, 5884 RING_FREE_REQ_RING_TYPE_L2_CMPL, 5885 INVALID_HW_RING_ID); 5886 ring->fw_ring_id = INVALID_HW_RING_ID; 5887 } 5888 } 5889 ring = &cpr->cp_ring_struct; 5890 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5891 hwrm_ring_free_send_msg(bp, ring, type, 5892 INVALID_HW_RING_ID); 5893 ring->fw_ring_id = INVALID_HW_RING_ID; 5894 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 5895 } 5896 } 5897 } 5898 5899 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5900 bool shared); 5901 5902 static int bnxt_hwrm_get_rings(struct bnxt *bp) 5903 { 5904 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5905 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5906 struct hwrm_func_qcfg_input req = {0}; 5907 int rc; 5908 5909 if (bp->hwrm_spec_code < 0x10601) 5910 return 0; 5911 5912 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5913 req.fid = cpu_to_le16(0xffff); 5914 mutex_lock(&bp->hwrm_cmd_lock); 5915 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5916 if (rc) { 5917 mutex_unlock(&bp->hwrm_cmd_lock); 5918 return rc; 5919 } 5920 5921 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5922 if (BNXT_NEW_RM(bp)) { 5923 u16 cp, stats; 5924 5925 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 5926 hw_resc->resv_hw_ring_grps = 5927 le32_to_cpu(resp->alloc_hw_ring_grps); 5928 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 5929 cp = le16_to_cpu(resp->alloc_cmpl_rings); 5930 stats = le16_to_cpu(resp->alloc_stat_ctx); 5931 hw_resc->resv_irqs = cp; 5932 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5933 int rx = hw_resc->resv_rx_rings; 5934 int tx = hw_resc->resv_tx_rings; 5935 5936 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5937 rx >>= 1; 5938 if (cp < (rx + tx)) { 5939 bnxt_trim_rings(bp, &rx, &tx, cp, false); 5940 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5941 rx <<= 1; 5942 hw_resc->resv_rx_rings = rx; 5943 hw_resc->resv_tx_rings = tx; 5944 } 5945 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 5946 hw_resc->resv_hw_ring_grps = rx; 5947 } 5948 hw_resc->resv_cp_rings = cp; 5949 hw_resc->resv_stat_ctxs = stats; 5950 } 5951 mutex_unlock(&bp->hwrm_cmd_lock); 5952 return 0; 5953 } 5954 5955 /* Caller must hold bp->hwrm_cmd_lock */ 5956 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 5957 { 5958 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5959 struct hwrm_func_qcfg_input req = {0}; 5960 int rc; 5961 5962 if (bp->hwrm_spec_code < 0x10601) 5963 return 0; 5964 5965 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5966 req.fid = cpu_to_le16(fid); 5967 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5968 if (!rc) 5969 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5970 5971 return rc; 5972 } 5973 5974 static bool bnxt_rfs_supported(struct bnxt *bp); 5975 5976 static void 5977 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, 5978 int 
tx_rings, int rx_rings, int ring_grps, 5979 int cp_rings, int stats, int vnics) 5980 { 5981 u32 enables = 0; 5982 5983 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); 5984 req->fid = cpu_to_le16(0xffff); 5985 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5986 req->num_tx_rings = cpu_to_le16(tx_rings); 5987 if (BNXT_NEW_RM(bp)) { 5988 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 5989 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5990 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5991 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 5992 enables |= tx_rings + ring_grps ? 5993 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5994 enables |= rx_rings ? 5995 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5996 } else { 5997 enables |= cp_rings ? 5998 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5999 enables |= ring_grps ? 6000 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 6001 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6002 } 6003 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 6004 6005 req->num_rx_rings = cpu_to_le16(rx_rings); 6006 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6007 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 6008 req->num_msix = cpu_to_le16(cp_rings); 6009 req->num_rsscos_ctxs = 6010 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 6011 } else { 6012 req->num_cmpl_rings = cpu_to_le16(cp_rings); 6013 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 6014 req->num_rsscos_ctxs = cpu_to_le16(1); 6015 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 6016 bnxt_rfs_supported(bp)) 6017 req->num_rsscos_ctxs = 6018 cpu_to_le16(ring_grps + 1); 6019 } 6020 req->num_stat_ctxs = cpu_to_le16(stats); 6021 req->num_vnics = cpu_to_le16(vnics); 6022 } 6023 req->enables = cpu_to_le32(enables); 6024 } 6025 6026 static void 6027 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, 6028 struct hwrm_func_vf_cfg_input *req, int tx_rings, 6029 int rx_rings, int ring_grps, int cp_rings, 6030 int stats, int vnics) 6031 { 6032 u32 enables = 0; 6033 6034 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); 6035 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 6036 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 6037 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 6038 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 6039 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6040 enables |= tx_rings + ring_grps ? 6041 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6042 } else { 6043 enables |= cp_rings ? 6044 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 6045 enables |= ring_grps ? 6046 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 6047 } 6048 enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 6049 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 6050 6051 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 6052 req->num_tx_rings = cpu_to_le16(tx_rings); 6053 req->num_rx_rings = cpu_to_le16(rx_rings); 6054 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6055 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 6056 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 6057 } else { 6058 req->num_cmpl_rings = cpu_to_le16(cp_rings); 6059 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 6060 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 6061 } 6062 req->num_stat_ctxs = cpu_to_le16(stats); 6063 req->num_vnics = cpu_to_le16(vnics); 6064 6065 req->enables = cpu_to_le32(enables); 6066 } 6067 6068 static int 6069 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6070 int ring_grps, int cp_rings, int stats, int vnics) 6071 { 6072 struct hwrm_func_cfg_input req = {0}; 6073 int rc; 6074 6075 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6076 cp_rings, stats, vnics); 6077 if (!req.enables) 6078 return 0; 6079 6080 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6081 if (rc) 6082 return rc; 6083 6084 if (bp->hwrm_spec_code < 0x10601) 6085 bp->hw_resc.resv_tx_rings = tx_rings; 6086 6087 return bnxt_hwrm_get_rings(bp); 6088 } 6089 6090 static int 6091 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6092 int ring_grps, int cp_rings, int stats, int vnics) 6093 { 6094 struct hwrm_func_vf_cfg_input req = {0}; 6095 int rc; 6096 6097 if (!BNXT_NEW_RM(bp)) { 6098 bp->hw_resc.resv_tx_rings = tx_rings; 6099 return 0; 6100 } 6101 6102 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6103 cp_rings, stats, vnics); 6104 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6105 if (rc) 6106 return rc; 6107 6108 return bnxt_hwrm_get_rings(bp); 6109 } 6110 6111 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 6112 int cp, int stat, int vnic) 6113 { 6114 if (BNXT_PF(bp)) 6115 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 6116 vnic); 6117 else 6118 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 6119 vnic); 6120 } 6121 6122 int bnxt_nq_rings_in_use(struct bnxt *bp) 6123 { 6124 int cp = bp->cp_nr_rings; 6125 int ulp_msix, ulp_base; 6126 6127 ulp_msix = bnxt_get_ulp_msix_num(bp); 6128 if (ulp_msix) { 6129 ulp_base = bnxt_get_ulp_msix_base(bp); 6130 cp += ulp_msix; 6131 if ((ulp_base + ulp_msix) > cp) 6132 cp = ulp_base + ulp_msix; 6133 } 6134 return cp; 6135 } 6136 6137 static int bnxt_cp_rings_in_use(struct bnxt *bp) 6138 { 6139 int cp; 6140 6141 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6142 return bnxt_nq_rings_in_use(bp); 6143 6144 cp = bp->tx_nr_rings + bp->rx_nr_rings; 6145 return cp; 6146 } 6147 6148 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 6149 { 6150 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); 6151 int cp = bp->cp_nr_rings; 6152 6153 if (!ulp_stat) 6154 return cp; 6155 6156 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) 6157 return bnxt_get_ulp_msix_base(bp) + ulp_stat; 6158 6159 return cp + ulp_stat; 6160 } 6161 6162 /* Check if a default RSS map needs to be setup. This function is only 6163 * used on older firmware that does not require reserving RX rings. 
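 * With resource-manager (NEW_RM) firmware the RSS map is refreshed from
 * __bnxt_reserve_rings() instead.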
6164 */ 6165 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 6166 { 6167 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6168 6169 /* The RSS map is valid for RX rings set to resv_rx_rings */ 6170 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 6171 hw_resc->resv_rx_rings = bp->rx_nr_rings; 6172 if (!netif_is_rxfh_configured(bp->dev)) 6173 bnxt_set_dflt_rss_indir_tbl(bp); 6174 } 6175 } 6176 6177 static bool bnxt_need_reserve_rings(struct bnxt *bp) 6178 { 6179 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6180 int cp = bnxt_cp_rings_in_use(bp); 6181 int nq = bnxt_nq_rings_in_use(bp); 6182 int rx = bp->rx_nr_rings, stat; 6183 int vnic = 1, grp = rx; 6184 6185 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 6186 bp->hwrm_spec_code >= 0x10601) 6187 return true; 6188 6189 /* Old firmware does not need RX ring reservations but we still 6190 * need to setup a default RSS map when needed. With new firmware 6191 * we go through RX ring reservations first and then set up the 6192 * RSS map for the successfully reserved RX rings when needed. 6193 */ 6194 if (!BNXT_NEW_RM(bp)) { 6195 bnxt_check_rss_tbl_no_rmgr(bp); 6196 return false; 6197 } 6198 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 6199 vnic = rx + 1; 6200 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6201 rx <<= 1; 6202 stat = bnxt_get_func_stat_ctxs(bp); 6203 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 6204 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 6205 (hw_resc->resv_hw_ring_grps != grp && 6206 !(bp->flags & BNXT_FLAG_CHIP_P5))) 6207 return true; 6208 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && 6209 hw_resc->resv_irqs != nq) 6210 return true; 6211 return false; 6212 } 6213 6214 static int __bnxt_reserve_rings(struct bnxt *bp) 6215 { 6216 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6217 int cp = bnxt_nq_rings_in_use(bp); 6218 int tx = bp->tx_nr_rings; 6219 int rx = bp->rx_nr_rings; 6220 int grp, rx_rings, rc; 6221 int vnic = 1, stat; 6222 bool sh = false; 6223 6224 if (!bnxt_need_reserve_rings(bp)) 6225 return 0; 6226 6227 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 6228 sh = true; 6229 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 6230 vnic = rx + 1; 6231 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6232 rx <<= 1; 6233 grp = bp->rx_nr_rings; 6234 stat = bnxt_get_func_stat_ctxs(bp); 6235 6236 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 6237 if (rc) 6238 return rc; 6239 6240 tx = hw_resc->resv_tx_rings; 6241 if (BNXT_NEW_RM(bp)) { 6242 rx = hw_resc->resv_rx_rings; 6243 cp = hw_resc->resv_irqs; 6244 grp = hw_resc->resv_hw_ring_grps; 6245 vnic = hw_resc->resv_vnics; 6246 stat = hw_resc->resv_stat_ctxs; 6247 } 6248 6249 rx_rings = rx; 6250 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 6251 if (rx >= 2) { 6252 rx_rings = rx >> 1; 6253 } else { 6254 if (netif_running(bp->dev)) 6255 return -ENOMEM; 6256 6257 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 6258 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 6259 bp->dev->hw_features &= ~NETIF_F_LRO; 6260 bp->dev->features &= ~NETIF_F_LRO; 6261 bnxt_set_ring_params(bp); 6262 } 6263 } 6264 rx_rings = min_t(int, rx_rings, grp); 6265 cp = min_t(int, cp, bp->cp_nr_rings); 6266 if (stat > bnxt_get_ulp_stat_ctxs(bp)) 6267 stat -= bnxt_get_ulp_stat_ctxs(bp); 6268 cp = min_t(int, cp, stat); 6269 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 6270 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6271 rx = rx_rings << 1; 6272 cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; 6273 bp->tx_nr_rings = tx; 6274 6275 /* If we cannot reserve all the RX rings, reset the RSS map only 6276 * if absolutely necessary 6277 */ 6278 if (rx_rings != bp->rx_nr_rings) { 6279 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 6280 rx_rings, bp->rx_nr_rings); 6281 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) && 6282 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 6283 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 6284 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 6285 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 6286 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 6287 } 6288 } 6289 bp->rx_nr_rings = rx_rings; 6290 bp->cp_nr_rings = cp; 6291 6292 if (!tx || !rx || !cp || !grp || !vnic || !stat) 6293 return -ENOMEM; 6294 6295 if (!netif_is_rxfh_configured(bp->dev)) 6296 bnxt_set_dflt_rss_indir_tbl(bp); 6297 6298 return rc; 6299 } 6300 6301 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6302 int ring_grps, int cp_rings, int stats, 6303 int vnics) 6304 { 6305 struct hwrm_func_vf_cfg_input req = {0}; 6306 u32 flags; 6307 6308 if (!BNXT_NEW_RM(bp)) 6309 return 0; 6310 6311 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6312 cp_rings, stats, vnics); 6313 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 6314 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6315 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6316 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6317 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 6318 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 6319 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6320 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6321 6322 req.flags = cpu_to_le32(flags); 6323 return hwrm_send_message_silent(bp, &req, sizeof(req), 6324 HWRM_CMD_TIMEOUT); 6325 } 6326 6327 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6328 int ring_grps, int cp_rings, int stats, 6329 int vnics) 6330 { 6331 struct hwrm_func_cfg_input req = {0}; 6332 u32 flags; 6333 6334 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6335 cp_rings, stats, vnics); 6336 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 6337 if (BNXT_NEW_RM(bp)) { 6338 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6339 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6340 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6341 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 6342 if (bp->flags & BNXT_FLAG_CHIP_P5) 6343 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 6344 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 6345 else 6346 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6347 } 6348 6349 req.flags = cpu_to_le32(flags); 6350 return hwrm_send_message_silent(bp, &req, sizeof(req), 6351 HWRM_CMD_TIMEOUT); 6352 } 6353 6354 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6355 int ring_grps, int cp_rings, int stats, 6356 int vnics) 6357 { 6358 if (bp->hwrm_spec_code < 0x10801) 6359 return 0; 6360 6361 if (BNXT_PF(bp)) 6362 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 6363 ring_grps, cp_rings, stats, 6364 vnics); 6365 6366 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6367 cp_rings, stats, vnics); 6368 } 6369 6370 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 6371 { 6372 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6373 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6374 struct hwrm_ring_aggint_qcaps_input req = {0}; 6375 int rc; 6376 6377 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 6378 
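/* Start with conservative legacy defaults so coalescing still works when
 * the RING_AGGINT_QCAPS query is unsupported (spec < 0x10902) or fails;
 * the values are overwritten from the firmware response below.
 */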
coal_cap->num_cmpl_dma_aggr_max = 63; 6379 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 6380 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 6381 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 6382 coal_cap->int_lat_tmr_min_max = 65535; 6383 coal_cap->int_lat_tmr_max_max = 65535; 6384 coal_cap->num_cmpl_aggr_int_max = 65535; 6385 coal_cap->timer_units = 80; 6386 6387 if (bp->hwrm_spec_code < 0x10902) 6388 return; 6389 6390 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); 6391 mutex_lock(&bp->hwrm_cmd_lock); 6392 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6393 if (!rc) { 6394 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 6395 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 6396 coal_cap->num_cmpl_dma_aggr_max = 6397 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 6398 coal_cap->num_cmpl_dma_aggr_during_int_max = 6399 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 6400 coal_cap->cmpl_aggr_dma_tmr_max = 6401 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 6402 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 6403 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 6404 coal_cap->int_lat_tmr_min_max = 6405 le16_to_cpu(resp->int_lat_tmr_min_max); 6406 coal_cap->int_lat_tmr_max_max = 6407 le16_to_cpu(resp->int_lat_tmr_max_max); 6408 coal_cap->num_cmpl_aggr_int_max = 6409 le16_to_cpu(resp->num_cmpl_aggr_int_max); 6410 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 6411 } 6412 mutex_unlock(&bp->hwrm_cmd_lock); 6413 } 6414 6415 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 6416 { 6417 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6418 6419 return usec * 1000 / coal_cap->timer_units; 6420 } 6421 6422 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 6423 struct bnxt_coal *hw_coal, 6424 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 6425 { 6426 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6427 u32 cmpl_params = coal_cap->cmpl_params; 6428 u16 val, tmr, max, flags = 0; 6429 6430 max = hw_coal->bufs_per_record * 128; 6431 if (hw_coal->budget) 6432 max = hw_coal->bufs_per_record * hw_coal->budget; 6433 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 6434 6435 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 6436 req->num_cmpl_aggr_int = cpu_to_le16(val); 6437 6438 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 6439 req->num_cmpl_dma_aggr = cpu_to_le16(val); 6440 6441 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 6442 coal_cap->num_cmpl_dma_aggr_during_int_max); 6443 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 6444 6445 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 6446 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 6447 req->int_lat_tmr_max = cpu_to_le16(tmr); 6448 6449 /* min timer set to 1/2 of interrupt timer */ 6450 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 6451 val = tmr / 2; 6452 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 6453 req->int_lat_tmr_min = cpu_to_le16(val); 6454 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6455 } 6456 6457 /* buf timer set to 1/4 of interrupt timer */ 6458 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 6459 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 6460 6461 if (cmpl_params & 6462 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 6463 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 6464 val = clamp_t(u16, tmr, 1, 6465 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 6466 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 6467 
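/* Tell firmware the during-interrupt DMA aggregation timer is valid. */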
req->enables |= 6468 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 6469 } 6470 6471 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 6472 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 6473 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 6474 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 6475 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 6476 req->flags = cpu_to_le16(flags); 6477 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 6478 } 6479 6480 /* Caller holds bp->hwrm_cmd_lock */ 6481 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 6482 struct bnxt_coal *hw_coal) 6483 { 6484 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; 6485 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6486 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6487 u32 nq_params = coal_cap->nq_params; 6488 u16 tmr; 6489 6490 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 6491 return 0; 6492 6493 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, 6494 -1, -1); 6495 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 6496 req.flags = 6497 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 6498 6499 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 6500 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 6501 req.int_lat_tmr_min = cpu_to_le16(tmr); 6502 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6503 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6504 } 6505 6506 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 6507 { 6508 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; 6509 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6510 struct bnxt_coal coal; 6511 6512 /* Tick values in micro seconds. 6513 * 1 coal_buf x bufs_per_record = 1 completion record. 
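 * The per-ring values from cpr->rx_ring_coal override the global
 * rx_coal ticks and bufs copied below.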
6514 */ 6515 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 6516 6517 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 6518 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 6519 6520 if (!bnapi->rx_ring) 6521 return -ENODEV; 6522 6523 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6524 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6525 6526 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); 6527 6528 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 6529 6530 return hwrm_send_message(bp, &req_rx, sizeof(req_rx), 6531 HWRM_CMD_TIMEOUT); 6532 } 6533 6534 int bnxt_hwrm_set_coal(struct bnxt *bp) 6535 { 6536 int i, rc = 0; 6537 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 6538 req_tx = {0}, *req; 6539 6540 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6541 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6542 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 6543 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6544 6545 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); 6546 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); 6547 6548 mutex_lock(&bp->hwrm_cmd_lock); 6549 for (i = 0; i < bp->cp_nr_rings; i++) { 6550 struct bnxt_napi *bnapi = bp->bnapi[i]; 6551 struct bnxt_coal *hw_coal; 6552 u16 ring_id; 6553 6554 req = &req_rx; 6555 if (!bnapi->rx_ring) { 6556 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6557 req = &req_tx; 6558 } else { 6559 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 6560 } 6561 req->ring_id = cpu_to_le16(ring_id); 6562 6563 rc = _hwrm_send_message(bp, req, sizeof(*req), 6564 HWRM_CMD_TIMEOUT); 6565 if (rc) 6566 break; 6567 6568 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6569 continue; 6570 6571 if (bnapi->rx_ring && bnapi->tx_ring) { 6572 req = &req_tx; 6573 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6574 req->ring_id = cpu_to_le16(ring_id); 6575 rc = _hwrm_send_message(bp, req, sizeof(*req), 6576 HWRM_CMD_TIMEOUT); 6577 if (rc) 6578 break; 6579 } 6580 if (bnapi->rx_ring) 6581 hw_coal = &bp->rx_coal; 6582 else 6583 hw_coal = &bp->tx_coal; 6584 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 6585 } 6586 mutex_unlock(&bp->hwrm_cmd_lock); 6587 return rc; 6588 } 6589 6590 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 6591 { 6592 struct hwrm_stat_ctx_clr_stats_input req0 = {0}; 6593 struct hwrm_stat_ctx_free_input req = {0}; 6594 int i; 6595 6596 if (!bp->bnapi) 6597 return; 6598 6599 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6600 return; 6601 6602 bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1); 6603 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 6604 6605 mutex_lock(&bp->hwrm_cmd_lock); 6606 for (i = 0; i < bp->cp_nr_rings; i++) { 6607 struct bnxt_napi *bnapi = bp->bnapi[i]; 6608 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6609 6610 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 6611 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 6612 if (BNXT_FW_MAJ(bp) <= 20) { 6613 req0.stat_ctx_id = req.stat_ctx_id; 6614 _hwrm_send_message(bp, &req0, sizeof(req0), 6615 HWRM_CMD_TIMEOUT); 6616 } 6617 _hwrm_send_message(bp, &req, sizeof(req), 6618 HWRM_CMD_TIMEOUT); 6619 6620 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 6621 } 6622 } 6623 mutex_unlock(&bp->hwrm_cmd_lock); 6624 } 6625 6626 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 6627 { 6628 int rc = 0, i; 6629 struct hwrm_stat_ctx_alloc_input req = {0}; 6630 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 6631 6632 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6633 return 0; 6634 6635 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 6636 
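/* One stats context is allocated per completion ring; the request is
 * reused in the loop below with only the DMA address changing.
 * stats_coal_ticks is in usecs, hence the divide by 1000 for the ms
 * update period.
 */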
6637 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 6638 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 6639 6640 mutex_lock(&bp->hwrm_cmd_lock); 6641 for (i = 0; i < bp->cp_nr_rings; i++) { 6642 struct bnxt_napi *bnapi = bp->bnapi[i]; 6643 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6644 6645 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 6646 6647 rc = _hwrm_send_message(bp, &req, sizeof(req), 6648 HWRM_CMD_TIMEOUT); 6649 if (rc) 6650 break; 6651 6652 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 6653 6654 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 6655 } 6656 mutex_unlock(&bp->hwrm_cmd_lock); 6657 return rc; 6658 } 6659 6660 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 6661 { 6662 struct hwrm_func_qcfg_input req = {0}; 6663 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 6664 u32 min_db_offset = 0; 6665 u16 flags; 6666 int rc; 6667 6668 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 6669 req.fid = cpu_to_le16(0xffff); 6670 mutex_lock(&bp->hwrm_cmd_lock); 6671 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6672 if (rc) 6673 goto func_qcfg_exit; 6674 6675 #ifdef CONFIG_BNXT_SRIOV 6676 if (BNXT_VF(bp)) { 6677 struct bnxt_vf_info *vf = &bp->vf; 6678 6679 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 6680 } else { 6681 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 6682 } 6683 #endif 6684 flags = le16_to_cpu(resp->flags); 6685 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 6686 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 6687 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 6688 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 6689 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 6690 } 6691 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 6692 bp->flags |= BNXT_FLAG_MULTI_HOST; 6693 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) 6694 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; 6695 6696 switch (resp->port_partition_type) { 6697 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 6698 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 6699 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 6700 bp->port_partition_type = resp->port_partition_type; 6701 break; 6702 } 6703 if (bp->hwrm_spec_code < 0x10707 || 6704 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 6705 bp->br_mode = BRIDGE_MODE_VEB; 6706 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 6707 bp->br_mode = BRIDGE_MODE_VEPA; 6708 else 6709 bp->br_mode = BRIDGE_MODE_UNDEF; 6710 6711 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 6712 if (!bp->max_mtu) 6713 bp->max_mtu = BNXT_MAX_MTU; 6714 6715 if (bp->db_size) 6716 goto func_qcfg_exit; 6717 6718 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6719 if (BNXT_PF(bp)) 6720 min_db_offset = DB_PF_OFFSET_P5; 6721 else 6722 min_db_offset = DB_VF_OFFSET_P5; 6723 } 6724 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 6725 1024); 6726 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 6727 bp->db_size <= min_db_offset) 6728 bp->db_size = pci_resource_len(bp->pdev, 2); 6729 6730 func_qcfg_exit: 6731 mutex_unlock(&bp->hwrm_cmd_lock); 6732 return rc; 6733 } 6734 6735 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 6736 { 6737 struct hwrm_func_backing_store_qcaps_input req = {0}; 6738 struct hwrm_func_backing_store_qcaps_output *resp = 6739 bp->hwrm_cmd_resp_addr; 6740 int rc; 6741 6742 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 6743 return 0; 6744 6745 bnxt_hwrm_cmd_hdr_init(bp, &req, 
HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); 6746 mutex_lock(&bp->hwrm_cmd_lock); 6747 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6748 if (!rc) { 6749 struct bnxt_ctx_pg_info *ctx_pg; 6750 struct bnxt_ctx_mem_info *ctx; 6751 int i, tqm_rings; 6752 6753 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 6754 if (!ctx) { 6755 rc = -ENOMEM; 6756 goto ctx_err; 6757 } 6758 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); 6759 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 6760 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 6761 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size); 6762 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 6763 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries); 6764 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size); 6765 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 6766 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries); 6767 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size); 6768 ctx->vnic_max_vnic_entries = 6769 le16_to_cpu(resp->vnic_max_vnic_entries); 6770 ctx->vnic_max_ring_table_entries = 6771 le16_to_cpu(resp->vnic_max_ring_table_entries); 6772 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size); 6773 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries); 6774 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size); 6775 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size); 6776 ctx->tqm_min_entries_per_ring = 6777 le32_to_cpu(resp->tqm_min_entries_per_ring); 6778 ctx->tqm_max_entries_per_ring = 6779 le32_to_cpu(resp->tqm_max_entries_per_ring); 6780 ctx->tqm_entries_multiple = resp->tqm_entries_multiple; 6781 if (!ctx->tqm_entries_multiple) 6782 ctx->tqm_entries_multiple = 1; 6783 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); 6784 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); 6785 ctx->mrav_num_entries_units = 6786 le16_to_cpu(resp->mrav_num_entries_units); 6787 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); 6788 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); 6789 ctx->ctx_kind_initializer = resp->ctx_kind_initializer; 6790 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 6791 if (!ctx->tqm_fp_rings_count) 6792 ctx->tqm_fp_rings_count = bp->max_q; 6793 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 6794 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 6795 6796 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS; 6797 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL); 6798 if (!ctx_pg) { 6799 kfree(ctx); 6800 rc = -ENOMEM; 6801 goto ctx_err; 6802 } 6803 for (i = 0; i < tqm_rings; i++, ctx_pg++) 6804 ctx->tqm_mem[i] = ctx_pg; 6805 bp->ctx = ctx; 6806 } else { 6807 rc = 0; 6808 } 6809 ctx_err: 6810 mutex_unlock(&bp->hwrm_cmd_lock); 6811 return rc; 6812 } 6813 6814 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 6815 __le64 *pg_dir) 6816 { 6817 u8 pg_size = 0; 6818 6819 if (BNXT_PAGE_SHIFT == 13) 6820 pg_size = 1 << 4; 6821 else if (BNXT_PAGE_SIZE == 16) 6822 pg_size = 2 << 4; 6823 6824 *pg_attr = pg_size; 6825 if (rmem->depth >= 1) { 6826 if (rmem->depth == 2) 6827 *pg_attr |= 2; 6828 else 6829 *pg_attr |= 1; 6830 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 6831 } else { 6832 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 6833 } 6834 } 6835 6836 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 6837 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 6838 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 6839 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | 
\ 6840 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 6841 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 6842 6843 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 6844 { 6845 struct hwrm_func_backing_store_cfg_input req = {0}; 6846 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6847 struct bnxt_ctx_pg_info *ctx_pg; 6848 __le32 *num_entries; 6849 __le64 *pg_dir; 6850 u32 flags = 0; 6851 u8 *pg_attr; 6852 u32 ena; 6853 int i; 6854 6855 if (!ctx) 6856 return 0; 6857 6858 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); 6859 req.enables = cpu_to_le32(enables); 6860 6861 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 6862 ctx_pg = &ctx->qp_mem; 6863 req.qp_num_entries = cpu_to_le32(ctx_pg->entries); 6864 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); 6865 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); 6866 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); 6867 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6868 &req.qpc_pg_size_qpc_lvl, 6869 &req.qpc_page_dir); 6870 } 6871 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 6872 ctx_pg = &ctx->srq_mem; 6873 req.srq_num_entries = cpu_to_le32(ctx_pg->entries); 6874 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); 6875 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); 6876 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6877 &req.srq_pg_size_srq_lvl, 6878 &req.srq_page_dir); 6879 } 6880 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 6881 ctx_pg = &ctx->cq_mem; 6882 req.cq_num_entries = cpu_to_le32(ctx_pg->entries); 6883 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); 6884 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); 6885 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, 6886 &req.cq_page_dir); 6887 } 6888 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 6889 ctx_pg = &ctx->vnic_mem; 6890 req.vnic_num_vnic_entries = 6891 cpu_to_le16(ctx->vnic_max_vnic_entries); 6892 req.vnic_num_ring_table_entries = 6893 cpu_to_le16(ctx->vnic_max_ring_table_entries); 6894 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); 6895 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6896 &req.vnic_pg_size_vnic_lvl, 6897 &req.vnic_page_dir); 6898 } 6899 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 6900 ctx_pg = &ctx->stat_mem; 6901 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); 6902 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); 6903 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6904 &req.stat_pg_size_stat_lvl, 6905 &req.stat_page_dir); 6906 } 6907 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 6908 ctx_pg = &ctx->mrav_mem; 6909 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); 6910 if (ctx->mrav_num_entries_units) 6911 flags |= 6912 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 6913 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); 6914 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6915 &req.mrav_pg_size_mrav_lvl, 6916 &req.mrav_page_dir); 6917 } 6918 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 6919 ctx_pg = &ctx->tim_mem; 6920 req.tim_num_entries = cpu_to_le32(ctx_pg->entries); 6921 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); 6922 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6923 &req.tim_pg_size_tim_lvl, 6924 &req.tim_page_dir); 6925 } 6926 for (i = 0, num_entries = &req.tqm_sp_num_entries, 6927 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, 6928 pg_dir = &req.tqm_sp_page_dir, 6929 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; 6930 i < BNXT_MAX_TQM_RINGS; 6931 i++, 
num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 6932 if (!(enables & ena)) 6933 continue; 6934 6935 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); 6936 ctx_pg = ctx->tqm_mem[i]; 6937 *num_entries = cpu_to_le32(ctx_pg->entries); 6938 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 6939 } 6940 req.flags = cpu_to_le32(flags); 6941 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6942 } 6943 6944 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 6945 struct bnxt_ctx_pg_info *ctx_pg) 6946 { 6947 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6948 6949 rmem->page_size = BNXT_PAGE_SIZE; 6950 rmem->pg_arr = ctx_pg->ctx_pg_arr; 6951 rmem->dma_arr = ctx_pg->ctx_dma_arr; 6952 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 6953 if (rmem->depth >= 1) 6954 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 6955 return bnxt_alloc_ring(bp, rmem); 6956 } 6957 6958 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 6959 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 6960 u8 depth, bool use_init_val) 6961 { 6962 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6963 int rc; 6964 6965 if (!mem_size) 6966 return -EINVAL; 6967 6968 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6969 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 6970 ctx_pg->nr_pages = 0; 6971 return -EINVAL; 6972 } 6973 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 6974 int nr_tbls, i; 6975 6976 rmem->depth = 2; 6977 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 6978 GFP_KERNEL); 6979 if (!ctx_pg->ctx_pg_tbl) 6980 return -ENOMEM; 6981 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 6982 rmem->nr_pages = nr_tbls; 6983 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 6984 if (rc) 6985 return rc; 6986 for (i = 0; i < nr_tbls; i++) { 6987 struct bnxt_ctx_pg_info *pg_tbl; 6988 6989 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 6990 if (!pg_tbl) 6991 return -ENOMEM; 6992 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 6993 rmem = &pg_tbl->ring_mem; 6994 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 6995 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 6996 rmem->depth = 1; 6997 rmem->nr_pages = MAX_CTX_PAGES; 6998 if (use_init_val) 6999 rmem->init_val = bp->ctx->ctx_kind_initializer; 7000 if (i == (nr_tbls - 1)) { 7001 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 7002 7003 if (rem) 7004 rmem->nr_pages = rem; 7005 } 7006 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 7007 if (rc) 7008 break; 7009 } 7010 } else { 7011 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 7012 if (rmem->nr_pages > 1 || depth) 7013 rmem->depth = 1; 7014 if (use_init_val) 7015 rmem->init_val = bp->ctx->ctx_kind_initializer; 7016 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 7017 } 7018 return rc; 7019 } 7020 7021 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 7022 struct bnxt_ctx_pg_info *ctx_pg) 7023 { 7024 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 7025 7026 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 7027 ctx_pg->ctx_pg_tbl) { 7028 int i, nr_tbls = rmem->nr_pages; 7029 7030 for (i = 0; i < nr_tbls; i++) { 7031 struct bnxt_ctx_pg_info *pg_tbl; 7032 struct bnxt_ring_mem_info *rmem2; 7033 7034 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 7035 if (!pg_tbl) 7036 continue; 7037 rmem2 = &pg_tbl->ring_mem; 7038 bnxt_free_ring(bp, rmem2); 7039 ctx_pg->ctx_pg_arr[i] = NULL; 7040 kfree(pg_tbl); 7041 ctx_pg->ctx_pg_tbl[i] = NULL; 7042 } 7043 kfree(ctx_pg->ctx_pg_tbl); 7044 ctx_pg->ctx_pg_tbl = NULL; 7045 } 7046 bnxt_free_ring(bp, rmem); 7047 ctx_pg->nr_pages = 0; 7048 } 7049 7050 static void bnxt_free_ctx_mem(struct bnxt *bp) 7051 { 
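/* Free the TQM backing store first (slow-path ring 0 plus the fast-path
 * rings), then the remaining context regions in reverse order of
 * allocation, and clear BNXT_CTX_FLAG_INITED so the context memory can
 * be set up again later.
 */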
7052 struct bnxt_ctx_mem_info *ctx = bp->ctx; 7053 int i; 7054 7055 if (!ctx) 7056 return; 7057 7058 if (ctx->tqm_mem[0]) { 7059 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 7060 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); 7061 kfree(ctx->tqm_mem[0]); 7062 ctx->tqm_mem[0] = NULL; 7063 } 7064 7065 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); 7066 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); 7067 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); 7068 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); 7069 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); 7070 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); 7071 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); 7072 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 7073 } 7074 7075 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 7076 { 7077 struct bnxt_ctx_pg_info *ctx_pg; 7078 struct bnxt_ctx_mem_info *ctx; 7079 u32 mem_size, ena, entries; 7080 u32 entries_sp, min; 7081 u32 num_mr, num_ah; 7082 u32 extra_srqs = 0; 7083 u32 extra_qps = 0; 7084 u8 pg_lvl = 1; 7085 int i, rc; 7086 7087 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 7088 if (rc) { 7089 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 7090 rc); 7091 return rc; 7092 } 7093 ctx = bp->ctx; 7094 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 7095 return 0; 7096 7097 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 7098 pg_lvl = 2; 7099 extra_qps = 65536; 7100 extra_srqs = 8192; 7101 } 7102 7103 ctx_pg = &ctx->qp_mem; 7104 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + 7105 extra_qps; 7106 mem_size = ctx->qp_entry_size * ctx_pg->entries; 7107 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 7108 if (rc) 7109 return rc; 7110 7111 ctx_pg = &ctx->srq_mem; 7112 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; 7113 mem_size = ctx->srq_entry_size * ctx_pg->entries; 7114 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 7115 if (rc) 7116 return rc; 7117 7118 ctx_pg = &ctx->cq_mem; 7119 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; 7120 mem_size = ctx->cq_entry_size * ctx_pg->entries; 7121 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 7122 if (rc) 7123 return rc; 7124 7125 ctx_pg = &ctx->vnic_mem; 7126 ctx_pg->entries = ctx->vnic_max_vnic_entries + 7127 ctx->vnic_max_ring_table_entries; 7128 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 7129 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 7130 if (rc) 7131 return rc; 7132 7133 ctx_pg = &ctx->stat_mem; 7134 ctx_pg->entries = ctx->stat_max_entries; 7135 mem_size = ctx->stat_entry_size * ctx_pg->entries; 7136 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 7137 if (rc) 7138 return rc; 7139 7140 ena = 0; 7141 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 7142 goto skip_rdma; 7143 7144 ctx_pg = &ctx->mrav_mem; 7145 /* 128K extra is needed to accommodate static AH context 7146 * allocation by f/w. 
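 * num_mr and num_ah below are fixed sizing assumptions
 * (256K MRs + 128K AHs).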
7147 */ 7148 num_mr = 1024 * 256; 7149 num_ah = 1024 * 128; 7150 ctx_pg->entries = num_mr + num_ah; 7151 mem_size = ctx->mrav_entry_size * ctx_pg->entries; 7152 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true); 7153 if (rc) 7154 return rc; 7155 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 7156 if (ctx->mrav_num_entries_units) 7157 ctx_pg->entries = 7158 ((num_mr / ctx->mrav_num_entries_units) << 16) | 7159 (num_ah / ctx->mrav_num_entries_units); 7160 7161 ctx_pg = &ctx->tim_mem; 7162 ctx_pg->entries = ctx->qp_mem.entries; 7163 mem_size = ctx->tim_entry_size * ctx_pg->entries; 7164 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 7165 if (rc) 7166 return rc; 7167 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 7168 7169 skip_rdma: 7170 min = ctx->tqm_min_entries_per_ring; 7171 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries + 7172 2 * (extra_qps + ctx->qp_min_qp1_entries) + min; 7173 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple); 7174 entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries; 7175 entries = roundup(entries, ctx->tqm_entries_multiple); 7176 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring); 7177 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 7178 ctx_pg = ctx->tqm_mem[i]; 7179 ctx_pg->entries = i ? entries : entries_sp; 7180 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 7181 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 7182 if (rc) 7183 return rc; 7184 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 7185 } 7186 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 7187 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 7188 if (rc) { 7189 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 7190 rc); 7191 return rc; 7192 } 7193 ctx->flags |= BNXT_CTX_FLAG_INITED; 7194 return 0; 7195 } 7196 7197 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 7198 { 7199 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 7200 struct hwrm_func_resource_qcaps_input req = {0}; 7201 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7202 int rc; 7203 7204 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); 7205 req.fid = cpu_to_le16(0xffff); 7206 7207 mutex_lock(&bp->hwrm_cmd_lock); 7208 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), 7209 HWRM_CMD_TIMEOUT); 7210 if (rc) 7211 goto hwrm_func_resc_qcaps_exit; 7212 7213 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 7214 if (!all) 7215 goto hwrm_func_resc_qcaps_exit; 7216 7217 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 7218 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 7219 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 7220 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 7221 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 7222 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 7223 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 7224 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 7225 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 7226 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 7227 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 7228 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 7229 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 7230 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 7231 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 7232 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 
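/* On P5 chips the NQ count is capped by the MSI-X vectors reported by
 * firmware, and ring groups are not a separate resource, so
 * max_hw_ring_grps simply mirrors max_rx_rings.
 */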
7233 7234 if (bp->flags & BNXT_FLAG_CHIP_P5) { 7235 u16 max_msix = le16_to_cpu(resp->max_msix); 7236 7237 hw_resc->max_nqs = max_msix; 7238 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 7239 } 7240 7241 if (BNXT_PF(bp)) { 7242 struct bnxt_pf_info *pf = &bp->pf; 7243 7244 pf->vf_resv_strategy = 7245 le16_to_cpu(resp->vf_reservation_strategy); 7246 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 7247 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 7248 } 7249 hwrm_func_resc_qcaps_exit: 7250 mutex_unlock(&bp->hwrm_cmd_lock); 7251 return rc; 7252 } 7253 7254 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 7255 { 7256 int rc = 0; 7257 struct hwrm_func_qcaps_input req = {0}; 7258 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 7259 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7260 u32 flags, flags_ext; 7261 7262 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 7263 req.fid = cpu_to_le16(0xffff); 7264 7265 mutex_lock(&bp->hwrm_cmd_lock); 7266 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7267 if (rc) 7268 goto hwrm_func_qcaps_exit; 7269 7270 flags = le32_to_cpu(resp->flags); 7271 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 7272 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 7273 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 7274 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 7275 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 7276 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 7277 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 7278 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 7279 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 7280 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 7281 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 7282 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 7283 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 7284 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 7285 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 7286 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 7287 7288 flags_ext = le32_to_cpu(resp->flags_ext); 7289 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 7290 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 7291 7292 bp->tx_push_thresh = 0; 7293 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 7294 BNXT_FW_MAJ(bp) > 217) 7295 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 7296 7297 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 7298 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 7299 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 7300 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 7301 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 7302 if (!hw_resc->max_hw_ring_grps) 7303 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 7304 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 7305 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 7306 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 7307 7308 if (BNXT_PF(bp)) { 7309 struct bnxt_pf_info *pf = &bp->pf; 7310 7311 pf->fw_fid = le16_to_cpu(resp->fid); 7312 pf->port_id = le16_to_cpu(resp->port_id); 7313 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 7314 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 7315 pf->max_vfs = le16_to_cpu(resp->max_vfs); 7316 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 7317 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 7318 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 7319 pf->max_tx_wm_flows = 
le32_to_cpu(resp->max_tx_wm_flows); 7320 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 7321 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 7322 bp->flags &= ~BNXT_FLAG_WOL_CAP; 7323 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 7324 bp->flags |= BNXT_FLAG_WOL_CAP; 7325 } else { 7326 #ifdef CONFIG_BNXT_SRIOV 7327 struct bnxt_vf_info *vf = &bp->vf; 7328 7329 vf->fw_fid = le16_to_cpu(resp->fid); 7330 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 7331 #endif 7332 } 7333 7334 hwrm_func_qcaps_exit: 7335 mutex_unlock(&bp->hwrm_cmd_lock); 7336 return rc; 7337 } 7338 7339 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 7340 7341 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 7342 { 7343 int rc; 7344 7345 rc = __bnxt_hwrm_func_qcaps(bp); 7346 if (rc) 7347 return rc; 7348 rc = bnxt_hwrm_queue_qportcfg(bp); 7349 if (rc) { 7350 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 7351 return rc; 7352 } 7353 if (bp->hwrm_spec_code >= 0x10803) { 7354 rc = bnxt_alloc_ctx_mem(bp); 7355 if (rc) 7356 return rc; 7357 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 7358 if (!rc) 7359 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 7360 } 7361 return 0; 7362 } 7363 7364 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 7365 { 7366 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; 7367 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 7368 int rc = 0; 7369 u32 flags; 7370 7371 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 7372 return 0; 7373 7374 resp = bp->hwrm_cmd_resp_addr; 7375 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); 7376 7377 mutex_lock(&bp->hwrm_cmd_lock); 7378 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7379 if (rc) 7380 goto hwrm_cfa_adv_qcaps_exit; 7381 7382 flags = le32_to_cpu(resp->flags); 7383 if (flags & 7384 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 7385 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 7386 7387 hwrm_cfa_adv_qcaps_exit: 7388 mutex_unlock(&bp->hwrm_cmd_lock); 7389 return rc; 7390 } 7391 7392 static int __bnxt_alloc_fw_health(struct bnxt *bp) 7393 { 7394 if (bp->fw_health) 7395 return 0; 7396 7397 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 7398 if (!bp->fw_health) 7399 return -ENOMEM; 7400 7401 return 0; 7402 } 7403 7404 static int bnxt_alloc_fw_health(struct bnxt *bp) 7405 { 7406 int rc; 7407 7408 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 7409 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 7410 return 0; 7411 7412 rc = __bnxt_alloc_fw_health(bp); 7413 if (rc) { 7414 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 7415 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 7416 return rc; 7417 } 7418 7419 return 0; 7420 } 7421 7422 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) 7423 { 7424 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + 7425 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 7426 BNXT_FW_HEALTH_WIN_MAP_OFF); 7427 } 7428 7429 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) 7430 { 7431 void __iomem *hs; 7432 u32 status_loc; 7433 u32 reg_type; 7434 u32 sig; 7435 7436 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); 7437 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); 7438 7439 sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); 7440 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { 7441 if (bp->fw_health) 7442 bp->fw_health->status_reliable = false; 7443 return; 7444 } 7445 7446 if (__bnxt_alloc_fw_health(bp)) { 7447 netdev_warn(bp->dev, "no memory for firmware status checks\n"); 7448 
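/* Without the fw_health struct the firmware status register cannot be
 * tracked; continue without it.
 */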
return; 7449 } 7450 7451 status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc)); 7452 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; 7453 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); 7454 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { 7455 __bnxt_map_fw_health_reg(bp, status_loc); 7456 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = 7457 BNXT_FW_HEALTH_WIN_OFF(status_loc); 7458 } 7459 7460 bp->fw_health->status_reliable = true; 7461 } 7462 7463 static int bnxt_map_fw_health_regs(struct bnxt *bp) 7464 { 7465 struct bnxt_fw_health *fw_health = bp->fw_health; 7466 u32 reg_base = 0xffffffff; 7467 int i; 7468 7469 /* Only pre-map the monitoring GRC registers using window 3 */ 7470 for (i = 0; i < 4; i++) { 7471 u32 reg = fw_health->regs[i]; 7472 7473 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 7474 continue; 7475 if (reg_base == 0xffffffff) 7476 reg_base = reg & BNXT_GRC_BASE_MASK; 7477 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 7478 return -ERANGE; 7479 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); 7480 } 7481 if (reg_base == 0xffffffff) 7482 return 0; 7483 7484 __bnxt_map_fw_health_reg(bp, reg_base); 7485 return 0; 7486 } 7487 7488 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 7489 { 7490 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 7491 struct bnxt_fw_health *fw_health = bp->fw_health; 7492 struct hwrm_error_recovery_qcfg_input req = {0}; 7493 int rc, i; 7494 7495 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 7496 return 0; 7497 7498 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); 7499 mutex_lock(&bp->hwrm_cmd_lock); 7500 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7501 if (rc) 7502 goto err_recovery_out; 7503 fw_health->flags = le32_to_cpu(resp->flags); 7504 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 7505 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 7506 rc = -EINVAL; 7507 goto err_recovery_out; 7508 } 7509 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 7510 fw_health->master_func_wait_dsecs = 7511 le32_to_cpu(resp->master_func_wait_period); 7512 fw_health->normal_func_wait_dsecs = 7513 le32_to_cpu(resp->normal_func_wait_period); 7514 fw_health->post_reset_wait_dsecs = 7515 le32_to_cpu(resp->master_func_wait_period_after_reset); 7516 fw_health->post_reset_max_wait_dsecs = 7517 le32_to_cpu(resp->max_bailout_time_after_reset); 7518 fw_health->regs[BNXT_FW_HEALTH_REG] = 7519 le32_to_cpu(resp->fw_health_status_reg); 7520 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 7521 le32_to_cpu(resp->fw_heartbeat_reg); 7522 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 7523 le32_to_cpu(resp->fw_reset_cnt_reg); 7524 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 7525 le32_to_cpu(resp->reset_inprogress_reg); 7526 fw_health->fw_reset_inprog_reg_mask = 7527 le32_to_cpu(resp->reset_inprogress_reg_mask); 7528 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 7529 if (fw_health->fw_reset_seq_cnt >= 16) { 7530 rc = -EINVAL; 7531 goto err_recovery_out; 7532 } 7533 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 7534 fw_health->fw_reset_seq_regs[i] = 7535 le32_to_cpu(resp->reset_reg[i]); 7536 fw_health->fw_reset_seq_vals[i] = 7537 le32_to_cpu(resp->reset_reg_val[i]); 7538 fw_health->fw_reset_seq_delay_msec[i] = 7539 resp->delay_after_reset[i]; 7540 } 7541 err_recovery_out: 7542 mutex_unlock(&bp->hwrm_cmd_lock); 7543 if (!rc) 7544 rc = bnxt_map_fw_health_regs(bp); 7545 if (rc) 7546 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 7547 return rc; 7548 
} 7549 7550 static int bnxt_hwrm_func_reset(struct bnxt *bp) 7551 { 7552 struct hwrm_func_reset_input req = {0}; 7553 7554 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 7555 req.enables = 0; 7556 7557 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 7558 } 7559 7560 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 7561 { 7562 struct hwrm_nvm_get_dev_info_output nvm_info; 7563 7564 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) 7565 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 7566 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 7567 nvm_info.nvm_cfg_ver_upd); 7568 } 7569 7570 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 7571 { 7572 int rc = 0; 7573 struct hwrm_queue_qportcfg_input req = {0}; 7574 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 7575 u8 i, j, *qptr; 7576 bool no_rdma; 7577 7578 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 7579 7580 mutex_lock(&bp->hwrm_cmd_lock); 7581 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7582 if (rc) 7583 goto qportcfg_exit; 7584 7585 if (!resp->max_configurable_queues) { 7586 rc = -EINVAL; 7587 goto qportcfg_exit; 7588 } 7589 bp->max_tc = resp->max_configurable_queues; 7590 bp->max_lltc = resp->max_configurable_lossless_queues; 7591 if (bp->max_tc > BNXT_MAX_QUEUE) 7592 bp->max_tc = BNXT_MAX_QUEUE; 7593 7594 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 7595 qptr = &resp->queue_id0; 7596 for (i = 0, j = 0; i < bp->max_tc; i++) { 7597 bp->q_info[j].queue_id = *qptr; 7598 bp->q_ids[i] = *qptr++; 7599 bp->q_info[j].queue_profile = *qptr++; 7600 bp->tc_to_qidx[j] = j; 7601 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 7602 (no_rdma && BNXT_PF(bp))) 7603 j++; 7604 } 7605 bp->max_q = bp->max_tc; 7606 bp->max_tc = max_t(u8, j, 1); 7607 7608 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 7609 bp->max_tc = 1; 7610 7611 if (bp->max_lltc > bp->max_tc) 7612 bp->max_lltc = bp->max_tc; 7613 7614 qportcfg_exit: 7615 mutex_unlock(&bp->hwrm_cmd_lock); 7616 return rc; 7617 } 7618 7619 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) 7620 { 7621 struct hwrm_ver_get_input req = {0}; 7622 int rc; 7623 7624 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 7625 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 7626 req.hwrm_intf_min = HWRM_VERSION_MINOR; 7627 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 7628 7629 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, 7630 silent); 7631 return rc; 7632 } 7633 7634 static int bnxt_hwrm_ver_get(struct bnxt *bp) 7635 { 7636 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 7637 u16 fw_maj, fw_min, fw_bld, fw_rsv; 7638 u32 dev_caps_cfg, hwrm_ver; 7639 int rc, len; 7640 7641 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 7642 mutex_lock(&bp->hwrm_cmd_lock); 7643 rc = __bnxt_hwrm_ver_get(bp, false); 7644 if (rc) 7645 goto hwrm_ver_get_exit; 7646 7647 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 7648 7649 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 7650 resp->hwrm_intf_min_8b << 8 | 7651 resp->hwrm_intf_upd_8b; 7652 if (resp->hwrm_intf_maj_8b < 1) { 7653 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 7654 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 7655 resp->hwrm_intf_upd_8b); 7656 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 7657 } 7658 7659 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 7660 HWRM_VERSION_UPDATE; 7661 7662 if (bp->hwrm_spec_code > hwrm_ver) 
7663 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 7664 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 7665 HWRM_VERSION_UPDATE); 7666 else 7667 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 7668 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 7669 resp->hwrm_intf_upd_8b); 7670 7671 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 7672 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 7673 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 7674 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 7675 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 7676 len = FW_VER_STR_LEN; 7677 } else { 7678 fw_maj = resp->hwrm_fw_maj_8b; 7679 fw_min = resp->hwrm_fw_min_8b; 7680 fw_bld = resp->hwrm_fw_bld_8b; 7681 fw_rsv = resp->hwrm_fw_rsvd_8b; 7682 len = BC_HWRM_STR_LEN; 7683 } 7684 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 7685 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 7686 fw_rsv); 7687 7688 if (strlen(resp->active_pkg_name)) { 7689 int fw_ver_len = strlen(bp->fw_ver_str); 7690 7691 snprintf(bp->fw_ver_str + fw_ver_len, 7692 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 7693 resp->active_pkg_name); 7694 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 7695 } 7696 7697 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 7698 if (!bp->hwrm_cmd_timeout) 7699 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 7700 7701 if (resp->hwrm_intf_maj_8b >= 1) { 7702 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 7703 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 7704 } 7705 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 7706 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 7707 7708 bp->chip_num = le16_to_cpu(resp->chip_num); 7709 bp->chip_rev = resp->chip_rev; 7710 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 7711 !resp->chip_metal) 7712 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 7713 7714 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 7715 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 7716 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 7717 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 7718 7719 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 7720 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 7721 7722 if (dev_caps_cfg & 7723 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 7724 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 7725 7726 if (dev_caps_cfg & 7727 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 7728 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 7729 7730 if (dev_caps_cfg & 7731 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 7732 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 7733 7734 hwrm_ver_get_exit: 7735 mutex_unlock(&bp->hwrm_cmd_lock); 7736 return rc; 7737 } 7738 7739 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 7740 { 7741 struct hwrm_fw_set_time_input req = {0}; 7742 struct tm tm; 7743 time64_t now = ktime_get_real_seconds(); 7744 7745 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 7746 bp->hwrm_spec_code < 0x10400) 7747 return -EOPNOTSUPP; 7748 7749 time64_to_tm(now, 0, &tm); 7750 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 7751 req.year = cpu_to_le16(1900 + tm.tm_year); 7752 req.month = 1 + tm.tm_mon; 7753 req.day = tm.tm_mday; 7754 req.hour = tm.tm_hour; 7755 req.minute = tm.tm_min; 7756 req.second = tm.tm_sec; 7757 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7758 } 7759 7760 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 7761 { 7762 u64 sw_tmp; 7763 7764 hw &= mask; 7765 sw_tmp = (*sw & ~mask) | hw; 7766 if (hw < (*sw & mask)) 7767 sw_tmp += 
mask + 1; 7768 WRITE_ONCE(*sw, sw_tmp); 7769 } 7770 7771 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 7772 int count, bool ignore_zero) 7773 { 7774 int i; 7775 7776 for (i = 0; i < count; i++) { 7777 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 7778 7779 if (ignore_zero && !hw) 7780 continue; 7781 7782 if (masks[i] == -1ULL) 7783 sw_stats[i] = hw; 7784 else 7785 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 7786 } 7787 } 7788 7789 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 7790 { 7791 if (!stats->hw_stats) 7792 return; 7793 7794 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 7795 stats->hw_masks, stats->len / 8, false); 7796 } 7797 7798 static void bnxt_accumulate_all_stats(struct bnxt *bp) 7799 { 7800 struct bnxt_stats_mem *ring0_stats; 7801 bool ignore_zero = false; 7802 int i; 7803 7804 /* Chip bug. Counter intermittently becomes 0. */ 7805 if (bp->flags & BNXT_FLAG_CHIP_P5) 7806 ignore_zero = true; 7807 7808 for (i = 0; i < bp->cp_nr_rings; i++) { 7809 struct bnxt_napi *bnapi = bp->bnapi[i]; 7810 struct bnxt_cp_ring_info *cpr; 7811 struct bnxt_stats_mem *stats; 7812 7813 cpr = &bnapi->cp_ring; 7814 stats = &cpr->stats; 7815 if (!i) 7816 ring0_stats = stats; 7817 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 7818 ring0_stats->hw_masks, 7819 ring0_stats->len / 8, ignore_zero); 7820 } 7821 if (bp->flags & BNXT_FLAG_PORT_STATS) { 7822 struct bnxt_stats_mem *stats = &bp->port_stats; 7823 __le64 *hw_stats = stats->hw_stats; 7824 u64 *sw_stats = stats->sw_stats; 7825 u64 *masks = stats->hw_masks; 7826 int cnt; 7827 7828 cnt = sizeof(struct rx_port_stats) / 8; 7829 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 7830 7831 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 7832 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 7833 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 7834 cnt = sizeof(struct tx_port_stats) / 8; 7835 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 7836 } 7837 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 7838 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 7839 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 7840 } 7841 } 7842 7843 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 7844 { 7845 struct bnxt_pf_info *pf = &bp->pf; 7846 struct hwrm_port_qstats_input req = {0}; 7847 7848 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 7849 return 0; 7850 7851 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 7852 return -EOPNOTSUPP; 7853 7854 req.flags = flags; 7855 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 7856 req.port_id = cpu_to_le16(pf->port_id); 7857 req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 7858 BNXT_TX_PORT_STATS_BYTE_OFFSET); 7859 req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 7860 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7861 } 7862 7863 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 7864 { 7865 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 7866 struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; 7867 struct hwrm_port_qstats_ext_input req = {0}; 7868 struct bnxt_pf_info *pf = &bp->pf; 7869 u32 tx_stat_size; 7870 int rc; 7871 7872 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 7873 return 0; 7874 7875 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 7876 return -EOPNOTSUPP; 7877 7878 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); 7879 req.flags = flags; 7880 req.port_id = cpu_to_le16(pf->port_id); 
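/* Descriptive note (editor-added): the request carries the sizes and DMA addresses of the extended rx/tx port-stats buffers so firmware can DMA the counters directly; a zero tx_stat_size (no tx ext stats buffer allocated) means no extended tx counters are requested. */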
7881 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 7882 req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 7883 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 7884 sizeof(struct tx_port_stats_ext) : 0; 7885 req.tx_stat_size = cpu_to_le16(tx_stat_size); 7886 req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 7887 mutex_lock(&bp->hwrm_cmd_lock); 7888 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7889 if (!rc) { 7890 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; 7891 bp->fw_tx_stats_ext_size = tx_stat_size ? 7892 le16_to_cpu(resp->tx_stat_size) / 8 : 0; 7893 } else { 7894 bp->fw_rx_stats_ext_size = 0; 7895 bp->fw_tx_stats_ext_size = 0; 7896 } 7897 if (flags) 7898 goto qstats_done; 7899 7900 if (bp->fw_tx_stats_ext_size <= 7901 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 7902 mutex_unlock(&bp->hwrm_cmd_lock); 7903 bp->pri2cos_valid = 0; 7904 return rc; 7905 } 7906 7907 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 7908 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 7909 7910 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); 7911 if (!rc) { 7912 struct hwrm_queue_pri2cos_qcfg_output *resp2; 7913 u8 *pri2cos; 7914 int i, j; 7915 7916 resp2 = bp->hwrm_cmd_resp_addr; 7917 pri2cos = &resp2->pri0_cos_queue_id; 7918 for (i = 0; i < 8; i++) { 7919 u8 queue_id = pri2cos[i]; 7920 u8 queue_idx; 7921 7922 /* Per port queue IDs start from 0, 10, 20, etc */ 7923 queue_idx = queue_id % 10; 7924 if (queue_idx > BNXT_MAX_QUEUE) { 7925 bp->pri2cos_valid = false; 7926 goto qstats_done; 7927 } 7928 for (j = 0; j < bp->max_q; j++) { 7929 if (bp->q_ids[j] == queue_id) 7930 bp->pri2cos_idx[i] = queue_idx; 7931 } 7932 } 7933 bp->pri2cos_valid = 1; 7934 } 7935 qstats_done: 7936 mutex_unlock(&bp->hwrm_cmd_lock); 7937 return rc; 7938 } 7939 7940 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 7941 { 7942 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID) 7943 bnxt_hwrm_tunnel_dst_port_free( 7944 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7945 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID) 7946 bnxt_hwrm_tunnel_dst_port_free( 7947 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7948 } 7949 7950 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 7951 { 7952 int rc, i; 7953 u32 tpa_flags = 0; 7954 7955 if (set_tpa) 7956 tpa_flags = bp->flags & BNXT_FLAG_TPA; 7957 else if (BNXT_NO_FW_ACCESS(bp)) 7958 return 0; 7959 for (i = 0; i < bp->nr_vnics; i++) { 7960 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 7961 if (rc) { 7962 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 7963 i, rc); 7964 return rc; 7965 } 7966 } 7967 return 0; 7968 } 7969 7970 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 7971 { 7972 int i; 7973 7974 for (i = 0; i < bp->nr_vnics; i++) 7975 bnxt_hwrm_vnic_set_rss(bp, i, false); 7976 } 7977 7978 static void bnxt_clear_vnic(struct bnxt *bp) 7979 { 7980 if (!bp->vnic_info) 7981 return; 7982 7983 bnxt_hwrm_clear_vnic_filter(bp); 7984 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { 7985 /* clear all RSS setting before free vnic ctx */ 7986 bnxt_hwrm_clear_vnic_rss(bp); 7987 bnxt_hwrm_vnic_ctx_free(bp); 7988 } 7989 /* before free the vnic, undo the vnic tpa settings */ 7990 if (bp->flags & BNXT_FLAG_TPA) 7991 bnxt_set_tpa(bp, false); 7992 bnxt_hwrm_vnic_free(bp); 7993 if (bp->flags & BNXT_FLAG_CHIP_P5) 7994 bnxt_hwrm_vnic_ctx_free(bp); 7995 } 7996 7997 static void 
bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 7998 bool irq_re_init) 7999 { 8000 bnxt_clear_vnic(bp); 8001 bnxt_hwrm_ring_free(bp, close_path); 8002 bnxt_hwrm_ring_grp_free(bp); 8003 if (irq_re_init) { 8004 bnxt_hwrm_stat_ctx_free(bp); 8005 bnxt_hwrm_free_tunnel_ports(bp); 8006 } 8007 } 8008 8009 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 8010 { 8011 struct hwrm_func_cfg_input req = {0}; 8012 8013 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 8014 req.fid = cpu_to_le16(0xffff); 8015 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 8016 if (br_mode == BRIDGE_MODE_VEB) 8017 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 8018 else if (br_mode == BRIDGE_MODE_VEPA) 8019 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 8020 else 8021 return -EINVAL; 8022 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8023 } 8024 8025 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 8026 { 8027 struct hwrm_func_cfg_input req = {0}; 8028 8029 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 8030 return 0; 8031 8032 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 8033 req.fid = cpu_to_le16(0xffff); 8034 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 8035 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 8036 if (size == 128) 8037 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 8038 8039 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8040 } 8041 8042 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 8043 { 8044 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 8045 int rc; 8046 8047 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 8048 goto skip_rss_ctx; 8049 8050 /* allocate context for vnic */ 8051 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 8052 if (rc) { 8053 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 8054 vnic_id, rc); 8055 goto vnic_setup_err; 8056 } 8057 bp->rsscos_nr_ctxs++; 8058 8059 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8060 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 8061 if (rc) { 8062 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 8063 vnic_id, rc); 8064 goto vnic_setup_err; 8065 } 8066 bp->rsscos_nr_ctxs++; 8067 } 8068 8069 skip_rss_ctx: 8070 /* configure default vnic, ring grp */ 8071 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 8072 if (rc) { 8073 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 8074 vnic_id, rc); 8075 goto vnic_setup_err; 8076 } 8077 8078 /* Enable RSS hashing on vnic */ 8079 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 8080 if (rc) { 8081 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 8082 vnic_id, rc); 8083 goto vnic_setup_err; 8084 } 8085 8086 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 8087 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 8088 if (rc) { 8089 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 8090 vnic_id, rc); 8091 } 8092 } 8093 8094 vnic_setup_err: 8095 return rc; 8096 } 8097 8098 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 8099 { 8100 int rc, i, nr_ctxs; 8101 8102 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 8103 for (i = 0; i < nr_ctxs; i++) { 8104 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 8105 if (rc) { 8106 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 8107 vnic_id, i, rc); 8108 break; 8109 } 8110 bp->rsscos_nr_ctxs++; 8111 } 8112 if (i < nr_ctxs) 8113 return -ENOMEM; 8114 8115 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 8116 if (rc) { 8117 netdev_err(bp->dev, "hwrm vnic %d set rss failure 
rc: %d\n", 8118 vnic_id, rc); 8119 return rc; 8120 } 8121 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 8122 if (rc) { 8123 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 8124 vnic_id, rc); 8125 return rc; 8126 } 8127 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 8128 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 8129 if (rc) { 8130 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 8131 vnic_id, rc); 8132 } 8133 } 8134 return rc; 8135 } 8136 8137 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 8138 { 8139 if (bp->flags & BNXT_FLAG_CHIP_P5) 8140 return __bnxt_setup_vnic_p5(bp, vnic_id); 8141 else 8142 return __bnxt_setup_vnic(bp, vnic_id); 8143 } 8144 8145 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 8146 { 8147 #ifdef CONFIG_RFS_ACCEL 8148 int i, rc = 0; 8149 8150 if (bp->flags & BNXT_FLAG_CHIP_P5) 8151 return 0; 8152 8153 for (i = 0; i < bp->rx_nr_rings; i++) { 8154 struct bnxt_vnic_info *vnic; 8155 u16 vnic_id = i + 1; 8156 u16 ring_id = i; 8157 8158 if (vnic_id >= bp->nr_vnics) 8159 break; 8160 8161 vnic = &bp->vnic_info[vnic_id]; 8162 vnic->flags |= BNXT_VNIC_RFS_FLAG; 8163 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 8164 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 8165 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 8166 if (rc) { 8167 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 8168 vnic_id, rc); 8169 break; 8170 } 8171 rc = bnxt_setup_vnic(bp, vnic_id); 8172 if (rc) 8173 break; 8174 } 8175 return rc; 8176 #else 8177 return 0; 8178 #endif 8179 } 8180 8181 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 8182 static bool bnxt_promisc_ok(struct bnxt *bp) 8183 { 8184 #ifdef CONFIG_BNXT_SRIOV 8185 if (BNXT_VF(bp) && !bp->vf.vlan) 8186 return false; 8187 #endif 8188 return true; 8189 } 8190 8191 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 8192 { 8193 unsigned int rc = 0; 8194 8195 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 8196 if (rc) { 8197 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 8198 rc); 8199 return rc; 8200 } 8201 8202 rc = bnxt_hwrm_vnic_cfg(bp, 1); 8203 if (rc) { 8204 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 8205 rc); 8206 return rc; 8207 } 8208 return rc; 8209 } 8210 8211 static int bnxt_cfg_rx_mode(struct bnxt *); 8212 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 8213 8214 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 8215 { 8216 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 8217 int rc = 0; 8218 unsigned int rx_nr_rings = bp->rx_nr_rings; 8219 8220 if (irq_re_init) { 8221 rc = bnxt_hwrm_stat_ctx_alloc(bp); 8222 if (rc) { 8223 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 8224 rc); 8225 goto err_out; 8226 } 8227 } 8228 8229 rc = bnxt_hwrm_ring_alloc(bp); 8230 if (rc) { 8231 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 8232 goto err_out; 8233 } 8234 8235 rc = bnxt_hwrm_ring_grp_alloc(bp); 8236 if (rc) { 8237 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 8238 goto err_out; 8239 } 8240 8241 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8242 rx_nr_rings--; 8243 8244 /* default vnic 0 */ 8245 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 8246 if (rc) { 8247 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 8248 goto err_out; 8249 } 8250 8251 rc = bnxt_setup_vnic(bp, 0); 8252 if (rc) 8253 goto err_out; 8254 8255 if (bp->flags & BNXT_FLAG_RFS) { 8256 rc = bnxt_alloc_rfs_vnics(bp); 8257 if (rc) 8258 goto err_out; 8259 } 8260 8261 if (bp->flags & BNXT_FLAG_TPA) { 8262 rc = bnxt_set_tpa(bp, 
true); 8263 if (rc) 8264 goto err_out; 8265 } 8266 8267 if (BNXT_VF(bp)) 8268 bnxt_update_vf_mac(bp); 8269 8270 /* Filter for default vnic 0 */ 8271 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 8272 if (rc) { 8273 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 8274 goto err_out; 8275 } 8276 vnic->uc_filter_count = 1; 8277 8278 vnic->rx_mask = 0; 8279 if (bp->dev->flags & IFF_BROADCAST) 8280 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 8281 8282 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 8283 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 8284 8285 if (bp->dev->flags & IFF_ALLMULTI) { 8286 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 8287 vnic->mc_list_count = 0; 8288 } else { 8289 u32 mask = 0; 8290 8291 bnxt_mc_list_updated(bp, &mask); 8292 vnic->rx_mask |= mask; 8293 } 8294 8295 rc = bnxt_cfg_rx_mode(bp); 8296 if (rc) 8297 goto err_out; 8298 8299 rc = bnxt_hwrm_set_coal(bp); 8300 if (rc) 8301 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 8302 rc); 8303 8304 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8305 rc = bnxt_setup_nitroa0_vnic(bp); 8306 if (rc) 8307 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 8308 rc); 8309 } 8310 8311 if (BNXT_VF(bp)) { 8312 bnxt_hwrm_func_qcfg(bp); 8313 netdev_update_features(bp->dev); 8314 } 8315 8316 return 0; 8317 8318 err_out: 8319 bnxt_hwrm_resource_free(bp, 0, true); 8320 8321 return rc; 8322 } 8323 8324 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 8325 { 8326 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 8327 return 0; 8328 } 8329 8330 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 8331 { 8332 bnxt_init_cp_rings(bp); 8333 bnxt_init_rx_rings(bp); 8334 bnxt_init_tx_rings(bp); 8335 bnxt_init_ring_grps(bp, irq_re_init); 8336 bnxt_init_vnics(bp); 8337 8338 return bnxt_init_chip(bp, irq_re_init); 8339 } 8340 8341 static int bnxt_set_real_num_queues(struct bnxt *bp) 8342 { 8343 int rc; 8344 struct net_device *dev = bp->dev; 8345 8346 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 8347 bp->tx_nr_rings_xdp); 8348 if (rc) 8349 return rc; 8350 8351 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 8352 if (rc) 8353 return rc; 8354 8355 #ifdef CONFIG_RFS_ACCEL 8356 if (bp->flags & BNXT_FLAG_RFS) 8357 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 8358 #endif 8359 8360 return rc; 8361 } 8362 8363 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 8364 bool shared) 8365 { 8366 int _rx = *rx, _tx = *tx; 8367 8368 if (shared) { 8369 *rx = min_t(int, _rx, max); 8370 *tx = min_t(int, _tx, max); 8371 } else { 8372 if (max < 2) 8373 return -ENOMEM; 8374 8375 while (_rx + _tx > max) { 8376 if (_rx > _tx && _rx > 1) 8377 _rx--; 8378 else if (_tx > 1) 8379 _tx--; 8380 } 8381 *rx = _rx; 8382 *tx = _tx; 8383 } 8384 return 0; 8385 } 8386 8387 static void bnxt_setup_msix(struct bnxt *bp) 8388 { 8389 const int len = sizeof(bp->irq_tbl[0].name); 8390 struct net_device *dev = bp->dev; 8391 int tcs, i; 8392 8393 tcs = netdev_get_num_tc(dev); 8394 if (tcs) { 8395 int i, off, count; 8396 8397 for (i = 0; i < tcs; i++) { 8398 count = bp->tx_nr_rings_per_tc; 8399 off = i * count; 8400 netdev_set_tc_queue(dev, i, count, off); 8401 } 8402 } 8403 8404 for (i = 0; i < bp->cp_nr_rings; i++) { 8405 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8406 char *attr; 8407 8408 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 8409 attr = "TxRx"; 8410 else if (i < bp->rx_nr_rings) 8411 attr = "rx"; 8412 else 8413 attr = "tx"; 8414 
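/* Descriptive note (editor-added): each vector name encodes the netdev and ring type, e.g. "<dev>-TxRx-<n>" for shared rings or "<dev>-rx-<n>"/"<dev>-tx-<n>" otherwise, so the rings are easy to match up in /proc/interrupts. */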
8415 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 8416 attr, i); 8417 bp->irq_tbl[map_idx].handler = bnxt_msix; 8418 } 8419 } 8420 8421 static void bnxt_setup_inta(struct bnxt *bp) 8422 { 8423 const int len = sizeof(bp->irq_tbl[0].name); 8424 8425 if (netdev_get_num_tc(bp->dev)) 8426 netdev_reset_tc(bp->dev); 8427 8428 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 8429 0); 8430 bp->irq_tbl[0].handler = bnxt_inta; 8431 } 8432 8433 static int bnxt_setup_int_mode(struct bnxt *bp) 8434 { 8435 int rc; 8436 8437 if (bp->flags & BNXT_FLAG_USING_MSIX) 8438 bnxt_setup_msix(bp); 8439 else 8440 bnxt_setup_inta(bp); 8441 8442 rc = bnxt_set_real_num_queues(bp); 8443 return rc; 8444 } 8445 8446 #ifdef CONFIG_RFS_ACCEL 8447 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 8448 { 8449 return bp->hw_resc.max_rsscos_ctxs; 8450 } 8451 8452 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 8453 { 8454 return bp->hw_resc.max_vnics; 8455 } 8456 #endif 8457 8458 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 8459 { 8460 return bp->hw_resc.max_stat_ctxs; 8461 } 8462 8463 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 8464 { 8465 return bp->hw_resc.max_cp_rings; 8466 } 8467 8468 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 8469 { 8470 unsigned int cp = bp->hw_resc.max_cp_rings; 8471 8472 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 8473 cp -= bnxt_get_ulp_msix_num(bp); 8474 8475 return cp; 8476 } 8477 8478 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 8479 { 8480 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8481 8482 if (bp->flags & BNXT_FLAG_CHIP_P5) 8483 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 8484 8485 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 8486 } 8487 8488 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 8489 { 8490 bp->hw_resc.max_irqs = max_irqs; 8491 } 8492 8493 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 8494 { 8495 unsigned int cp; 8496 8497 cp = bnxt_get_max_func_cp_rings_for_en(bp); 8498 if (bp->flags & BNXT_FLAG_CHIP_P5) 8499 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 8500 else 8501 return cp - bp->cp_nr_rings; 8502 } 8503 8504 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 8505 { 8506 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 8507 } 8508 8509 int bnxt_get_avail_msix(struct bnxt *bp, int num) 8510 { 8511 int max_cp = bnxt_get_max_func_cp_rings(bp); 8512 int max_irq = bnxt_get_max_func_irqs(bp); 8513 int total_req = bp->cp_nr_rings + num; 8514 int max_idx, avail_msix; 8515 8516 max_idx = bp->total_irqs; 8517 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 8518 max_idx = min_t(int, bp->total_irqs, max_cp); 8519 avail_msix = max_idx - bp->cp_nr_rings; 8520 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 8521 return avail_msix; 8522 8523 if (max_irq < total_req) { 8524 num = max_irq - bp->cp_nr_rings; 8525 if (num <= 0) 8526 return 0; 8527 } 8528 return num; 8529 } 8530 8531 static int bnxt_get_num_msix(struct bnxt *bp) 8532 { 8533 if (!BNXT_NEW_RM(bp)) 8534 return bnxt_get_max_func_irqs(bp); 8535 8536 return bnxt_nq_rings_in_use(bp); 8537 } 8538 8539 static int bnxt_init_msix(struct bnxt *bp) 8540 { 8541 int i, total_vecs, max, rc = 0, min = 1, ulp_msix; 8542 struct msix_entry *msix_ent; 8543 8544 total_vecs = bnxt_get_num_msix(bp); 8545 max = bnxt_get_max_func_irqs(bp); 8546 if (total_vecs > max) 8547 total_vecs = max; 8548 8549 if (!total_vecs) 8550 
return 0; 8551 8552 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 8553 if (!msix_ent) 8554 return -ENOMEM; 8555 8556 for (i = 0; i < total_vecs; i++) { 8557 msix_ent[i].entry = i; 8558 msix_ent[i].vector = 0; 8559 } 8560 8561 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 8562 min = 2; 8563 8564 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 8565 ulp_msix = bnxt_get_ulp_msix_num(bp); 8566 if (total_vecs < 0 || total_vecs < ulp_msix) { 8567 rc = -ENODEV; 8568 goto msix_setup_exit; 8569 } 8570 8571 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 8572 if (bp->irq_tbl) { 8573 for (i = 0; i < total_vecs; i++) 8574 bp->irq_tbl[i].vector = msix_ent[i].vector; 8575 8576 bp->total_irqs = total_vecs; 8577 /* Trim rings based upon num of vectors allocated */ 8578 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 8579 total_vecs - ulp_msix, min == 1); 8580 if (rc) 8581 goto msix_setup_exit; 8582 8583 bp->cp_nr_rings = (min == 1) ? 8584 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 8585 bp->tx_nr_rings + bp->rx_nr_rings; 8586 8587 } else { 8588 rc = -ENOMEM; 8589 goto msix_setup_exit; 8590 } 8591 bp->flags |= BNXT_FLAG_USING_MSIX; 8592 kfree(msix_ent); 8593 return 0; 8594 8595 msix_setup_exit: 8596 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 8597 kfree(bp->irq_tbl); 8598 bp->irq_tbl = NULL; 8599 pci_disable_msix(bp->pdev); 8600 kfree(msix_ent); 8601 return rc; 8602 } 8603 8604 static int bnxt_init_inta(struct bnxt *bp) 8605 { 8606 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 8607 if (!bp->irq_tbl) 8608 return -ENOMEM; 8609 8610 bp->total_irqs = 1; 8611 bp->rx_nr_rings = 1; 8612 bp->tx_nr_rings = 1; 8613 bp->cp_nr_rings = 1; 8614 bp->flags |= BNXT_FLAG_SHARED_RINGS; 8615 bp->irq_tbl[0].vector = bp->pdev->irq; 8616 return 0; 8617 } 8618 8619 static int bnxt_init_int_mode(struct bnxt *bp) 8620 { 8621 int rc = 0; 8622 8623 if (bp->flags & BNXT_FLAG_MSIX_CAP) 8624 rc = bnxt_init_msix(bp); 8625 8626 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 8627 /* fallback to INTA */ 8628 rc = bnxt_init_inta(bp); 8629 } 8630 return rc; 8631 } 8632 8633 static void bnxt_clear_int_mode(struct bnxt *bp) 8634 { 8635 if (bp->flags & BNXT_FLAG_USING_MSIX) 8636 pci_disable_msix(bp->pdev); 8637 8638 kfree(bp->irq_tbl); 8639 bp->irq_tbl = NULL; 8640 bp->flags &= ~BNXT_FLAG_USING_MSIX; 8641 } 8642 8643 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 8644 { 8645 int tcs = netdev_get_num_tc(bp->dev); 8646 bool irq_cleared = false; 8647 int rc; 8648 8649 if (!bnxt_need_reserve_rings(bp)) 8650 return 0; 8651 8652 if (irq_re_init && BNXT_NEW_RM(bp) && 8653 bnxt_get_num_msix(bp) != bp->total_irqs) { 8654 bnxt_ulp_irq_stop(bp); 8655 bnxt_clear_int_mode(bp); 8656 irq_cleared = true; 8657 } 8658 rc = __bnxt_reserve_rings(bp); 8659 if (irq_cleared) { 8660 if (!rc) 8661 rc = bnxt_init_int_mode(bp); 8662 bnxt_ulp_irq_restart(bp, rc); 8663 } 8664 if (rc) { 8665 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 8666 return rc; 8667 } 8668 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { 8669 netdev_err(bp->dev, "tx ring reservation failure\n"); 8670 netdev_reset_tc(bp->dev); 8671 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 8672 return -ENOMEM; 8673 } 8674 return 0; 8675 } 8676 8677 static void bnxt_free_irq(struct bnxt *bp) 8678 { 8679 struct bnxt_irq *irq; 8680 int i; 8681 8682 #ifdef CONFIG_RFS_ACCEL 8683 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 8684 bp->dev->rx_cpu_rmap = NULL; 
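/* Descriptive note (editor-added): the aRFS CPU rmap is released first; the per-ring IRQ vectors it was built from are freed below. */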
8685 #endif 8686 if (!bp->irq_tbl || !bp->bnapi) 8687 return; 8688 8689 for (i = 0; i < bp->cp_nr_rings; i++) { 8690 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8691 8692 irq = &bp->irq_tbl[map_idx]; 8693 if (irq->requested) { 8694 if (irq->have_cpumask) { 8695 irq_set_affinity_hint(irq->vector, NULL); 8696 free_cpumask_var(irq->cpu_mask); 8697 irq->have_cpumask = 0; 8698 } 8699 free_irq(irq->vector, bp->bnapi[i]); 8700 } 8701 8702 irq->requested = 0; 8703 } 8704 } 8705 8706 static int bnxt_request_irq(struct bnxt *bp) 8707 { 8708 int i, j, rc = 0; 8709 unsigned long flags = 0; 8710 #ifdef CONFIG_RFS_ACCEL 8711 struct cpu_rmap *rmap; 8712 #endif 8713 8714 rc = bnxt_setup_int_mode(bp); 8715 if (rc) { 8716 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 8717 rc); 8718 return rc; 8719 } 8720 #ifdef CONFIG_RFS_ACCEL 8721 rmap = bp->dev->rx_cpu_rmap; 8722 #endif 8723 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 8724 flags = IRQF_SHARED; 8725 8726 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 8727 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8728 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 8729 8730 #ifdef CONFIG_RFS_ACCEL 8731 if (rmap && bp->bnapi[i]->rx_ring) { 8732 rc = irq_cpu_rmap_add(rmap, irq->vector); 8733 if (rc) 8734 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 8735 j); 8736 j++; 8737 } 8738 #endif 8739 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 8740 bp->bnapi[i]); 8741 if (rc) 8742 break; 8743 8744 irq->requested = 1; 8745 8746 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 8747 int numa_node = dev_to_node(&bp->pdev->dev); 8748 8749 irq->have_cpumask = 1; 8750 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 8751 irq->cpu_mask); 8752 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 8753 if (rc) { 8754 netdev_warn(bp->dev, 8755 "Set affinity failed, IRQ = %d\n", 8756 irq->vector); 8757 break; 8758 } 8759 } 8760 } 8761 return rc; 8762 } 8763 8764 static void bnxt_del_napi(struct bnxt *bp) 8765 { 8766 int i; 8767 8768 if (!bp->bnapi) 8769 return; 8770 8771 for (i = 0; i < bp->cp_nr_rings; i++) { 8772 struct bnxt_napi *bnapi = bp->bnapi[i]; 8773 8774 __netif_napi_del(&bnapi->napi); 8775 } 8776 /* We called __netif_napi_del(), we need 8777 * to respect an RCU grace period before freeing napi structures. 
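* The synchronize_net() call below provides that grace period.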
8778 */ 8779 synchronize_net(); 8780 } 8781 8782 static void bnxt_init_napi(struct bnxt *bp) 8783 { 8784 int i; 8785 unsigned int cp_nr_rings = bp->cp_nr_rings; 8786 struct bnxt_napi *bnapi; 8787 8788 if (bp->flags & BNXT_FLAG_USING_MSIX) { 8789 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 8790 8791 if (bp->flags & BNXT_FLAG_CHIP_P5) 8792 poll_fn = bnxt_poll_p5; 8793 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8794 cp_nr_rings--; 8795 for (i = 0; i < cp_nr_rings; i++) { 8796 bnapi = bp->bnapi[i]; 8797 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); 8798 } 8799 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8800 bnapi = bp->bnapi[cp_nr_rings]; 8801 netif_napi_add(bp->dev, &bnapi->napi, 8802 bnxt_poll_nitroa0, 64); 8803 } 8804 } else { 8805 bnapi = bp->bnapi[0]; 8806 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 8807 } 8808 } 8809 8810 static void bnxt_disable_napi(struct bnxt *bp) 8811 { 8812 int i; 8813 8814 if (!bp->bnapi) 8815 return; 8816 8817 for (i = 0; i < bp->cp_nr_rings; i++) { 8818 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 8819 8820 if (bp->bnapi[i]->rx_ring) 8821 cancel_work_sync(&cpr->dim.work); 8822 8823 napi_disable(&bp->bnapi[i]->napi); 8824 } 8825 } 8826 8827 static void bnxt_enable_napi(struct bnxt *bp) 8828 { 8829 int i; 8830 8831 for (i = 0; i < bp->cp_nr_rings; i++) { 8832 struct bnxt_napi *bnapi = bp->bnapi[i]; 8833 struct bnxt_cp_ring_info *cpr; 8834 8835 cpr = &bnapi->cp_ring; 8836 if (bnapi->in_reset) 8837 cpr->sw_stats.rx.rx_resets++; 8838 bnapi->in_reset = false; 8839 8840 if (bnapi->rx_ring) { 8841 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 8842 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 8843 } 8844 napi_enable(&bnapi->napi); 8845 } 8846 } 8847 8848 void bnxt_tx_disable(struct bnxt *bp) 8849 { 8850 int i; 8851 struct bnxt_tx_ring_info *txr; 8852 8853 if (bp->tx_ring) { 8854 for (i = 0; i < bp->tx_nr_rings; i++) { 8855 txr = &bp->tx_ring[i]; 8856 txr->dev_state = BNXT_DEV_STATE_CLOSING; 8857 } 8858 } 8859 /* Stop all TX queues */ 8860 netif_tx_disable(bp->dev); 8861 netif_carrier_off(bp->dev); 8862 } 8863 8864 void bnxt_tx_enable(struct bnxt *bp) 8865 { 8866 int i; 8867 struct bnxt_tx_ring_info *txr; 8868 8869 for (i = 0; i < bp->tx_nr_rings; i++) { 8870 txr = &bp->tx_ring[i]; 8871 txr->dev_state = 0; 8872 } 8873 netif_tx_wake_all_queues(bp->dev); 8874 if (bp->link_info.link_up) 8875 netif_carrier_on(bp->dev); 8876 } 8877 8878 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 8879 { 8880 u8 active_fec = link_info->active_fec_sig_mode & 8881 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 8882 8883 switch (active_fec) { 8884 default: 8885 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 8886 return "None"; 8887 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 8888 return "Clause 74 BaseR"; 8889 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 8890 return "Clause 91 RS(528,514)"; 8891 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 8892 return "Clause 91 RS544_1XN"; 8893 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 8894 return "Clause 91 RS(544,514)"; 8895 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 8896 return "Clause 91 RS272_1XN"; 8897 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 8898 return "Clause 91 RS(272,257)"; 8899 } 8900 } 8901 8902 static void bnxt_report_link(struct bnxt *bp) 8903 { 8904 if (bp->link_info.link_up) { 8905 const char *duplex; 8906 const char *flow_ctrl; 8907 u32 speed; 8908 u16 fec; 8909 8910 netif_carrier_on(bp->dev); 8911 speed = 
bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 8912 if (speed == SPEED_UNKNOWN) { 8913 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 8914 return; 8915 } 8916 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 8917 duplex = "full"; 8918 else 8919 duplex = "half"; 8920 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 8921 flow_ctrl = "ON - receive & transmit"; 8922 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 8923 flow_ctrl = "ON - transmit"; 8924 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 8925 flow_ctrl = "ON - receive"; 8926 else 8927 flow_ctrl = "none"; 8928 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", 8929 speed, duplex, flow_ctrl); 8930 if (bp->flags & BNXT_FLAG_EEE_CAP) 8931 netdev_info(bp->dev, "EEE is %s\n", 8932 bp->eee.eee_active ? "active" : 8933 "not active"); 8934 fec = bp->link_info.fec_cfg; 8935 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 8936 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 8937 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 8938 bnxt_report_fec(&bp->link_info)); 8939 } else { 8940 netif_carrier_off(bp->dev); 8941 netdev_err(bp->dev, "NIC Link is Down\n"); 8942 } 8943 } 8944 8945 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 8946 { 8947 if (!resp->supported_speeds_auto_mode && 8948 !resp->supported_speeds_force_mode && 8949 !resp->supported_pam4_speeds_auto_mode && 8950 !resp->supported_pam4_speeds_force_mode) 8951 return true; 8952 return false; 8953 } 8954 8955 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 8956 { 8957 int rc = 0; 8958 struct hwrm_port_phy_qcaps_input req = {0}; 8959 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 8960 struct bnxt_link_info *link_info = &bp->link_info; 8961 8962 bp->flags &= ~BNXT_FLAG_EEE_CAP; 8963 if (bp->test_info) 8964 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK | 8965 BNXT_TEST_FL_AN_PHY_LPBK); 8966 if (bp->hwrm_spec_code < 0x10201) 8967 return 0; 8968 8969 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 8970 8971 mutex_lock(&bp->hwrm_cmd_lock); 8972 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8973 if (rc) 8974 goto hwrm_phy_qcaps_exit; 8975 8976 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 8977 struct ethtool_eee *eee = &bp->eee; 8978 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 8979 8980 bp->flags |= BNXT_FLAG_EEE_CAP; 8981 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 8982 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 8983 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 8984 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 8985 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 8986 } 8987 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) { 8988 if (bp->test_info) 8989 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; 8990 } 8991 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) { 8992 if (bp->test_info) 8993 bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK; 8994 } 8995 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) { 8996 if (BNXT_PF(bp)) 8997 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; 8998 } 8999 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET) 9000 bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET; 9001 9002 if (bp->hwrm_spec_code >= 0x10a01) { 9003 if (bnxt_phy_qcaps_no_speed(resp)) { 9004 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 9005 netdev_warn(bp->dev, "Ethernet link disabled\n"); 9006 } else if 
(link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 9007 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 9008 netdev_info(bp->dev, "Ethernet link enabled\n"); 9009 /* Phy re-enabled, reprobe the speeds */ 9010 link_info->support_auto_speeds = 0; 9011 link_info->support_pam4_auto_speeds = 0; 9012 } 9013 } 9014 if (resp->supported_speeds_auto_mode) 9015 link_info->support_auto_speeds = 9016 le16_to_cpu(resp->supported_speeds_auto_mode); 9017 if (resp->supported_pam4_speeds_auto_mode) 9018 link_info->support_pam4_auto_speeds = 9019 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 9020 9021 bp->port_count = resp->port_cnt; 9022 9023 hwrm_phy_qcaps_exit: 9024 mutex_unlock(&bp->hwrm_cmd_lock); 9025 return rc; 9026 } 9027 9028 static bool bnxt_support_dropped(u16 advertising, u16 supported) 9029 { 9030 u16 diff = advertising ^ supported; 9031 9032 return ((supported | diff) != supported); 9033 } 9034 9035 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 9036 { 9037 int rc = 0; 9038 struct bnxt_link_info *link_info = &bp->link_info; 9039 struct hwrm_port_phy_qcfg_input req = {0}; 9040 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 9041 u8 link_up = link_info->link_up; 9042 bool support_changed = false; 9043 9044 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 9045 9046 mutex_lock(&bp->hwrm_cmd_lock); 9047 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9048 if (rc) { 9049 mutex_unlock(&bp->hwrm_cmd_lock); 9050 return rc; 9051 } 9052 9053 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 9054 link_info->phy_link_status = resp->link; 9055 link_info->duplex = resp->duplex_cfg; 9056 if (bp->hwrm_spec_code >= 0x10800) 9057 link_info->duplex = resp->duplex_state; 9058 link_info->pause = resp->pause; 9059 link_info->auto_mode = resp->auto_mode; 9060 link_info->auto_pause_setting = resp->auto_pause; 9061 link_info->lp_pause = resp->link_partner_adv_pause; 9062 link_info->force_pause_setting = resp->force_pause; 9063 link_info->duplex_setting = resp->duplex_cfg; 9064 if (link_info->phy_link_status == BNXT_LINK_LINK) 9065 link_info->link_speed = le16_to_cpu(resp->link_speed); 9066 else 9067 link_info->link_speed = 0; 9068 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 9069 link_info->force_pam4_link_speed = 9070 le16_to_cpu(resp->force_pam4_link_speed); 9071 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 9072 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 9073 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 9074 link_info->auto_pam4_link_speeds = 9075 le16_to_cpu(resp->auto_pam4_link_speed_mask); 9076 link_info->lp_auto_link_speeds = 9077 le16_to_cpu(resp->link_partner_adv_speeds); 9078 link_info->lp_auto_pam4_link_speeds = 9079 resp->link_partner_pam4_adv_speeds; 9080 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 9081 link_info->phy_ver[0] = resp->phy_maj; 9082 link_info->phy_ver[1] = resp->phy_min; 9083 link_info->phy_ver[2] = resp->phy_bld; 9084 link_info->media_type = resp->media_type; 9085 link_info->phy_type = resp->phy_type; 9086 link_info->transceiver = resp->xcvr_pkg_type; 9087 link_info->phy_addr = resp->eee_config_phy_addr & 9088 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 9089 link_info->module_status = resp->module_status; 9090 9091 if (bp->flags & BNXT_FLAG_EEE_CAP) { 9092 struct ethtool_eee *eee = &bp->eee; 9093 u16 fw_speeds; 9094 9095 eee->eee_active = 0; 9096 if (resp->eee_config_phy_addr & 9097 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) 
{ 9098 eee->eee_active = 1; 9099 fw_speeds = le16_to_cpu( 9100 resp->link_partner_adv_eee_link_speed_mask); 9101 eee->lp_advertised = 9102 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 9103 } 9104 9105 /* Pull initial EEE config */ 9106 if (!chng_link_state) { 9107 if (resp->eee_config_phy_addr & 9108 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 9109 eee->eee_enabled = 1; 9110 9111 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 9112 eee->advertised = 9113 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 9114 9115 if (resp->eee_config_phy_addr & 9116 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 9117 __le32 tmr; 9118 9119 eee->tx_lpi_enabled = 1; 9120 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 9121 eee->tx_lpi_timer = le32_to_cpu(tmr) & 9122 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 9123 } 9124 } 9125 } 9126 9127 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 9128 if (bp->hwrm_spec_code >= 0x10504) { 9129 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 9130 link_info->active_fec_sig_mode = resp->active_fec_signal_mode; 9131 } 9132 /* TODO: need to add more logic to report VF link */ 9133 if (chng_link_state) { 9134 if (link_info->phy_link_status == BNXT_LINK_LINK) 9135 link_info->link_up = 1; 9136 else 9137 link_info->link_up = 0; 9138 if (link_up != link_info->link_up) 9139 bnxt_report_link(bp); 9140 } else { 9141 /* always link down if not required to update link state */ 9142 link_info->link_up = 0; 9143 } 9144 mutex_unlock(&bp->hwrm_cmd_lock); 9145 9146 if (!BNXT_PHY_CFG_ABLE(bp)) 9147 return 0; 9148 9149 /* Check if any advertised speeds are no longer supported. The caller 9150 * holds the link_lock mutex, so we can modify link_info settings. 9151 */ 9152 if (bnxt_support_dropped(link_info->advertising, 9153 link_info->support_auto_speeds)) { 9154 link_info->advertising = link_info->support_auto_speeds; 9155 support_changed = true; 9156 } 9157 if (bnxt_support_dropped(link_info->advertising_pam4, 9158 link_info->support_pam4_auto_speeds)) { 9159 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; 9160 support_changed = true; 9161 } 9162 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) 9163 bnxt_hwrm_set_link_setting(bp, true, false); 9164 return 0; 9165 } 9166 9167 static void bnxt_get_port_module_status(struct bnxt *bp) 9168 { 9169 struct bnxt_link_info *link_info = &bp->link_info; 9170 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 9171 u8 module_status; 9172 9173 if (bnxt_update_link(bp, true)) 9174 return; 9175 9176 module_status = link_info->module_status; 9177 switch (module_status) { 9178 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 9179 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 9180 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 9181 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 9182 bp->pf.port_id); 9183 if (bp->hwrm_spec_code >= 0x10201) { 9184 netdev_warn(bp->dev, "Module part number %s\n", 9185 resp->phy_vendor_partnumber); 9186 } 9187 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 9188 netdev_warn(bp->dev, "TX is disabled\n"); 9189 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 9190 netdev_warn(bp->dev, "SFP+ module is shut down\n"); 9191 } 9192 } 9193 9194 static void 9195 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 9196 { 9197 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 9198 if (bp->hwrm_spec_code >= 0x10201) 9199 req->auto_pause = 9200 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 9201 if
(bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 9202 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 9203 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 9204 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 9205 req->enables |= 9206 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 9207 } else { 9208 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 9209 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 9210 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 9211 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 9212 req->enables |= 9213 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 9214 if (bp->hwrm_spec_code >= 0x10201) { 9215 req->auto_pause = req->force_pause; 9216 req->enables |= cpu_to_le32( 9217 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 9218 } 9219 } 9220 } 9221 9222 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 9223 { 9224 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 9225 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 9226 if (bp->link_info.advertising) { 9227 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 9228 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 9229 } 9230 if (bp->link_info.advertising_pam4) { 9231 req->enables |= 9232 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 9233 req->auto_link_pam4_speed_mask = 9234 cpu_to_le16(bp->link_info.advertising_pam4); 9235 } 9236 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 9237 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 9238 } else { 9239 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 9240 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 9241 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 9242 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 9243 } else { 9244 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 9245 } 9246 } 9247 9248 /* tell chimp that the setting takes effect immediately */ 9249 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 9250 } 9251 9252 int bnxt_hwrm_set_pause(struct bnxt *bp) 9253 { 9254 struct hwrm_port_phy_cfg_input req = {0}; 9255 int rc; 9256 9257 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 9258 bnxt_hwrm_set_pause_common(bp, &req); 9259 9260 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 9261 bp->link_info.force_link_chng) 9262 bnxt_hwrm_set_link_common(bp, &req); 9263 9264 mutex_lock(&bp->hwrm_cmd_lock); 9265 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9266 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 9267 /* since changing of pause setting doesn't trigger any link 9268 * change event, the driver needs to update the current pause 9269 * result upon successfully return of the phy_cfg command 9270 */ 9271 bp->link_info.pause = 9272 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 9273 bp->link_info.auto_pause_setting = 0; 9274 if (!bp->link_info.force_link_chng) 9275 bnxt_report_link(bp); 9276 } 9277 bp->link_info.force_link_chng = false; 9278 mutex_unlock(&bp->hwrm_cmd_lock); 9279 return rc; 9280 } 9281 9282 static void bnxt_hwrm_set_eee(struct bnxt *bp, 9283 struct hwrm_port_phy_cfg_input *req) 9284 { 9285 struct ethtool_eee *eee = &bp->eee; 9286 9287 if (eee->eee_enabled) { 9288 u16 eee_speeds; 9289 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 9290 9291 if (eee->tx_lpi_enabled) 9292 flags |= 
PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 9293 else 9294 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 9295 9296 req->flags |= cpu_to_le32(flags); 9297 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 9298 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 9299 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 9300 } else { 9301 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 9302 } 9303 } 9304 9305 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 9306 { 9307 struct hwrm_port_phy_cfg_input req = {0}; 9308 9309 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 9310 if (set_pause) 9311 bnxt_hwrm_set_pause_common(bp, &req); 9312 9313 bnxt_hwrm_set_link_common(bp, &req); 9314 9315 if (set_eee) 9316 bnxt_hwrm_set_eee(bp, &req); 9317 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9318 } 9319 9320 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 9321 { 9322 struct hwrm_port_phy_cfg_input req = {0}; 9323 9324 if (!BNXT_SINGLE_PF(bp)) 9325 return 0; 9326 9327 if (pci_num_vf(bp->pdev)) 9328 return 0; 9329 9330 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 9331 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 9332 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9333 } 9334 9335 static int bnxt_fw_init_one(struct bnxt *bp); 9336 9337 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 9338 { 9339 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; 9340 struct hwrm_func_drv_if_change_input req = {0}; 9341 bool resc_reinit = false, fw_reset = false; 9342 u32 flags = 0; 9343 int rc; 9344 9345 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 9346 return 0; 9347 9348 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); 9349 if (up) 9350 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 9351 mutex_lock(&bp->hwrm_cmd_lock); 9352 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9353 if (!rc) 9354 flags = le32_to_cpu(resp->flags); 9355 mutex_unlock(&bp->hwrm_cmd_lock); 9356 if (rc) 9357 return rc; 9358 9359 if (!up) 9360 return 0; 9361 9362 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 9363 resc_reinit = true; 9364 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) 9365 fw_reset = true; 9366 9367 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 9368 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 9369 return -ENODEV; 9370 } 9371 if (resc_reinit || fw_reset) { 9372 if (fw_reset) { 9373 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 9374 bnxt_ulp_stop(bp); 9375 bnxt_free_ctx_mem(bp); 9376 kfree(bp->ctx); 9377 bp->ctx = NULL; 9378 bnxt_dcb_free(bp); 9379 rc = bnxt_fw_init_one(bp); 9380 if (rc) { 9381 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 9382 return rc; 9383 } 9384 bnxt_clear_int_mode(bp); 9385 rc = bnxt_init_int_mode(bp); 9386 if (rc) { 9387 netdev_err(bp->dev, "init int mode failed\n"); 9388 return rc; 9389 } 9390 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 9391 } 9392 if (BNXT_NEW_RM(bp)) { 9393 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9394 9395 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 9396 hw_resc->resv_cp_rings = 0; 9397 hw_resc->resv_stat_ctxs = 0; 9398 hw_resc->resv_irqs = 0; 9399 hw_resc->resv_tx_rings = 0; 9400 hw_resc->resv_rx_rings = 0; 9401 hw_resc->resv_hw_ring_grps = 0; 9402 hw_resc->resv_vnics = 0; 9403 if (!fw_reset) { 9404 bp->tx_nr_rings = 0; 9405 bp->rx_nr_rings = 0; 9406 } 9407 } 9408 } 9409 return 0; 9410 } 9411 9412 static int 
bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 9413 { 9414 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 9415 struct hwrm_port_led_qcaps_input req = {0}; 9416 struct bnxt_pf_info *pf = &bp->pf; 9417 int rc; 9418 9419 bp->num_leds = 0; 9420 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 9421 return 0; 9422 9423 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 9424 req.port_id = cpu_to_le16(pf->port_id); 9425 mutex_lock(&bp->hwrm_cmd_lock); 9426 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9427 if (rc) { 9428 mutex_unlock(&bp->hwrm_cmd_lock); 9429 return rc; 9430 } 9431 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 9432 int i; 9433 9434 bp->num_leds = resp->num_leds; 9435 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 9436 bp->num_leds); 9437 for (i = 0; i < bp->num_leds; i++) { 9438 struct bnxt_led_info *led = &bp->leds[i]; 9439 __le16 caps = led->led_state_caps; 9440 9441 if (!led->led_group_id || 9442 !BNXT_LED_ALT_BLINK_CAP(caps)) { 9443 bp->num_leds = 0; 9444 break; 9445 } 9446 } 9447 } 9448 mutex_unlock(&bp->hwrm_cmd_lock); 9449 return 0; 9450 } 9451 9452 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 9453 { 9454 struct hwrm_wol_filter_alloc_input req = {0}; 9455 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 9456 int rc; 9457 9458 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 9459 req.port_id = cpu_to_le16(bp->pf.port_id); 9460 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 9461 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 9462 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 9463 mutex_lock(&bp->hwrm_cmd_lock); 9464 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9465 if (!rc) 9466 bp->wol_filter_id = resp->wol_filter_id; 9467 mutex_unlock(&bp->hwrm_cmd_lock); 9468 return rc; 9469 } 9470 9471 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 9472 { 9473 struct hwrm_wol_filter_free_input req = {0}; 9474 9475 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 9476 req.port_id = cpu_to_le16(bp->pf.port_id); 9477 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 9478 req.wol_filter_id = bp->wol_filter_id; 9479 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9480 } 9481 9482 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 9483 { 9484 struct hwrm_wol_filter_qcfg_input req = {0}; 9485 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 9486 u16 next_handle = 0; 9487 int rc; 9488 9489 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 9490 req.port_id = cpu_to_le16(bp->pf.port_id); 9491 req.handle = cpu_to_le16(handle); 9492 mutex_lock(&bp->hwrm_cmd_lock); 9493 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9494 if (!rc) { 9495 next_handle = le16_to_cpu(resp->next_handle); 9496 if (next_handle != 0) { 9497 if (resp->wol_type == 9498 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 9499 bp->wol = 1; 9500 bp->wol_filter_id = resp->wol_filter_id; 9501 } 9502 } 9503 } 9504 mutex_unlock(&bp->hwrm_cmd_lock); 9505 return next_handle; 9506 } 9507 9508 static void bnxt_get_wol_settings(struct bnxt *bp) 9509 { 9510 u16 handle = 0; 9511 9512 bp->wol = 0; 9513 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 9514 return; 9515 9516 do { 9517 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 9518 } while (handle && handle != 0xffff); 9519 } 9520 9521 #ifdef CONFIG_BNXT_HWMON 9522 static ssize_t bnxt_show_temp(struct device *dev, 9523 
struct device_attribute *devattr, char *buf) 9524 { 9525 struct hwrm_temp_monitor_query_input req = {0}; 9526 struct hwrm_temp_monitor_query_output *resp; 9527 struct bnxt *bp = dev_get_drvdata(dev); 9528 u32 len = 0; 9529 int rc; 9530 9531 resp = bp->hwrm_cmd_resp_addr; 9532 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); 9533 mutex_lock(&bp->hwrm_cmd_lock); 9534 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9535 if (!rc) 9536 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ 9537 mutex_unlock(&bp->hwrm_cmd_lock); 9538 return rc ?: len; 9539 } 9540 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); 9541 9542 static struct attribute *bnxt_attrs[] = { 9543 &sensor_dev_attr_temp1_input.dev_attr.attr, 9544 NULL 9545 }; 9546 ATTRIBUTE_GROUPS(bnxt); 9547 9548 static void bnxt_hwmon_close(struct bnxt *bp) 9549 { 9550 if (bp->hwmon_dev) { 9551 hwmon_device_unregister(bp->hwmon_dev); 9552 bp->hwmon_dev = NULL; 9553 } 9554 } 9555 9556 static void bnxt_hwmon_open(struct bnxt *bp) 9557 { 9558 struct hwrm_temp_monitor_query_input req = {0}; 9559 struct pci_dev *pdev = bp->pdev; 9560 int rc; 9561 9562 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); 9563 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9564 if (rc == -EACCES || rc == -EOPNOTSUPP) { 9565 bnxt_hwmon_close(bp); 9566 return; 9567 } 9568 9569 if (bp->hwmon_dev) 9570 return; 9571 9572 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, 9573 DRV_MODULE_NAME, bp, 9574 bnxt_groups); 9575 if (IS_ERR(bp->hwmon_dev)) { 9576 bp->hwmon_dev = NULL; 9577 dev_warn(&pdev->dev, "Cannot register hwmon device\n"); 9578 } 9579 } 9580 #else 9581 static void bnxt_hwmon_close(struct bnxt *bp) 9582 { 9583 } 9584 9585 static void bnxt_hwmon_open(struct bnxt *bp) 9586 { 9587 } 9588 #endif 9589 9590 static bool bnxt_eee_config_ok(struct bnxt *bp) 9591 { 9592 struct ethtool_eee *eee = &bp->eee; 9593 struct bnxt_link_info *link_info = &bp->link_info; 9594 9595 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 9596 return true; 9597 9598 if (eee->eee_enabled) { 9599 u32 advertising = 9600 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 9601 9602 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9603 eee->eee_enabled = 0; 9604 return false; 9605 } 9606 if (eee->advertised & ~advertising) { 9607 eee->advertised = advertising & eee->supported; 9608 return false; 9609 } 9610 } 9611 return true; 9612 } 9613 9614 static int bnxt_update_phy_setting(struct bnxt *bp) 9615 { 9616 int rc; 9617 bool update_link = false; 9618 bool update_pause = false; 9619 bool update_eee = false; 9620 struct bnxt_link_info *link_info = &bp->link_info; 9621 9622 rc = bnxt_update_link(bp, true); 9623 if (rc) { 9624 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 9625 rc); 9626 return rc; 9627 } 9628 if (!BNXT_SINGLE_PF(bp)) 9629 return 0; 9630 9631 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9632 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 9633 link_info->req_flow_ctrl) 9634 update_pause = true; 9635 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9636 link_info->force_pause_setting != link_info->req_flow_ctrl) 9637 update_pause = true; 9638 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9639 if (BNXT_AUTO_MODE(link_info->auto_mode)) 9640 update_link = true; 9641 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && 9642 link_info->req_link_speed != link_info->force_link_speed) 9643 update_link = true; 9644 else if 
(link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 9645 link_info->req_link_speed != link_info->force_pam4_link_speed) 9646 update_link = true; 9647 if (link_info->req_duplex != link_info->duplex_setting) 9648 update_link = true; 9649 } else { 9650 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 9651 update_link = true; 9652 if (link_info->advertising != link_info->auto_link_speeds || 9653 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 9654 update_link = true; 9655 } 9656 9657 /* The last close may have shut down the link, so we need to call 9658 * PHY_CFG to bring it back up. 9659 */ 9660 if (!bp->link_info.link_up) 9661 update_link = true; 9662 9663 if (!bnxt_eee_config_ok(bp)) 9664 update_eee = true; 9665 9666 if (update_link) 9667 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 9668 else if (update_pause) 9669 rc = bnxt_hwrm_set_pause(bp); 9670 if (rc) { 9671 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 9672 rc); 9673 return rc; 9674 } 9675 9676 return rc; 9677 } 9678 9679 /* Common routine to pre-map certain register block to different GRC window. 9680 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 9681 * in the PF and 3 windows in the VF can be customized to map in different 9682 * register blocks. 9683 */ 9684 static void bnxt_preset_reg_win(struct bnxt *bp) 9685 { 9686 if (BNXT_PF(bp)) { 9687 /* CAG registers map to GRC window #4 */ 9688 writel(BNXT_CAG_REG_BASE, 9689 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 9690 } 9691 } 9692 9693 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 9694 9695 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9696 { 9697 int rc = 0; 9698 9699 bnxt_preset_reg_win(bp); 9700 netif_carrier_off(bp->dev); 9701 if (irq_re_init) { 9702 /* Reserve rings now if none were reserved at driver probe.
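 * bnxt_init_dflt_ring_mode() below is expected to return early without doing anything if the rings have already been reserved.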
*/ 9703 rc = bnxt_init_dflt_ring_mode(bp); 9704 if (rc) { 9705 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 9706 return rc; 9707 } 9708 } 9709 rc = bnxt_reserve_rings(bp, irq_re_init); 9710 if (rc) 9711 return rc; 9712 if ((bp->flags & BNXT_FLAG_RFS) && 9713 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 9714 /* disable RFS if falling back to INTA */ 9715 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 9716 bp->flags &= ~BNXT_FLAG_RFS; 9717 } 9718 9719 rc = bnxt_alloc_mem(bp, irq_re_init); 9720 if (rc) { 9721 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9722 goto open_err_free_mem; 9723 } 9724 9725 if (irq_re_init) { 9726 bnxt_init_napi(bp); 9727 rc = bnxt_request_irq(bp); 9728 if (rc) { 9729 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 9730 goto open_err_irq; 9731 } 9732 } 9733 9734 rc = bnxt_init_nic(bp, irq_re_init); 9735 if (rc) { 9736 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9737 goto open_err_irq; 9738 } 9739 9740 bnxt_enable_napi(bp); 9741 bnxt_debug_dev_init(bp); 9742 9743 if (link_re_init) { 9744 mutex_lock(&bp->link_lock); 9745 rc = bnxt_update_phy_setting(bp); 9746 mutex_unlock(&bp->link_lock); 9747 if (rc) { 9748 netdev_warn(bp->dev, "failed to update phy settings\n"); 9749 if (BNXT_SINGLE_PF(bp)) { 9750 bp->link_info.phy_retry = true; 9751 bp->link_info.phy_retry_expires = 9752 jiffies + 5 * HZ; 9753 } 9754 } 9755 } 9756 9757 if (irq_re_init) 9758 udp_tunnel_nic_reset_ntf(bp->dev); 9759 9760 set_bit(BNXT_STATE_OPEN, &bp->state); 9761 bnxt_enable_int(bp); 9762 /* Enable TX queues */ 9763 bnxt_tx_enable(bp); 9764 mod_timer(&bp->timer, jiffies + bp->current_interval); 9765 /* Poll link status and check for SFP+ module status */ 9766 bnxt_get_port_module_status(bp); 9767 9768 /* VF-reps may need to be re-opened after the PF is re-opened */ 9769 if (BNXT_PF(bp)) 9770 bnxt_vf_reps_open(bp); 9771 return 0; 9772 9773 open_err_irq: 9774 bnxt_del_napi(bp); 9775 9776 open_err_free_mem: 9777 bnxt_free_skbs(bp); 9778 bnxt_free_irq(bp); 9779 bnxt_free_mem(bp, true); 9780 return rc; 9781 } 9782 9783 /* rtnl_lock held */ 9784 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9785 { 9786 int rc = 0; 9787 9788 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 9789 rc = -EIO; 9790 if (!rc) 9791 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 9792 if (rc) { 9793 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 9794 dev_close(bp->dev); 9795 } 9796 return rc; 9797 } 9798 9799 /* rtnl_lock held, open the NIC half way by allocating all resources, but 9800 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 9801 * self tests. 9802 */ 9803 int bnxt_half_open_nic(struct bnxt *bp) 9804 { 9805 int rc = 0; 9806 9807 rc = bnxt_alloc_mem(bp, false); 9808 if (rc) { 9809 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9810 goto half_open_err; 9811 } 9812 rc = bnxt_init_nic(bp, false); 9813 if (rc) { 9814 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9815 goto half_open_err; 9816 } 9817 return 0; 9818 9819 half_open_err: 9820 bnxt_free_skbs(bp); 9821 bnxt_free_mem(bp, false); 9822 dev_close(bp->dev); 9823 return rc; 9824 } 9825 9826 /* rtnl_lock held, this call can only be made after a previous successful 9827 * call to bnxt_half_open_nic(). 
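 * Unlike the full close path, NAPI and IRQs were never set up by bnxt_half_open_nic(), so only the rings, buffers and memory are freed here.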
9828 */ 9829 void bnxt_half_close_nic(struct bnxt *bp) 9830 { 9831 bnxt_hwrm_resource_free(bp, false, false); 9832 bnxt_free_skbs(bp); 9833 bnxt_free_mem(bp, false); 9834 } 9835 9836 static void bnxt_reenable_sriov(struct bnxt *bp) 9837 { 9838 if (BNXT_PF(bp)) { 9839 struct bnxt_pf_info *pf = &bp->pf; 9840 int n = pf->active_vfs; 9841 9842 if (n) 9843 bnxt_cfg_hw_sriov(bp, &n, true); 9844 } 9845 } 9846 9847 static int bnxt_open(struct net_device *dev) 9848 { 9849 struct bnxt *bp = netdev_priv(dev); 9850 int rc; 9851 9852 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 9853 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n"); 9854 return -ENODEV; 9855 } 9856 9857 rc = bnxt_hwrm_if_change(bp, true); 9858 if (rc) 9859 return rc; 9860 rc = __bnxt_open_nic(bp, true, true); 9861 if (rc) { 9862 bnxt_hwrm_if_change(bp, false); 9863 } else { 9864 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 9865 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 9866 bnxt_ulp_start(bp, 0); 9867 bnxt_reenable_sriov(bp); 9868 } 9869 } 9870 bnxt_hwmon_open(bp); 9871 } 9872 9873 return rc; 9874 } 9875 9876 static bool bnxt_drv_busy(struct bnxt *bp) 9877 { 9878 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 9879 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 9880 } 9881 9882 static void bnxt_get_ring_stats(struct bnxt *bp, 9883 struct rtnl_link_stats64 *stats); 9884 9885 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 9886 bool link_re_init) 9887 { 9888 /* Close the VF-reps before closing PF */ 9889 if (BNXT_PF(bp)) 9890 bnxt_vf_reps_close(bp); 9891 9892 /* Change device state to avoid TX queue wake-ups */ 9893 bnxt_tx_disable(bp); 9894 9895 clear_bit(BNXT_STATE_OPEN, &bp->state); 9896 smp_mb__after_atomic(); 9897 while (bnxt_drv_busy(bp)) 9898 msleep(20); 9899 9900 /* Flush rings and disable interrupts */ 9901 bnxt_shutdown_nic(bp, irq_re_init); 9902 9903 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 9904 9905 bnxt_debug_dev_exit(bp); 9906 bnxt_disable_napi(bp); 9907 del_timer_sync(&bp->timer); 9908 bnxt_free_skbs(bp); 9909 9910 /* Save ring stats before shutdown */ 9911 if (bp->bnapi && irq_re_init) 9912 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 9913 if (irq_re_init) { 9914 bnxt_free_irq(bp); 9915 bnxt_del_napi(bp); 9916 } 9917 bnxt_free_mem(bp, irq_re_init); 9918 } 9919 9920 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9921 { 9922 int rc = 0; 9923 9924 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 9925 /* If we get here, it means firmware reset is in progress 9926 * while we are trying to close. We can safely proceed with 9927 * the close because we are holding rtnl_lock(). Some firmware 9928 * messages may fail as we proceed to close. We set the 9929 * ABORT_ERR flag here so that the FW reset thread will later 9930 * abort when it gets the rtnl_lock() and sees the flag.
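 * (bnxt_open() checks BNXT_STATE_ABORT_ERR and returns -ENODEV in that case.)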
9931 */ 9932 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 9933 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 9934 } 9935 9936 #ifdef CONFIG_BNXT_SRIOV 9937 if (bp->sriov_cfg) { 9938 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 9939 !bp->sriov_cfg, 9940 BNXT_SRIOV_CFG_WAIT_TMO); 9941 if (rc) 9942 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 9943 } 9944 #endif 9945 __bnxt_close_nic(bp, irq_re_init, link_re_init); 9946 return rc; 9947 } 9948 9949 static int bnxt_close(struct net_device *dev) 9950 { 9951 struct bnxt *bp = netdev_priv(dev); 9952 9953 bnxt_hwmon_close(bp); 9954 bnxt_close_nic(bp, true, true); 9955 bnxt_hwrm_shutdown_link(bp); 9956 bnxt_hwrm_if_change(bp, false); 9957 return 0; 9958 } 9959 9960 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 9961 u16 *val) 9962 { 9963 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; 9964 struct hwrm_port_phy_mdio_read_input req = {0}; 9965 int rc; 9966 9967 if (bp->hwrm_spec_code < 0x10a00) 9968 return -EOPNOTSUPP; 9969 9970 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); 9971 req.port_id = cpu_to_le16(bp->pf.port_id); 9972 req.phy_addr = phy_addr; 9973 req.reg_addr = cpu_to_le16(reg & 0x1f); 9974 if (mdio_phy_id_is_c45(phy_addr)) { 9975 req.cl45_mdio = 1; 9976 req.phy_addr = mdio_phy_id_prtad(phy_addr); 9977 req.dev_addr = mdio_phy_id_devad(phy_addr); 9978 req.reg_addr = cpu_to_le16(reg); 9979 } 9980 9981 mutex_lock(&bp->hwrm_cmd_lock); 9982 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9983 if (!rc) 9984 *val = le16_to_cpu(resp->reg_data); 9985 mutex_unlock(&bp->hwrm_cmd_lock); 9986 return rc; 9987 } 9988 9989 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 9990 u16 val) 9991 { 9992 struct hwrm_port_phy_mdio_write_input req = {0}; 9993 9994 if (bp->hwrm_spec_code < 0x10a00) 9995 return -EOPNOTSUPP; 9996 9997 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); 9998 req.port_id = cpu_to_le16(bp->pf.port_id); 9999 req.phy_addr = phy_addr; 10000 req.reg_addr = cpu_to_le16(reg & 0x1f); 10001 if (mdio_phy_id_is_c45(phy_addr)) { 10002 req.cl45_mdio = 1; 10003 req.phy_addr = mdio_phy_id_prtad(phy_addr); 10004 req.dev_addr = mdio_phy_id_devad(phy_addr); 10005 req.reg_addr = cpu_to_le16(reg); 10006 } 10007 req.reg_data = cpu_to_le16(val); 10008 10009 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10010 } 10011 10012 /* rtnl_lock held */ 10013 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 10014 { 10015 struct mii_ioctl_data *mdio = if_mii(ifr); 10016 struct bnxt *bp = netdev_priv(dev); 10017 int rc; 10018 10019 switch (cmd) { 10020 case SIOCGMIIPHY: 10021 mdio->phy_id = bp->link_info.phy_addr; 10022 10023 fallthrough; 10024 case SIOCGMIIREG: { 10025 u16 mii_regval = 0; 10026 10027 if (!netif_running(dev)) 10028 return -EAGAIN; 10029 10030 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 10031 &mii_regval); 10032 mdio->val_out = mii_regval; 10033 return rc; 10034 } 10035 10036 case SIOCSMIIREG: 10037 if (!netif_running(dev)) 10038 return -EAGAIN; 10039 10040 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 10041 mdio->val_in); 10042 10043 default: 10044 /* do nothing */ 10045 break; 10046 } 10047 return -EOPNOTSUPP; 10048 } 10049 10050 static void bnxt_get_ring_stats(struct bnxt *bp, 10051 struct rtnl_link_stats64 *stats) 10052 { 10053 int i; 10054 10055 for (i = 
0; i < bp->cp_nr_rings; i++) { 10056 struct bnxt_napi *bnapi = bp->bnapi[i]; 10057 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 10058 u64 *sw = cpr->stats.sw_stats; 10059 10060 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 10061 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 10062 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 10063 10064 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 10065 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 10066 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 10067 10068 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 10069 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 10070 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 10071 10072 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 10073 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 10074 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 10075 10076 stats->rx_missed_errors += 10077 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 10078 10079 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 10080 10081 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 10082 } 10083 } 10084 10085 static void bnxt_add_prev_stats(struct bnxt *bp, 10086 struct rtnl_link_stats64 *stats) 10087 { 10088 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 10089 10090 stats->rx_packets += prev_stats->rx_packets; 10091 stats->tx_packets += prev_stats->tx_packets; 10092 stats->rx_bytes += prev_stats->rx_bytes; 10093 stats->tx_bytes += prev_stats->tx_bytes; 10094 stats->rx_missed_errors += prev_stats->rx_missed_errors; 10095 stats->multicast += prev_stats->multicast; 10096 stats->tx_dropped += prev_stats->tx_dropped; 10097 } 10098 10099 static void 10100 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 10101 { 10102 struct bnxt *bp = netdev_priv(dev); 10103 10104 set_bit(BNXT_STATE_READ_STATS, &bp->state); 10105 /* Make sure bnxt_close_nic() sees that we are reading stats before 10106 * we check the BNXT_STATE_OPEN flag. 
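 * This barrier pairs with the smp_mb__after_atomic() that follows clear_bit(BNXT_STATE_OPEN) in __bnxt_close_nic().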
10107 */ 10108 smp_mb__after_atomic(); 10109 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 10110 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 10111 *stats = bp->net_stats_prev; 10112 return; 10113 } 10114 10115 bnxt_get_ring_stats(bp, stats); 10116 bnxt_add_prev_stats(bp, stats); 10117 10118 if (bp->flags & BNXT_FLAG_PORT_STATS) { 10119 u64 *rx = bp->port_stats.sw_stats; 10120 u64 *tx = bp->port_stats.sw_stats + 10121 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10122 10123 stats->rx_crc_errors = 10124 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 10125 stats->rx_frame_errors = 10126 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 10127 stats->rx_length_errors = 10128 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 10129 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 10130 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 10131 stats->rx_errors = 10132 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 10133 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 10134 stats->collisions = 10135 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 10136 stats->tx_fifo_errors = 10137 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 10138 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 10139 } 10140 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 10141 } 10142 10143 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 10144 { 10145 struct net_device *dev = bp->dev; 10146 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 10147 struct netdev_hw_addr *ha; 10148 u8 *haddr; 10149 int mc_count = 0; 10150 bool update = false; 10151 int off = 0; 10152 10153 netdev_for_each_mc_addr(ha, dev) { 10154 if (mc_count >= BNXT_MAX_MC_ADDRS) { 10155 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10156 vnic->mc_list_count = 0; 10157 return false; 10158 } 10159 haddr = ha->addr; 10160 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 10161 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 10162 update = true; 10163 } 10164 off += ETH_ALEN; 10165 mc_count++; 10166 } 10167 if (mc_count) 10168 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 10169 10170 if (mc_count != vnic->mc_list_count) { 10171 vnic->mc_list_count = mc_count; 10172 update = true; 10173 } 10174 return update; 10175 } 10176 10177 static bool bnxt_uc_list_updated(struct bnxt *bp) 10178 { 10179 struct net_device *dev = bp->dev; 10180 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 10181 struct netdev_hw_addr *ha; 10182 int off = 0; 10183 10184 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 10185 return true; 10186 10187 netdev_for_each_uc_addr(ha, dev) { 10188 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 10189 return true; 10190 10191 off += ETH_ALEN; 10192 } 10193 return false; 10194 } 10195 10196 static void bnxt_set_rx_mode(struct net_device *dev) 10197 { 10198 struct bnxt *bp = netdev_priv(dev); 10199 struct bnxt_vnic_info *vnic; 10200 bool mc_update = false; 10201 bool uc_update; 10202 u32 mask; 10203 10204 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 10205 return; 10206 10207 vnic = &bp->vnic_info[0]; 10208 mask = vnic->rx_mask; 10209 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 10210 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 10211 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 10212 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 10213 10214 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 10215 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10216 10217 uc_update = bnxt_uc_list_updated(bp); 10218 10219 if (dev->flags & IFF_BROADCAST) 10220 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 10221 if (dev->flags & IFF_ALLMULTI) { 10222 
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10223 vnic->mc_list_count = 0; 10224 } else { 10225 mc_update = bnxt_mc_list_updated(bp, &mask); 10226 } 10227 10228 if (mask != vnic->rx_mask || uc_update || mc_update) { 10229 vnic->rx_mask = mask; 10230 10231 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 10232 bnxt_queue_sp_work(bp); 10233 } 10234 } 10235 10236 static int bnxt_cfg_rx_mode(struct bnxt *bp) 10237 { 10238 struct net_device *dev = bp->dev; 10239 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 10240 struct netdev_hw_addr *ha; 10241 int i, off = 0, rc; 10242 bool uc_update; 10243 10244 netif_addr_lock_bh(dev); 10245 uc_update = bnxt_uc_list_updated(bp); 10246 netif_addr_unlock_bh(dev); 10247 10248 if (!uc_update) 10249 goto skip_uc; 10250 10251 mutex_lock(&bp->hwrm_cmd_lock); 10252 for (i = 1; i < vnic->uc_filter_count; i++) { 10253 struct hwrm_cfa_l2_filter_free_input req = {0}; 10254 10255 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 10256 -1); 10257 10258 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 10259 10260 rc = _hwrm_send_message(bp, &req, sizeof(req), 10261 HWRM_CMD_TIMEOUT); 10262 } 10263 mutex_unlock(&bp->hwrm_cmd_lock); 10264 10265 vnic->uc_filter_count = 1; 10266 10267 netif_addr_lock_bh(dev); 10268 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 10269 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10270 } else { 10271 netdev_for_each_uc_addr(ha, dev) { 10272 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 10273 off += ETH_ALEN; 10274 vnic->uc_filter_count++; 10275 } 10276 } 10277 netif_addr_unlock_bh(dev); 10278 10279 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 10280 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 10281 if (rc) { 10282 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 10283 rc); 10284 vnic->uc_filter_count = i; 10285 return rc; 10286 } 10287 } 10288 10289 skip_uc: 10290 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 10291 if (rc && vnic->mc_list_count) { 10292 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 10293 rc); 10294 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10295 vnic->mc_list_count = 0; 10296 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 10297 } 10298 if (rc) 10299 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 10300 rc); 10301 10302 return rc; 10303 } 10304 10305 static bool bnxt_can_reserve_rings(struct bnxt *bp) 10306 { 10307 #ifdef CONFIG_BNXT_SRIOV 10308 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 10309 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 10310 10311 /* No minimum rings were provisioned by the PF. Don't 10312 * reserve rings by default when device is down. 
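 * Reserving is still allowed once the interface is up; see the netif_running() check below.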
10313 */ 10314 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 10315 return true; 10316 10317 if (!netif_running(bp->dev)) 10318 return false; 10319 } 10320 #endif 10321 return true; 10322 } 10323 10324 /* If the chip and firmware support RFS */ 10325 static bool bnxt_rfs_supported(struct bnxt *bp) 10326 { 10327 if (bp->flags & BNXT_FLAG_CHIP_P5) { 10328 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 10329 return true; 10330 return false; 10331 } 10332 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 10333 return true; 10334 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 10335 return true; 10336 return false; 10337 } 10338 10339 /* If runtime conditions support RFS */ 10340 static bool bnxt_rfs_capable(struct bnxt *bp) 10341 { 10342 #ifdef CONFIG_RFS_ACCEL 10343 int vnics, max_vnics, max_rss_ctxs; 10344 10345 if (bp->flags & BNXT_FLAG_CHIP_P5) 10346 return bnxt_rfs_supported(bp); 10347 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) 10348 return false; 10349 10350 vnics = 1 + bp->rx_nr_rings; 10351 max_vnics = bnxt_get_max_func_vnics(bp); 10352 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 10353 10354 /* RSS contexts not a limiting factor */ 10355 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 10356 max_rss_ctxs = max_vnics; 10357 if (vnics > max_vnics || vnics > max_rss_ctxs) { 10358 if (bp->rx_nr_rings > 1) 10359 netdev_warn(bp->dev, 10360 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 10361 min(max_rss_ctxs - 1, max_vnics - 1)); 10362 return false; 10363 } 10364 10365 if (!BNXT_NEW_RM(bp)) 10366 return true; 10367 10368 if (vnics == bp->hw_resc.resv_vnics) 10369 return true; 10370 10371 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); 10372 if (vnics <= bp->hw_resc.resv_vnics) 10373 return true; 10374 10375 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 10376 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); 10377 return false; 10378 #else 10379 return false; 10380 #endif 10381 } 10382 10383 static netdev_features_t bnxt_fix_features(struct net_device *dev, 10384 netdev_features_t features) 10385 { 10386 struct bnxt *bp = netdev_priv(dev); 10387 netdev_features_t vlan_features; 10388 10389 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 10390 features &= ~NETIF_F_NTUPLE; 10391 10392 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 10393 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 10394 10395 if (!(features & NETIF_F_GRO)) 10396 features &= ~NETIF_F_GRO_HW; 10397 10398 if (features & NETIF_F_GRO_HW) 10399 features &= ~NETIF_F_LRO; 10400 10401 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 10402 * turned on or off together.
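 * If the request has only one of them set, clear both when RX VLAN acceleration is currently enabled, otherwise set both.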
10403 */ 10404 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 10405 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 10406 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 10407 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 10408 else if (vlan_features) 10409 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 10410 } 10411 #ifdef CONFIG_BNXT_SRIOV 10412 if (BNXT_VF(bp) && bp->vf.vlan) 10413 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 10414 #endif 10415 return features; 10416 } 10417 10418 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 10419 { 10420 struct bnxt *bp = netdev_priv(dev); 10421 u32 flags = bp->flags; 10422 u32 changes; 10423 int rc = 0; 10424 bool re_init = false; 10425 bool update_tpa = false; 10426 10427 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 10428 if (features & NETIF_F_GRO_HW) 10429 flags |= BNXT_FLAG_GRO; 10430 else if (features & NETIF_F_LRO) 10431 flags |= BNXT_FLAG_LRO; 10432 10433 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 10434 flags &= ~BNXT_FLAG_TPA; 10435 10436 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 10437 flags |= BNXT_FLAG_STRIP_VLAN; 10438 10439 if (features & NETIF_F_NTUPLE) 10440 flags |= BNXT_FLAG_RFS; 10441 10442 changes = flags ^ bp->flags; 10443 if (changes & BNXT_FLAG_TPA) { 10444 update_tpa = true; 10445 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 10446 (flags & BNXT_FLAG_TPA) == 0 || 10447 (bp->flags & BNXT_FLAG_CHIP_P5)) 10448 re_init = true; 10449 } 10450 10451 if (changes & ~BNXT_FLAG_TPA) 10452 re_init = true; 10453 10454 if (flags != bp->flags) { 10455 u32 old_flags = bp->flags; 10456 10457 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 10458 bp->flags = flags; 10459 if (update_tpa) 10460 bnxt_set_ring_params(bp); 10461 return rc; 10462 } 10463 10464 if (re_init) { 10465 bnxt_close_nic(bp, false, false); 10466 bp->flags = flags; 10467 if (update_tpa) 10468 bnxt_set_ring_params(bp); 10469 10470 return bnxt_open_nic(bp, false, false); 10471 } 10472 if (update_tpa) { 10473 bp->flags = flags; 10474 rc = bnxt_set_tpa(bp, 10475 (flags & BNXT_FLAG_TPA) ? 
10476 true : false); 10477 if (rc) 10478 bp->flags = old_flags; 10479 } 10480 } 10481 return rc; 10482 } 10483 10484 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 10485 u32 *reg_buf) 10486 { 10487 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; 10488 struct hwrm_dbg_read_direct_input req = {0}; 10489 __le32 *dbg_reg_buf; 10490 dma_addr_t mapping; 10491 int rc, i; 10492 10493 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, 10494 &mapping, GFP_KERNEL); 10495 if (!dbg_reg_buf) 10496 return -ENOMEM; 10497 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); 10498 req.host_dest_addr = cpu_to_le64(mapping); 10499 req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 10500 req.read_len32 = cpu_to_le32(num_words); 10501 mutex_lock(&bp->hwrm_cmd_lock); 10502 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10503 if (rc || resp->error_code) { 10504 rc = -EIO; 10505 goto dbg_rd_reg_exit; 10506 } 10507 for (i = 0; i < num_words; i++) 10508 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 10509 10510 dbg_rd_reg_exit: 10511 mutex_unlock(&bp->hwrm_cmd_lock); 10512 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); 10513 return rc; 10514 } 10515 10516 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 10517 u32 ring_id, u32 *prod, u32 *cons) 10518 { 10519 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; 10520 struct hwrm_dbg_ring_info_get_input req = {0}; 10521 int rc; 10522 10523 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); 10524 req.ring_type = ring_type; 10525 req.fw_ring_id = cpu_to_le32(ring_id); 10526 mutex_lock(&bp->hwrm_cmd_lock); 10527 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10528 if (!rc) { 10529 *prod = le32_to_cpu(resp->producer_index); 10530 *cons = le32_to_cpu(resp->consumer_index); 10531 } 10532 mutex_unlock(&bp->hwrm_cmd_lock); 10533 return rc; 10534 } 10535 10536 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 10537 { 10538 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 10539 int i = bnapi->index; 10540 10541 if (!txr) 10542 return; 10543 10544 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 10545 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 10546 txr->tx_cons); 10547 } 10548 10549 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 10550 { 10551 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 10552 int i = bnapi->index; 10553 10554 if (!rxr) 10555 return; 10556 10557 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 10558 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 10559 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 10560 rxr->rx_sw_agg_prod); 10561 } 10562 10563 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 10564 { 10565 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 10566 int i = bnapi->index; 10567 10568 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 10569 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 10570 } 10571 10572 static void bnxt_dbg_dump_states(struct bnxt *bp) 10573 { 10574 int i; 10575 struct bnxt_napi *bnapi; 10576 10577 for (i = 0; i < bp->cp_nr_rings; i++) { 10578 bnapi = bp->bnapi[i]; 10579 if (netif_msg_drv(bp)) { 10580 bnxt_dump_tx_sw_state(bnapi); 10581 bnxt_dump_rx_sw_state(bnapi); 10582 bnxt_dump_cp_sw_state(bnapi); 10583 } 10584 } 10585 } 10586 10587 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int 
ring_nr) 10588 { 10589 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 10590 struct hwrm_ring_reset_input req = {0}; 10591 struct bnxt_napi *bnapi = rxr->bnapi; 10592 struct bnxt_cp_ring_info *cpr; 10593 u16 cp_ring_id; 10594 10595 cpr = &bnapi->cp_ring; 10596 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 10597 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1); 10598 req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 10599 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 10600 return hwrm_send_message_silent(bp, &req, sizeof(req), 10601 HWRM_CMD_TIMEOUT); 10602 } 10603 10604 static void bnxt_reset_task(struct bnxt *bp, bool silent) 10605 { 10606 if (!silent) 10607 bnxt_dbg_dump_states(bp); 10608 if (netif_running(bp->dev)) { 10609 int rc; 10610 10611 if (silent) { 10612 bnxt_close_nic(bp, false, false); 10613 bnxt_open_nic(bp, false, false); 10614 } else { 10615 bnxt_ulp_stop(bp); 10616 bnxt_close_nic(bp, true, false); 10617 rc = bnxt_open_nic(bp, true, false); 10618 bnxt_ulp_start(bp, rc); 10619 } 10620 } 10621 } 10622 10623 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 10624 { 10625 struct bnxt *bp = netdev_priv(dev); 10626 10627 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 10628 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 10629 bnxt_queue_sp_work(bp); 10630 } 10631 10632 static void bnxt_fw_health_check(struct bnxt *bp) 10633 { 10634 struct bnxt_fw_health *fw_health = bp->fw_health; 10635 u32 val; 10636 10637 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10638 return; 10639 10640 if (fw_health->tmr_counter) { 10641 fw_health->tmr_counter--; 10642 return; 10643 } 10644 10645 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 10646 if (val == fw_health->last_fw_heartbeat) 10647 goto fw_reset; 10648 10649 fw_health->last_fw_heartbeat = val; 10650 10651 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 10652 if (val != fw_health->last_fw_reset_cnt) 10653 goto fw_reset; 10654 10655 fw_health->tmr_counter = fw_health->tmr_multiplier; 10656 return; 10657 10658 fw_reset: 10659 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event); 10660 bnxt_queue_sp_work(bp); 10661 } 10662 10663 static void bnxt_timer(struct timer_list *t) 10664 { 10665 struct bnxt *bp = from_timer(bp, t, timer); 10666 struct net_device *dev = bp->dev; 10667 10668 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 10669 return; 10670 10671 if (atomic_read(&bp->intr_sem) != 0) 10672 goto bnxt_restart_timer; 10673 10674 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 10675 bnxt_fw_health_check(bp); 10676 10677 if (bp->link_info.link_up && bp->stats_coal_ticks) { 10678 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 10679 bnxt_queue_sp_work(bp); 10680 } 10681 10682 if (bnxt_tc_flower_enabled(bp)) { 10683 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); 10684 bnxt_queue_sp_work(bp); 10685 } 10686 10687 #ifdef CONFIG_RFS_ACCEL 10688 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { 10689 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 10690 bnxt_queue_sp_work(bp); 10691 } 10692 #endif /*CONFIG_RFS_ACCEL*/ 10693 10694 if (bp->link_info.phy_retry) { 10695 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 10696 bp->link_info.phy_retry = false; 10697 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 10698 } else { 10699 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); 10700 bnxt_queue_sp_work(bp); 10701 } 10702 } 10703 10704 if 
((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && 10705 netif_carrier_ok(dev)) { 10706 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); 10707 bnxt_queue_sp_work(bp); 10708 } 10709 bnxt_restart_timer: 10710 mod_timer(&bp->timer, jiffies + bp->current_interval); 10711 } 10712 10713 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 10714 { 10715 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 10716 * set. If the device is being closed, bnxt_close() may be holding 10717 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 10718 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 10719 */ 10720 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10721 rtnl_lock(); 10722 } 10723 10724 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 10725 { 10726 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10727 rtnl_unlock(); 10728 } 10729 10730 /* Only called from bnxt_sp_task() */ 10731 static void bnxt_reset(struct bnxt *bp, bool silent) 10732 { 10733 bnxt_rtnl_lock_sp(bp); 10734 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 10735 bnxt_reset_task(bp, silent); 10736 bnxt_rtnl_unlock_sp(bp); 10737 } 10738 10739 /* Only called from bnxt_sp_task() */ 10740 static void bnxt_rx_ring_reset(struct bnxt *bp) 10741 { 10742 int i; 10743 10744 bnxt_rtnl_lock_sp(bp); 10745 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 10746 bnxt_rtnl_unlock_sp(bp); 10747 return; 10748 } 10749 /* Disable and flush TPA before resetting the RX ring */ 10750 if (bp->flags & BNXT_FLAG_TPA) 10751 bnxt_set_tpa(bp, false); 10752 for (i = 0; i < bp->rx_nr_rings; i++) { 10753 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 10754 struct bnxt_cp_ring_info *cpr; 10755 int rc; 10756 10757 if (!rxr->bnapi->in_reset) 10758 continue; 10759 10760 rc = bnxt_hwrm_rx_ring_reset(bp, i); 10761 if (rc) { 10762 if (rc == -EINVAL || rc == -EOPNOTSUPP) 10763 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 10764 else 10765 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 10766 rc); 10767 bnxt_reset_task(bp, true); 10768 break; 10769 } 10770 bnxt_free_one_rx_ring_skbs(bp, i); 10771 rxr->rx_prod = 0; 10772 rxr->rx_agg_prod = 0; 10773 rxr->rx_sw_agg_prod = 0; 10774 rxr->rx_next_cons = 0; 10775 rxr->bnapi->in_reset = false; 10776 bnxt_alloc_one_rx_ring(bp, i); 10777 cpr = &rxr->bnapi->cp_ring; 10778 cpr->sw_stats.rx.rx_resets++; 10779 if (bp->flags & BNXT_FLAG_AGG_RINGS) 10780 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 10781 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 10782 } 10783 if (bp->flags & BNXT_FLAG_TPA) 10784 bnxt_set_tpa(bp, true); 10785 bnxt_rtnl_unlock_sp(bp); 10786 } 10787 10788 static void bnxt_fw_reset_close(struct bnxt *bp) 10789 { 10790 bnxt_ulp_stop(bp); 10791 /* When firmware is in a fatal state, disable PCI device to prevent 10792 * any potential bad DMAs before freeing kernel memory.
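 * The PCI device is re-enabled later by bnxt_fw_reset_task() when it reaches the ENABLE_DEV state.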
10793 */ 10794 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 10795 pci_disable_device(bp->pdev); 10796 __bnxt_close_nic(bp, true, false); 10797 bnxt_clear_int_mode(bp); 10798 bnxt_hwrm_func_drv_unrgtr(bp); 10799 if (pci_is_enabled(bp->pdev)) 10800 pci_disable_device(bp->pdev); 10801 bnxt_free_ctx_mem(bp); 10802 kfree(bp->ctx); 10803 bp->ctx = NULL; 10804 } 10805 10806 static bool is_bnxt_fw_ok(struct bnxt *bp) 10807 { 10808 struct bnxt_fw_health *fw_health = bp->fw_health; 10809 bool no_heartbeat = false, has_reset = false; 10810 u32 val; 10811 10812 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 10813 if (val == fw_health->last_fw_heartbeat) 10814 no_heartbeat = true; 10815 10816 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 10817 if (val != fw_health->last_fw_reset_cnt) 10818 has_reset = true; 10819 10820 if (!no_heartbeat && has_reset) 10821 return true; 10822 10823 return false; 10824 } 10825 10826 /* rtnl_lock is acquired before calling this function */ 10827 static void bnxt_force_fw_reset(struct bnxt *bp) 10828 { 10829 struct bnxt_fw_health *fw_health = bp->fw_health; 10830 u32 wait_dsecs; 10831 10832 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 10833 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10834 return; 10835 10836 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10837 bnxt_fw_reset_close(bp); 10838 wait_dsecs = fw_health->master_func_wait_dsecs; 10839 if (fw_health->master) { 10840 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 10841 wait_dsecs = 0; 10842 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 10843 } else { 10844 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 10845 wait_dsecs = fw_health->normal_func_wait_dsecs; 10846 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10847 } 10848 10849 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 10850 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 10851 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 10852 } 10853 10854 void bnxt_fw_exception(struct bnxt *bp) 10855 { 10856 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 10857 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 10858 bnxt_rtnl_lock_sp(bp); 10859 bnxt_force_fw_reset(bp); 10860 bnxt_rtnl_unlock_sp(bp); 10861 } 10862 10863 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 10864 * < 0 on error. 
10865 */ 10866 static int bnxt_get_registered_vfs(struct bnxt *bp) 10867 { 10868 #ifdef CONFIG_BNXT_SRIOV 10869 int rc; 10870 10871 if (!BNXT_PF(bp)) 10872 return 0; 10873 10874 rc = bnxt_hwrm_func_qcfg(bp); 10875 if (rc) { 10876 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 10877 return rc; 10878 } 10879 if (bp->pf.registered_vfs) 10880 return bp->pf.registered_vfs; 10881 if (bp->sriov_cfg) 10882 return 1; 10883 #endif 10884 return 0; 10885 } 10886 10887 void bnxt_fw_reset(struct bnxt *bp) 10888 { 10889 bnxt_rtnl_lock_sp(bp); 10890 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 10891 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10892 int n = 0, tmo; 10893 10894 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10895 if (bp->pf.active_vfs && 10896 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 10897 n = bnxt_get_registered_vfs(bp); 10898 if (n < 0) { 10899 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 10900 n); 10901 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10902 dev_close(bp->dev); 10903 goto fw_reset_exit; 10904 } else if (n > 0) { 10905 u16 vf_tmo_dsecs = n * 10; 10906 10907 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 10908 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 10909 bp->fw_reset_state = 10910 BNXT_FW_RESET_STATE_POLL_VF; 10911 bnxt_queue_fw_reset_work(bp, HZ / 10); 10912 goto fw_reset_exit; 10913 } 10914 bnxt_fw_reset_close(bp); 10915 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10916 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 10917 tmo = HZ / 10; 10918 } else { 10919 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10920 tmo = bp->fw_reset_min_dsecs * HZ / 10; 10921 } 10922 bnxt_queue_fw_reset_work(bp, tmo); 10923 } 10924 fw_reset_exit: 10925 bnxt_rtnl_unlock_sp(bp); 10926 } 10927 10928 static void bnxt_chk_missed_irq(struct bnxt *bp) 10929 { 10930 int i; 10931 10932 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 10933 return; 10934 10935 for (i = 0; i < bp->cp_nr_rings; i++) { 10936 struct bnxt_napi *bnapi = bp->bnapi[i]; 10937 struct bnxt_cp_ring_info *cpr; 10938 u32 fw_ring_id; 10939 int j; 10940 10941 if (!bnapi) 10942 continue; 10943 10944 cpr = &bnapi->cp_ring; 10945 for (j = 0; j < 2; j++) { 10946 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 10947 u32 val[2]; 10948 10949 if (!cpr2 || cpr2->has_more_work || 10950 !bnxt_has_work(bp, cpr2)) 10951 continue; 10952 10953 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 10954 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 10955 continue; 10956 } 10957 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 10958 bnxt_dbg_hwrm_ring_info_get(bp, 10959 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 10960 fw_ring_id, &val[0], &val[1]); 10961 cpr->sw_stats.cmn.missed_irqs++; 10962 } 10963 } 10964 } 10965 10966 static void bnxt_cfg_ntp_filters(struct bnxt *); 10967 10968 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 10969 { 10970 struct bnxt_link_info *link_info = &bp->link_info; 10971 10972 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 10973 link_info->autoneg = BNXT_AUTONEG_SPEED; 10974 if (bp->hwrm_spec_code >= 0x10201) { 10975 if (link_info->auto_pause_setting & 10976 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 10977 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10978 } else { 10979 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10980 } 10981 link_info->advertising = link_info->auto_link_speeds; 10982 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 10983 } else { 10984 link_info->req_link_speed = link_info->force_link_speed; 10985 link_info->req_signal_mode = 
BNXT_SIG_MODE_NRZ; 10986 if (link_info->force_pam4_link_speed) { 10987 link_info->req_link_speed = 10988 link_info->force_pam4_link_speed; 10989 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 10990 } 10991 link_info->req_duplex = link_info->duplex_setting; 10992 } 10993 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 10994 link_info->req_flow_ctrl = 10995 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 10996 else 10997 link_info->req_flow_ctrl = link_info->force_pause_setting; 10998 } 10999 11000 static void bnxt_sp_task(struct work_struct *work) 11001 { 11002 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 11003 11004 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 11005 smp_mb__after_atomic(); 11006 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 11007 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 11008 return; 11009 } 11010 11011 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 11012 bnxt_cfg_rx_mode(bp); 11013 11014 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 11015 bnxt_cfg_ntp_filters(bp); 11016 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 11017 bnxt_hwrm_exec_fwd_req(bp); 11018 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 11019 bnxt_hwrm_port_qstats(bp, 0); 11020 bnxt_hwrm_port_qstats_ext(bp, 0); 11021 bnxt_accumulate_all_stats(bp); 11022 } 11023 11024 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 11025 int rc; 11026 11027 mutex_lock(&bp->link_lock); 11028 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 11029 &bp->sp_event)) 11030 bnxt_hwrm_phy_qcaps(bp); 11031 11032 rc = bnxt_update_link(bp, true); 11033 if (rc) 11034 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 11035 rc); 11036 11037 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 11038 &bp->sp_event)) 11039 bnxt_init_ethtool_link_settings(bp); 11040 mutex_unlock(&bp->link_lock); 11041 } 11042 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 11043 int rc; 11044 11045 mutex_lock(&bp->link_lock); 11046 rc = bnxt_update_phy_setting(bp); 11047 mutex_unlock(&bp->link_lock); 11048 if (rc) { 11049 netdev_warn(bp->dev, "update phy settings retry failed\n"); 11050 } else { 11051 bp->link_info.phy_retry = false; 11052 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 11053 } 11054 } 11055 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 11056 mutex_lock(&bp->link_lock); 11057 bnxt_get_port_module_status(bp); 11058 mutex_unlock(&bp->link_lock); 11059 } 11060 11061 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 11062 bnxt_tc_flow_stats_work(bp); 11063 11064 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 11065 bnxt_chk_missed_irq(bp); 11066 11067 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 11068 * must be the last functions to be called before exiting. 
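 * (bnxt_rtnl_lock_sp() drops the bit before taking rtnl_lock and bnxt_rtnl_unlock_sp() sets it again.)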
11069 */ 11070 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 11071 bnxt_reset(bp, false); 11072 11073 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 11074 bnxt_reset(bp, true); 11075 11076 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 11077 bnxt_rx_ring_reset(bp); 11078 11079 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) 11080 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); 11081 11082 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 11083 if (!is_bnxt_fw_ok(bp)) 11084 bnxt_devlink_health_report(bp, 11085 BNXT_FW_EXCEPTION_SP_EVENT); 11086 } 11087 11088 smp_mb__before_atomic(); 11089 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 11090 } 11091 11092 /* Under rtnl_lock */ 11093 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 11094 int tx_xdp) 11095 { 11096 int max_rx, max_tx, tx_sets = 1; 11097 int tx_rings_needed, stats; 11098 int rx_rings = rx; 11099 int cp, vnics, rc; 11100 11101 if (tcs) 11102 tx_sets = tcs; 11103 11104 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 11105 if (rc) 11106 return rc; 11107 11108 if (max_rx < rx) 11109 return -ENOMEM; 11110 11111 tx_rings_needed = tx * tx_sets + tx_xdp; 11112 if (max_tx < tx_rings_needed) 11113 return -ENOMEM; 11114 11115 vnics = 1; 11116 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 11117 vnics += rx_rings; 11118 11119 if (bp->flags & BNXT_FLAG_AGG_RINGS) 11120 rx_rings <<= 1; 11121 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; 11122 stats = cp; 11123 if (BNXT_NEW_RM(bp)) { 11124 cp += bnxt_get_ulp_msix_num(bp); 11125 stats += bnxt_get_ulp_stat_ctxs(bp); 11126 } 11127 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 11128 stats, vnics); 11129 } 11130 11131 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 11132 { 11133 if (bp->bar2) { 11134 pci_iounmap(pdev, bp->bar2); 11135 bp->bar2 = NULL; 11136 } 11137 11138 if (bp->bar1) { 11139 pci_iounmap(pdev, bp->bar1); 11140 bp->bar1 = NULL; 11141 } 11142 11143 if (bp->bar0) { 11144 pci_iounmap(pdev, bp->bar0); 11145 bp->bar0 = NULL; 11146 } 11147 } 11148 11149 static void bnxt_cleanup_pci(struct bnxt *bp) 11150 { 11151 bnxt_unmap_bars(bp, bp->pdev); 11152 pci_release_regions(bp->pdev); 11153 if (pci_is_enabled(bp->pdev)) 11154 pci_disable_device(bp->pdev); 11155 } 11156 11157 static void bnxt_init_dflt_coal(struct bnxt *bp) 11158 { 11159 struct bnxt_coal *coal; 11160 11161 /* Tick values in micro seconds. 11162 * 1 coal_buf x bufs_per_record = 1 completion record. 
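 * With the RX defaults below (bufs_per_record = 2, coal_bufs = 30), this corresponds to roughly 15 completion records.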
11163 */ 11164 coal = &bp->rx_coal; 11165 coal->coal_ticks = 10; 11166 coal->coal_bufs = 30; 11167 coal->coal_ticks_irq = 1; 11168 coal->coal_bufs_irq = 2; 11169 coal->idle_thresh = 50; 11170 coal->bufs_per_record = 2; 11171 coal->budget = 64; /* NAPI budget */ 11172 11173 coal = &bp->tx_coal; 11174 coal->coal_ticks = 28; 11175 coal->coal_bufs = 30; 11176 coal->coal_ticks_irq = 2; 11177 coal->coal_bufs_irq = 2; 11178 coal->bufs_per_record = 1; 11179 11180 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 11181 } 11182 11183 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 11184 { 11185 #ifdef CONFIG_TEE_BNXT_FW 11186 int rc = tee_bnxt_fw_load(); 11187 11188 if (rc) 11189 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 11190 11191 return rc; 11192 #else 11193 netdev_err(bp->dev, "OP-TEE not supported\n"); 11194 return -ENODEV; 11195 #endif 11196 } 11197 11198 static int bnxt_fw_init_one_p1(struct bnxt *bp) 11199 { 11200 int rc; 11201 11202 bp->fw_cap = 0; 11203 rc = bnxt_hwrm_ver_get(bp); 11204 bnxt_try_map_fw_health_reg(bp); 11205 if (rc) { 11206 if (bp->fw_health && bp->fw_health->status_reliable) { 11207 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 11208 11209 netdev_err(bp->dev, 11210 "Firmware not responding, status: 0x%x\n", 11211 sts); 11212 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 11213 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 11214 rc = bnxt_fw_reset_via_optee(bp); 11215 if (!rc) 11216 rc = bnxt_hwrm_ver_get(bp); 11217 } 11218 } 11219 if (rc) 11220 return rc; 11221 } 11222 11223 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { 11224 rc = bnxt_alloc_kong_hwrm_resources(bp); 11225 if (rc) 11226 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; 11227 } 11228 11229 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 11230 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { 11231 rc = bnxt_alloc_hwrm_short_cmd_req(bp); 11232 if (rc) 11233 return rc; 11234 } 11235 bnxt_nvm_cfg_ver_get(bp); 11236 11237 rc = bnxt_hwrm_func_reset(bp); 11238 if (rc) 11239 return -ENODEV; 11240 11241 bnxt_hwrm_fw_set_time(bp); 11242 return 0; 11243 } 11244 11245 static int bnxt_fw_init_one_p2(struct bnxt *bp) 11246 { 11247 int rc; 11248 11249 /* Get the MAX capabilities for this function */ 11250 rc = bnxt_hwrm_func_qcaps(bp); 11251 if (rc) { 11252 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 11253 rc); 11254 return -ENODEV; 11255 } 11256 11257 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 11258 if (rc) 11259 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 11260 rc); 11261 11262 if (bnxt_alloc_fw_health(bp)) { 11263 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 11264 } else { 11265 rc = bnxt_hwrm_error_recovery_qcfg(bp); 11266 if (rc) 11267 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 11268 rc); 11269 } 11270 11271 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 11272 if (rc) 11273 return -ENODEV; 11274 11275 bnxt_hwrm_func_qcfg(bp); 11276 bnxt_hwrm_vnic_qcaps(bp); 11277 bnxt_hwrm_port_led_qcaps(bp); 11278 bnxt_ethtool_init(bp); 11279 bnxt_dcb_init(bp); 11280 return 0; 11281 } 11282 11283 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 11284 { 11285 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; 11286 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 11287 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 11288 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 11289 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 11290 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 11291 bp->flags |= BNXT_FLAG_UDP_RSS_CAP; 11292 
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 11293 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 11294 } 11295 } 11296 11297 static void bnxt_set_dflt_rfs(struct bnxt *bp) 11298 { 11299 struct net_device *dev = bp->dev; 11300 11301 dev->hw_features &= ~NETIF_F_NTUPLE; 11302 dev->features &= ~NETIF_F_NTUPLE; 11303 bp->flags &= ~BNXT_FLAG_RFS; 11304 if (bnxt_rfs_supported(bp)) { 11305 dev->hw_features |= NETIF_F_NTUPLE; 11306 if (bnxt_rfs_capable(bp)) { 11307 bp->flags |= BNXT_FLAG_RFS; 11308 dev->features |= NETIF_F_NTUPLE; 11309 } 11310 } 11311 } 11312 11313 static void bnxt_fw_init_one_p3(struct bnxt *bp) 11314 { 11315 struct pci_dev *pdev = bp->pdev; 11316 11317 bnxt_set_dflt_rss_hash_type(bp); 11318 bnxt_set_dflt_rfs(bp); 11319 11320 bnxt_get_wol_settings(bp); 11321 if (bp->flags & BNXT_FLAG_WOL_CAP) 11322 device_set_wakeup_enable(&pdev->dev, bp->wol); 11323 else 11324 device_set_wakeup_capable(&pdev->dev, false); 11325 11326 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 11327 bnxt_hwrm_coal_params_qcaps(bp); 11328 } 11329 11330 static int bnxt_fw_init_one(struct bnxt *bp) 11331 { 11332 int rc; 11333 11334 rc = bnxt_fw_init_one_p1(bp); 11335 if (rc) { 11336 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 11337 return rc; 11338 } 11339 rc = bnxt_fw_init_one_p2(bp); 11340 if (rc) { 11341 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 11342 return rc; 11343 } 11344 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 11345 if (rc) 11346 return rc; 11347 11348 /* In case fw capabilities have changed, destroy the unneeded 11349 * reporters and create newly capable ones. 11350 */ 11351 bnxt_dl_fw_reporters_destroy(bp, false); 11352 bnxt_dl_fw_reporters_create(bp); 11353 bnxt_fw_init_one_p3(bp); 11354 return 0; 11355 } 11356 11357 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 11358 { 11359 struct bnxt_fw_health *fw_health = bp->fw_health; 11360 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 11361 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 11362 u32 reg_type, reg_off, delay_msecs; 11363 11364 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 11365 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 11366 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 11367 switch (reg_type) { 11368 case BNXT_FW_HEALTH_REG_TYPE_CFG: 11369 pci_write_config_dword(bp->pdev, reg_off, val); 11370 break; 11371 case BNXT_FW_HEALTH_REG_TYPE_GRC: 11372 writel(reg_off & BNXT_GRC_BASE_MASK, 11373 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 11374 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 11375 fallthrough; 11376 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 11377 writel(val, bp->bar0 + reg_off); 11378 break; 11379 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 11380 writel(val, bp->bar1 + reg_off); 11381 break; 11382 } 11383 if (delay_msecs) { 11384 pci_read_config_dword(bp->pdev, 0, &val); 11385 msleep(delay_msecs); 11386 } 11387 } 11388 11389 static void bnxt_reset_all(struct bnxt *bp) 11390 { 11391 struct bnxt_fw_health *fw_health = bp->fw_health; 11392 int i, rc; 11393 11394 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 11395 bnxt_fw_reset_via_optee(bp); 11396 bp->fw_reset_timestamp = jiffies; 11397 return; 11398 } 11399 11400 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 11401 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 11402 bnxt_fw_reset_writel(bp, i); 11403 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 11404 struct hwrm_fw_reset_input req = {0}; 11405 11406 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); 11407 
req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 11408 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 11409 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 11410 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 11411 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 11412 if (rc) 11413 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 11414 } 11415 bp->fw_reset_timestamp = jiffies; 11416 } 11417 11418 static void bnxt_fw_reset_task(struct work_struct *work) 11419 { 11420 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 11421 int rc; 11422 11423 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 11424 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 11425 return; 11426 } 11427 11428 switch (bp->fw_reset_state) { 11429 case BNXT_FW_RESET_STATE_POLL_VF: { 11430 int n = bnxt_get_registered_vfs(bp); 11431 int tmo; 11432 11433 if (n < 0) { 11434 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 11435 n, jiffies_to_msecs(jiffies - 11436 bp->fw_reset_timestamp)); 11437 goto fw_reset_abort; 11438 } else if (n > 0) { 11439 if (time_after(jiffies, bp->fw_reset_timestamp + 11440 (bp->fw_reset_max_dsecs * HZ / 10))) { 11441 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11442 bp->fw_reset_state = 0; 11443 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 11444 n); 11445 return; 11446 } 11447 bnxt_queue_fw_reset_work(bp, HZ / 10); 11448 return; 11449 } 11450 bp->fw_reset_timestamp = jiffies; 11451 rtnl_lock(); 11452 bnxt_fw_reset_close(bp); 11453 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 11454 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 11455 tmo = HZ / 10; 11456 } else { 11457 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 11458 tmo = bp->fw_reset_min_dsecs * HZ / 10; 11459 } 11460 rtnl_unlock(); 11461 bnxt_queue_fw_reset_work(bp, tmo); 11462 return; 11463 } 11464 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 11465 u32 val; 11466 11467 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 11468 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 11469 !time_after(jiffies, bp->fw_reset_timestamp + 11470 (bp->fw_reset_max_dsecs * HZ / 10))) { 11471 bnxt_queue_fw_reset_work(bp, HZ / 5); 11472 return; 11473 } 11474 11475 if (!bp->fw_health->master) { 11476 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 11477 11478 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 11479 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 11480 return; 11481 } 11482 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 11483 } 11484 fallthrough; 11485 case BNXT_FW_RESET_STATE_RESET_FW: 11486 bnxt_reset_all(bp); 11487 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 11488 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 11489 return; 11490 case BNXT_FW_RESET_STATE_ENABLE_DEV: 11491 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 11492 u32 val; 11493 11494 val = bnxt_fw_health_readl(bp, 11495 BNXT_FW_RESET_INPROG_REG); 11496 if (val) 11497 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", 11498 val); 11499 } 11500 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 11501 if (pci_enable_device(bp->pdev)) { 11502 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 11503 goto fw_reset_abort; 11504 } 11505 pci_set_master(bp->pdev); 11506 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 11507 fallthrough; 11508 case BNXT_FW_RESET_STATE_POLL_FW: 11509 
bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 11510 rc = __bnxt_hwrm_ver_get(bp, true); 11511 if (rc) { 11512 if (time_after(jiffies, bp->fw_reset_timestamp + 11513 (bp->fw_reset_max_dsecs * HZ / 10))) { 11514 netdev_err(bp->dev, "Firmware reset aborted\n"); 11515 goto fw_reset_abort_status; 11516 } 11517 bnxt_queue_fw_reset_work(bp, HZ / 5); 11518 return; 11519 } 11520 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 11521 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 11522 fallthrough; 11523 case BNXT_FW_RESET_STATE_OPENING: 11524 while (!rtnl_trylock()) { 11525 bnxt_queue_fw_reset_work(bp, HZ / 10); 11526 return; 11527 } 11528 rc = bnxt_open(bp->dev); 11529 if (rc) { 11530 netdev_err(bp->dev, "bnxt_open_nic() failed\n"); 11531 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11532 dev_close(bp->dev); 11533 } 11534 11535 bp->fw_reset_state = 0; 11536 /* Make sure fw_reset_state is 0 before clearing the flag */ 11537 smp_mb__before_atomic(); 11538 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11539 bnxt_ulp_start(bp, rc); 11540 if (!rc) 11541 bnxt_reenable_sriov(bp); 11542 bnxt_dl_health_recovery_done(bp); 11543 bnxt_dl_health_status_update(bp, true); 11544 rtnl_unlock(); 11545 break; 11546 } 11547 return; 11548 11549 fw_reset_abort_status: 11550 if (bp->fw_health->status_reliable || 11551 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 11552 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 11553 11554 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 11555 } 11556 fw_reset_abort: 11557 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 11558 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 11559 bnxt_dl_health_status_update(bp, false); 11560 bp->fw_reset_state = 0; 11561 rtnl_lock(); 11562 dev_close(bp->dev); 11563 rtnl_unlock(); 11564 } 11565 11566 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 11567 { 11568 int rc; 11569 struct bnxt *bp = netdev_priv(dev); 11570 11571 SET_NETDEV_DEV(dev, &pdev->dev); 11572 11573 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 11574 rc = pci_enable_device(pdev); 11575 if (rc) { 11576 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 11577 goto init_err; 11578 } 11579 11580 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 11581 dev_err(&pdev->dev, 11582 "Cannot find PCI device base address, aborting\n"); 11583 rc = -ENODEV; 11584 goto init_err_disable; 11585 } 11586 11587 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 11588 if (rc) { 11589 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 11590 goto init_err_disable; 11591 } 11592 11593 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 11594 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 11595 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 11596 rc = -EIO; 11597 goto init_err_release; 11598 } 11599 11600 pci_set_master(pdev); 11601 11602 bp->dev = dev; 11603 bp->pdev = pdev; 11604 11605 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 11606 * determines the BAR size. 
11607 */ 11608 bp->bar0 = pci_ioremap_bar(pdev, 0); 11609 if (!bp->bar0) { 11610 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 11611 rc = -ENOMEM; 11612 goto init_err_release; 11613 } 11614 11615 bp->bar2 = pci_ioremap_bar(pdev, 4); 11616 if (!bp->bar2) { 11617 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 11618 rc = -ENOMEM; 11619 goto init_err_release; 11620 } 11621 11622 pci_enable_pcie_error_reporting(pdev); 11623 11624 INIT_WORK(&bp->sp_task, bnxt_sp_task); 11625 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 11626 11627 spin_lock_init(&bp->ntp_fltr_lock); 11628 #if BITS_PER_LONG == 32 11629 spin_lock_init(&bp->db_lock); 11630 #endif 11631 11632 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 11633 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 11634 11635 bnxt_init_dflt_coal(bp); 11636 11637 timer_setup(&bp->timer, bnxt_timer, 0); 11638 bp->current_interval = BNXT_TIMER_INTERVAL; 11639 11640 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 11641 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 11642 11643 clear_bit(BNXT_STATE_OPEN, &bp->state); 11644 return 0; 11645 11646 init_err_release: 11647 bnxt_unmap_bars(bp, pdev); 11648 pci_release_regions(pdev); 11649 11650 init_err_disable: 11651 pci_disable_device(pdev); 11652 11653 init_err: 11654 return rc; 11655 } 11656 11657 /* rtnl_lock held */ 11658 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 11659 { 11660 struct sockaddr *addr = p; 11661 struct bnxt *bp = netdev_priv(dev); 11662 int rc = 0; 11663 11664 if (!is_valid_ether_addr(addr->sa_data)) 11665 return -EADDRNOTAVAIL; 11666 11667 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 11668 return 0; 11669 11670 rc = bnxt_approve_mac(bp, addr->sa_data, true); 11671 if (rc) 11672 return rc; 11673 11674 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 11675 if (netif_running(dev)) { 11676 bnxt_close_nic(bp, false, false); 11677 rc = bnxt_open_nic(bp, false, false); 11678 } 11679 11680 return rc; 11681 } 11682 11683 /* rtnl_lock held */ 11684 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 11685 { 11686 struct bnxt *bp = netdev_priv(dev); 11687 11688 if (netif_running(dev)) 11689 bnxt_close_nic(bp, true, false); 11690 11691 dev->mtu = new_mtu; 11692 bnxt_set_ring_params(bp); 11693 11694 if (netif_running(dev)) 11695 return bnxt_open_nic(bp, true, false); 11696 11697 return 0; 11698 } 11699 11700 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 11701 { 11702 struct bnxt *bp = netdev_priv(dev); 11703 bool sh = false; 11704 int rc; 11705 11706 if (tc > bp->max_tc) { 11707 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 11708 tc, bp->max_tc); 11709 return -EINVAL; 11710 } 11711 11712 if (netdev_get_num_tc(dev) == tc) 11713 return 0; 11714 11715 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 11716 sh = true; 11717 11718 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 11719 sh, tc, bp->tx_nr_rings_xdp); 11720 if (rc) 11721 return rc; 11722 11723 /* Needs to close the device and do hw resource re-allocations */ 11724 if (netif_running(bp->dev)) 11725 bnxt_close_nic(bp, true, false); 11726 11727 if (tc) { 11728 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 11729 netdev_set_num_tc(dev, tc); 11730 } else { 11731 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 11732 netdev_reset_tc(dev); 11733 } 11734 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 11735 bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 11736 bp->tx_nr_rings + bp->rx_nr_rings; 11737 11738 if (netif_running(bp->dev)) 11739 return bnxt_open_nic(bp, true, false); 11740 11741 return 0; 11742 } 11743 11744 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 11745 void *cb_priv) 11746 { 11747 struct bnxt *bp = cb_priv; 11748 11749 if (!bnxt_tc_flower_enabled(bp) || 11750 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 11751 return -EOPNOTSUPP; 11752 11753 switch (type) { 11754 case TC_SETUP_CLSFLOWER: 11755 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 11756 default: 11757 return -EOPNOTSUPP; 11758 } 11759 } 11760 11761 LIST_HEAD(bnxt_block_cb_list); 11762 11763 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 11764 void *type_data) 11765 { 11766 struct bnxt *bp = netdev_priv(dev); 11767 11768 switch (type) { 11769 case TC_SETUP_BLOCK: 11770 return flow_block_cb_setup_simple(type_data, 11771 &bnxt_block_cb_list, 11772 bnxt_setup_tc_block_cb, 11773 bp, bp, true); 11774 case TC_SETUP_QDISC_MQPRIO: { 11775 struct tc_mqprio_qopt *mqprio = type_data; 11776 11777 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 11778 11779 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 11780 } 11781 default: 11782 return -EOPNOTSUPP; 11783 } 11784 } 11785 11786 #ifdef CONFIG_RFS_ACCEL 11787 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 11788 struct bnxt_ntuple_filter *f2) 11789 { 11790 struct flow_keys *keys1 = &f1->fkeys; 11791 struct flow_keys *keys2 = &f2->fkeys; 11792 11793 if (keys1->basic.n_proto != keys2->basic.n_proto || 11794 keys1->basic.ip_proto != keys2->basic.ip_proto) 11795 return false; 11796 11797 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 11798 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 11799 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) 11800 return false; 11801 } else { 11802 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, 11803 sizeof(keys1->addrs.v6addrs.src)) || 11804 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, 11805 sizeof(keys1->addrs.v6addrs.dst))) 11806 return false; 11807 } 11808 11809 if (keys1->ports.ports == keys2->ports.ports && 11810 keys1->control.flags == keys2->control.flags && 11811 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 11812 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 11813 return true; 11814 11815 return false; 11816 } 11817 11818 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 11819 u16 rxq_index, u32 flow_id) 11820 { 11821 struct bnxt *bp = netdev_priv(dev); 11822 struct bnxt_ntuple_filter *fltr, *new_fltr; 11823 struct flow_keys *fkeys; 11824 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 11825 int rc = 0, idx, bit_id, l2_idx = 0; 11826 struct hlist_head *head; 11827 u32 flags; 11828 11829 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 11830 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11831 int off = 0, j; 11832 11833 netif_addr_lock_bh(dev); 11834 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 11835 if (ether_addr_equal(eth->h_dest, 11836 vnic->uc_list + off)) { 11837 l2_idx = j + 1; 11838 break; 11839 } 11840 } 11841 netif_addr_unlock_bh(dev); 11842 if (!l2_idx) 11843 return -EINVAL; 11844 } 11845 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 11846 if (!new_fltr) 11847 return -ENOMEM; 11848 11849 fkeys = &new_fltr->fkeys; 11850 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 11851 rc = -EPROTONOSUPPORT; 11852 goto err_free; 
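		/* A flow whose keys cannot be dissected from the skb cannot
		 * be programmed into an ntuple filter, so it is treated as
		 * unsupported for hardware steering and the filter memory is
		 * freed on the err_free path.
		 */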
11853 } 11854 11855 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 11856 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 11857 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 11858 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 11859 rc = -EPROTONOSUPPORT; 11860 goto err_free; 11861 } 11862 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 11863 bp->hwrm_spec_code < 0x10601) { 11864 rc = -EPROTONOSUPPORT; 11865 goto err_free; 11866 } 11867 flags = fkeys->control.flags; 11868 if (((flags & FLOW_DIS_ENCAPSULATION) && 11869 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 11870 rc = -EPROTONOSUPPORT; 11871 goto err_free; 11872 } 11873 11874 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 11875 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 11876 11877 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 11878 head = &bp->ntp_fltr_hash_tbl[idx]; 11879 rcu_read_lock(); 11880 hlist_for_each_entry_rcu(fltr, head, hash) { 11881 if (bnxt_fltr_match(fltr, new_fltr)) { 11882 rcu_read_unlock(); 11883 rc = 0; 11884 goto err_free; 11885 } 11886 } 11887 rcu_read_unlock(); 11888 11889 spin_lock_bh(&bp->ntp_fltr_lock); 11890 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 11891 BNXT_NTP_FLTR_MAX_FLTR, 0); 11892 if (bit_id < 0) { 11893 spin_unlock_bh(&bp->ntp_fltr_lock); 11894 rc = -ENOMEM; 11895 goto err_free; 11896 } 11897 11898 new_fltr->sw_id = (u16)bit_id; 11899 new_fltr->flow_id = flow_id; 11900 new_fltr->l2_fltr_idx = l2_idx; 11901 new_fltr->rxq = rxq_index; 11902 hlist_add_head_rcu(&new_fltr->hash, head); 11903 bp->ntp_fltr_count++; 11904 spin_unlock_bh(&bp->ntp_fltr_lock); 11905 11906 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 11907 bnxt_queue_sp_work(bp); 11908 11909 return new_fltr->sw_id; 11910 11911 err_free: 11912 kfree(new_fltr); 11913 return rc; 11914 } 11915 11916 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11917 { 11918 int i; 11919 11920 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 11921 struct hlist_head *head; 11922 struct hlist_node *tmp; 11923 struct bnxt_ntuple_filter *fltr; 11924 int rc; 11925 11926 head = &bp->ntp_fltr_hash_tbl[i]; 11927 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 11928 bool del = false; 11929 11930 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 11931 if (rps_may_expire_flow(bp->dev, fltr->rxq, 11932 fltr->flow_id, 11933 fltr->sw_id)) { 11934 bnxt_hwrm_cfa_ntuple_filter_free(bp, 11935 fltr); 11936 del = true; 11937 } 11938 } else { 11939 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 11940 fltr); 11941 if (rc) 11942 del = true; 11943 else 11944 set_bit(BNXT_FLTR_VALID, &fltr->state); 11945 } 11946 11947 if (del) { 11948 spin_lock_bh(&bp->ntp_fltr_lock); 11949 hlist_del_rcu(&fltr->hash); 11950 bp->ntp_fltr_count--; 11951 spin_unlock_bh(&bp->ntp_fltr_lock); 11952 synchronize_rcu(); 11953 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 11954 kfree(fltr); 11955 } 11956 } 11957 } 11958 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 11959 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 11960 } 11961 11962 #else 11963 11964 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11965 { 11966 } 11967 11968 #endif /* CONFIG_RFS_ACCEL */ 11969 11970 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table) 11971 { 11972 struct bnxt *bp = netdev_priv(netdev); 11973 struct udp_tunnel_info ti; 11974 unsigned int cmd; 11975 11976 udp_tunnel_nic_get_port(netdev, table, 0, &ti); 11977 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) 11978 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 11979 else 
11980 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 11981 11982 if (ti.port) 11983 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd); 11984 11985 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 11986 } 11987 11988 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 11989 .sync_table = bnxt_udp_tunnel_sync, 11990 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 11991 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 11992 .tables = { 11993 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 11994 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 11995 }, 11996 }; 11997 11998 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 11999 struct net_device *dev, u32 filter_mask, 12000 int nlflags) 12001 { 12002 struct bnxt *bp = netdev_priv(dev); 12003 12004 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 12005 nlflags, filter_mask, NULL); 12006 } 12007 12008 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 12009 u16 flags, struct netlink_ext_ack *extack) 12010 { 12011 struct bnxt *bp = netdev_priv(dev); 12012 struct nlattr *attr, *br_spec; 12013 int rem, rc = 0; 12014 12015 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 12016 return -EOPNOTSUPP; 12017 12018 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 12019 if (!br_spec) 12020 return -EINVAL; 12021 12022 nla_for_each_nested(attr, br_spec, rem) { 12023 u16 mode; 12024 12025 if (nla_type(attr) != IFLA_BRIDGE_MODE) 12026 continue; 12027 12028 if (nla_len(attr) < sizeof(mode)) 12029 return -EINVAL; 12030 12031 mode = nla_get_u16(attr); 12032 if (mode == bp->br_mode) 12033 break; 12034 12035 rc = bnxt_hwrm_set_br_mode(bp, mode); 12036 if (!rc) 12037 bp->br_mode = mode; 12038 break; 12039 } 12040 return rc; 12041 } 12042 12043 int bnxt_get_port_parent_id(struct net_device *dev, 12044 struct netdev_phys_item_id *ppid) 12045 { 12046 struct bnxt *bp = netdev_priv(dev); 12047 12048 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 12049 return -EOPNOTSUPP; 12050 12051 /* The PF and it's VF-reps only support the switchdev framework */ 12052 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 12053 return -EOPNOTSUPP; 12054 12055 ppid->id_len = sizeof(bp->dsn); 12056 memcpy(ppid->id, bp->dsn, ppid->id_len); 12057 12058 return 0; 12059 } 12060 12061 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) 12062 { 12063 struct bnxt *bp = netdev_priv(dev); 12064 12065 return &bp->dl_port; 12066 } 12067 12068 static const struct net_device_ops bnxt_netdev_ops = { 12069 .ndo_open = bnxt_open, 12070 .ndo_start_xmit = bnxt_start_xmit, 12071 .ndo_stop = bnxt_close, 12072 .ndo_get_stats64 = bnxt_get_stats64, 12073 .ndo_set_rx_mode = bnxt_set_rx_mode, 12074 .ndo_do_ioctl = bnxt_ioctl, 12075 .ndo_validate_addr = eth_validate_addr, 12076 .ndo_set_mac_address = bnxt_change_mac_addr, 12077 .ndo_change_mtu = bnxt_change_mtu, 12078 .ndo_fix_features = bnxt_fix_features, 12079 .ndo_set_features = bnxt_set_features, 12080 .ndo_tx_timeout = bnxt_tx_timeout, 12081 #ifdef CONFIG_BNXT_SRIOV 12082 .ndo_get_vf_config = bnxt_get_vf_config, 12083 .ndo_set_vf_mac = bnxt_set_vf_mac, 12084 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 12085 .ndo_set_vf_rate = bnxt_set_vf_bw, 12086 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 12087 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 12088 .ndo_set_vf_trust = bnxt_set_vf_trust, 12089 #endif 12090 .ndo_setup_tc = bnxt_setup_tc, 12091 #ifdef CONFIG_RFS_ACCEL 12092 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 12093 
#endif 12094 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, 12095 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, 12096 .ndo_bpf = bnxt_xdp, 12097 .ndo_xdp_xmit = bnxt_xdp_xmit, 12098 .ndo_bridge_getlink = bnxt_bridge_getlink, 12099 .ndo_bridge_setlink = bnxt_bridge_setlink, 12100 .ndo_get_devlink_port = bnxt_get_devlink_port, 12101 }; 12102 12103 static void bnxt_remove_one(struct pci_dev *pdev) 12104 { 12105 struct net_device *dev = pci_get_drvdata(pdev); 12106 struct bnxt *bp = netdev_priv(dev); 12107 12108 if (BNXT_PF(bp)) 12109 bnxt_sriov_disable(bp); 12110 12111 if (BNXT_PF(bp)) 12112 devlink_port_type_clear(&bp->dl_port); 12113 pci_disable_pcie_error_reporting(pdev); 12114 unregister_netdev(dev); 12115 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 12116 /* Flush any pending tasks */ 12117 cancel_work_sync(&bp->sp_task); 12118 cancel_delayed_work_sync(&bp->fw_reset_task); 12119 bp->sp_event = 0; 12120 12121 bnxt_dl_fw_reporters_destroy(bp, true); 12122 bnxt_dl_unregister(bp); 12123 bnxt_shutdown_tc(bp); 12124 12125 bnxt_clear_int_mode(bp); 12126 bnxt_hwrm_func_drv_unrgtr(bp); 12127 bnxt_free_hwrm_resources(bp); 12128 bnxt_free_hwrm_short_cmd_req(bp); 12129 bnxt_ethtool_free(bp); 12130 bnxt_dcb_free(bp); 12131 kfree(bp->edev); 12132 bp->edev = NULL; 12133 kfree(bp->fw_health); 12134 bp->fw_health = NULL; 12135 bnxt_cleanup_pci(bp); 12136 bnxt_free_ctx_mem(bp); 12137 kfree(bp->ctx); 12138 bp->ctx = NULL; 12139 kfree(bp->rss_indir_tbl); 12140 bp->rss_indir_tbl = NULL; 12141 bnxt_free_port_stats(bp); 12142 free_netdev(dev); 12143 } 12144 12145 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 12146 { 12147 int rc = 0; 12148 struct bnxt_link_info *link_info = &bp->link_info; 12149 12150 rc = bnxt_hwrm_phy_qcaps(bp); 12151 if (rc) { 12152 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 12153 rc); 12154 return rc; 12155 } 12156 if (!fw_dflt) 12157 return 0; 12158 12159 rc = bnxt_update_link(bp, false); 12160 if (rc) { 12161 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 12162 rc); 12163 return rc; 12164 } 12165 12166 /* Older firmware does not have supported_auto_speeds, so assume 12167 * that all supported speeds can be autonegotiated. 
12168 */ 12169 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 12170 link_info->support_auto_speeds = link_info->support_speeds; 12171 12172 bnxt_init_ethtool_link_settings(bp); 12173 return 0; 12174 } 12175 12176 static int bnxt_get_max_irq(struct pci_dev *pdev) 12177 { 12178 u16 ctrl; 12179 12180 if (!pdev->msix_cap) 12181 return 1; 12182 12183 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 12184 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 12185 } 12186 12187 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 12188 int *max_cp) 12189 { 12190 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 12191 int max_ring_grps = 0, max_irq; 12192 12193 *max_tx = hw_resc->max_tx_rings; 12194 *max_rx = hw_resc->max_rx_rings; 12195 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 12196 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 12197 bnxt_get_ulp_msix_num(bp), 12198 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); 12199 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 12200 *max_cp = min_t(int, *max_cp, max_irq); 12201 max_ring_grps = hw_resc->max_hw_ring_grps; 12202 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 12203 *max_cp -= 1; 12204 *max_rx -= 2; 12205 } 12206 if (bp->flags & BNXT_FLAG_AGG_RINGS) 12207 *max_rx >>= 1; 12208 if (bp->flags & BNXT_FLAG_CHIP_P5) { 12209 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 12210 /* On P5 chips, max_cp output param should be available NQs */ 12211 *max_cp = max_irq; 12212 } 12213 *max_rx = min_t(int, *max_rx, max_ring_grps); 12214 } 12215 12216 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 12217 { 12218 int rx, tx, cp; 12219 12220 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 12221 *max_rx = rx; 12222 *max_tx = tx; 12223 if (!rx || !tx || !cp) 12224 return -ENOMEM; 12225 12226 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 12227 } 12228 12229 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 12230 bool shared) 12231 { 12232 int rc; 12233 12234 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 12235 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 12236 /* Not enough rings, try disabling agg rings. */ 12237 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 12238 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 12239 if (rc) { 12240 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 12241 bp->flags |= BNXT_FLAG_AGG_RINGS; 12242 return rc; 12243 } 12244 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 12245 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12246 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 12247 bnxt_set_ring_params(bp); 12248 } 12249 12250 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 12251 int max_cp, max_stat, max_irq; 12252 12253 /* Reserve minimum resources for RoCE */ 12254 max_cp = bnxt_get_max_func_cp_rings(bp); 12255 max_stat = bnxt_get_max_func_stat_ctxs(bp); 12256 max_irq = bnxt_get_max_func_irqs(bp); 12257 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 12258 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 12259 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 12260 return 0; 12261 12262 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 12263 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 12264 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 12265 max_cp = min_t(int, max_cp, max_irq); 12266 max_cp = min_t(int, max_cp, max_stat); 12267 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 12268 if (rc) 12269 rc = 0; 12270 } 12271 return rc; 12272 } 12273 12274 /* In initial default shared ring setting, each shared ring must have a 12275 * RX/TX ring pair. 
12276 */ 12277 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 12278 { 12279 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 12280 bp->rx_nr_rings = bp->cp_nr_rings; 12281 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 12282 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 12283 } 12284 12285 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 12286 { 12287 int dflt_rings, max_rx_rings, max_tx_rings, rc; 12288 12289 if (!bnxt_can_reserve_rings(bp)) 12290 return 0; 12291 12292 if (sh) 12293 bp->flags |= BNXT_FLAG_SHARED_RINGS; 12294 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 12295 /* Reduce default rings on multi-port cards so that total default 12296 * rings do not exceed CPU count. 12297 */ 12298 if (bp->port_count > 1) { 12299 int max_rings = 12300 max_t(int, num_online_cpus() / bp->port_count, 1); 12301 12302 dflt_rings = min_t(int, dflt_rings, max_rings); 12303 } 12304 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 12305 if (rc) 12306 return rc; 12307 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 12308 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 12309 if (sh) 12310 bnxt_trim_dflt_sh_rings(bp); 12311 else 12312 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 12313 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 12314 12315 rc = __bnxt_reserve_rings(bp); 12316 if (rc) 12317 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 12318 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 12319 if (sh) 12320 bnxt_trim_dflt_sh_rings(bp); 12321 12322 /* Rings may have been trimmed, re-reserve the trimmed rings. */ 12323 if (bnxt_need_reserve_rings(bp)) { 12324 rc = __bnxt_reserve_rings(bp); 12325 if (rc) 12326 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); 12327 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 12328 } 12329 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 12330 bp->rx_nr_rings++; 12331 bp->cp_nr_rings++; 12332 } 12333 if (rc) { 12334 bp->tx_nr_rings = 0; 12335 bp->rx_nr_rings = 0; 12336 } 12337 return rc; 12338 } 12339 12340 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) 12341 { 12342 int rc; 12343 12344 if (bp->tx_nr_rings) 12345 return 0; 12346 12347 bnxt_ulp_irq_stop(bp); 12348 bnxt_clear_int_mode(bp); 12349 rc = bnxt_set_dflt_rings(bp, true); 12350 if (rc) { 12351 netdev_err(bp->dev, "Not enough rings available.\n"); 12352 goto init_dflt_ring_err; 12353 } 12354 rc = bnxt_init_int_mode(bp); 12355 if (rc) 12356 goto init_dflt_ring_err; 12357 12358 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 12359 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { 12360 bp->flags |= BNXT_FLAG_RFS; 12361 bp->dev->features |= NETIF_F_NTUPLE; 12362 } 12363 init_dflt_ring_err: 12364 bnxt_ulp_irq_restart(bp, rc); 12365 return rc; 12366 } 12367 12368 int bnxt_restore_pf_fw_resources(struct bnxt *bp) 12369 { 12370 int rc; 12371 12372 ASSERT_RTNL(); 12373 bnxt_hwrm_func_qcaps(bp); 12374 12375 if (netif_running(bp->dev)) 12376 __bnxt_close_nic(bp, true, false); 12377 12378 bnxt_ulp_irq_stop(bp); 12379 bnxt_clear_int_mode(bp); 12380 rc = bnxt_init_int_mode(bp); 12381 bnxt_ulp_irq_restart(bp, rc); 12382 12383 if (netif_running(bp->dev)) { 12384 if (rc) 12385 dev_close(bp->dev); 12386 else 12387 rc = bnxt_open_nic(bp, true, false); 12388 } 12389 12390 return rc; 12391 } 12392 12393 static int bnxt_init_mac_addr(struct bnxt *bp) 12394 { 12395 int rc = 0; 12396 12397 if (BNXT_PF(bp)) { 12398 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); 12399 } else { 12400 #ifdef CONFIG_BNXT_SRIOV 12401 struct bnxt_vf_info *vf 
= &bp->vf; 12402 bool strict_approval = true; 12403 12404 if (is_valid_ether_addr(vf->mac_addr)) { 12405 /* overwrite netdev dev_addr with admin VF MAC */ 12406 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); 12407 /* Older PF driver or firmware may not approve this 12408 * correctly. 12409 */ 12410 strict_approval = false; 12411 } else { 12412 eth_hw_addr_random(bp->dev); 12413 } 12414 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); 12415 #endif 12416 } 12417 return rc; 12418 } 12419 12420 #define BNXT_VPD_LEN 512 12421 static void bnxt_vpd_read_info(struct bnxt *bp) 12422 { 12423 struct pci_dev *pdev = bp->pdev; 12424 int i, len, pos, ro_size, size; 12425 ssize_t vpd_size; 12426 u8 *vpd_data; 12427 12428 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL); 12429 if (!vpd_data) 12430 return; 12431 12432 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data); 12433 if (vpd_size <= 0) { 12434 netdev_err(bp->dev, "Unable to read VPD\n"); 12435 goto exit; 12436 } 12437 12438 i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); 12439 if (i < 0) { 12440 netdev_err(bp->dev, "VPD READ-Only not found\n"); 12441 goto exit; 12442 } 12443 12444 ro_size = pci_vpd_lrdt_size(&vpd_data[i]); 12445 i += PCI_VPD_LRDT_TAG_SIZE; 12446 if (i + ro_size > vpd_size) 12447 goto exit; 12448 12449 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, 12450 PCI_VPD_RO_KEYWORD_PARTNO); 12451 if (pos < 0) 12452 goto read_sn; 12453 12454 len = pci_vpd_info_field_size(&vpd_data[pos]); 12455 pos += PCI_VPD_INFO_FLD_HDR_SIZE; 12456 if (len + pos > vpd_size) 12457 goto read_sn; 12458 12459 size = min(len, BNXT_VPD_FLD_LEN - 1); 12460 memcpy(bp->board_partno, &vpd_data[pos], size); 12461 12462 read_sn: 12463 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, 12464 PCI_VPD_RO_KEYWORD_SERIALNO); 12465 if (pos < 0) 12466 goto exit; 12467 12468 len = pci_vpd_info_field_size(&vpd_data[pos]); 12469 pos += PCI_VPD_INFO_FLD_HDR_SIZE; 12470 if (len + pos > vpd_size) 12471 goto exit; 12472 12473 size = min(len, BNXT_VPD_FLD_LEN - 1); 12474 memcpy(bp->board_serialno, &vpd_data[pos], size); 12475 exit: 12476 kfree(vpd_data); 12477 } 12478 12479 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) 12480 { 12481 struct pci_dev *pdev = bp->pdev; 12482 u64 qword; 12483 12484 qword = pci_get_dsn(pdev); 12485 if (!qword) { 12486 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); 12487 return -EOPNOTSUPP; 12488 } 12489 12490 put_unaligned_le64(qword, dsn); 12491 12492 bp->flags |= BNXT_FLAG_DSN_VALID; 12493 return 0; 12494 } 12495 12496 static int bnxt_map_db_bar(struct bnxt *bp) 12497 { 12498 if (!bp->db_size) 12499 return -ENODEV; 12500 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); 12501 if (!bp->bar1) 12502 return -ENOMEM; 12503 return 0; 12504 } 12505 12506 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 12507 { 12508 struct net_device *dev; 12509 struct bnxt *bp; 12510 int rc, max_irqs; 12511 12512 if (pci_is_bridge(pdev)) 12513 return -ENODEV; 12514 12515 /* Clear any pending DMA transactions from crash kernel 12516 * while loading driver in capture kernel. 
12517 */ 12518 if (is_kdump_kernel()) { 12519 pci_clear_master(pdev); 12520 pcie_flr(pdev); 12521 } 12522 12523 max_irqs = bnxt_get_max_irq(pdev); 12524 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); 12525 if (!dev) 12526 return -ENOMEM; 12527 12528 bp = netdev_priv(dev); 12529 bp->msg_enable = BNXT_DEF_MSG_ENABLE; 12530 bnxt_set_max_func_irqs(bp, max_irqs); 12531 12532 if (bnxt_vf_pciid(ent->driver_data)) 12533 bp->flags |= BNXT_FLAG_VF; 12534 12535 if (pdev->msix_cap) 12536 bp->flags |= BNXT_FLAG_MSIX_CAP; 12537 12538 rc = bnxt_init_board(pdev, dev); 12539 if (rc < 0) 12540 goto init_err_free; 12541 12542 dev->netdev_ops = &bnxt_netdev_ops; 12543 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 12544 dev->ethtool_ops = &bnxt_ethtool_ops; 12545 pci_set_drvdata(pdev, dev); 12546 12547 if (BNXT_PF(bp)) 12548 bnxt_vpd_read_info(bp); 12549 12550 rc = bnxt_alloc_hwrm_resources(bp); 12551 if (rc) 12552 goto init_err_pci_clean; 12553 12554 mutex_init(&bp->hwrm_cmd_lock); 12555 mutex_init(&bp->link_lock); 12556 12557 rc = bnxt_fw_init_one_p1(bp); 12558 if (rc) 12559 goto init_err_pci_clean; 12560 12561 if (BNXT_CHIP_P5(bp)) { 12562 bp->flags |= BNXT_FLAG_CHIP_P5; 12563 if (BNXT_CHIP_SR2(bp)) 12564 bp->flags |= BNXT_FLAG_CHIP_SR2; 12565 } 12566 12567 rc = bnxt_alloc_rss_indir_tbl(bp); 12568 if (rc) 12569 goto init_err_pci_clean; 12570 12571 rc = bnxt_fw_init_one_p2(bp); 12572 if (rc) 12573 goto init_err_pci_clean; 12574 12575 rc = bnxt_map_db_bar(bp); 12576 if (rc) { 12577 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", 12578 rc); 12579 goto init_err_pci_clean; 12580 } 12581 12582 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 12583 NETIF_F_TSO | NETIF_F_TSO6 | 12584 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 12585 NETIF_F_GSO_IPXIP4 | 12586 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 12587 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 12588 NETIF_F_RXCSUM | NETIF_F_GRO; 12589 12590 if (BNXT_SUPPORTS_TPA(bp)) 12591 dev->hw_features |= NETIF_F_LRO; 12592 12593 dev->hw_enc_features = 12594 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 12595 NETIF_F_TSO | NETIF_F_TSO6 | 12596 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 12597 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 12598 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 12599 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; 12600 12601 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 12602 NETIF_F_GSO_GRE_CSUM; 12603 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 12604 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) 12605 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 12606 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 12607 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; 12608 if (BNXT_SUPPORTS_TPA(bp)) 12609 dev->hw_features |= NETIF_F_GRO_HW; 12610 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 12611 if (dev->features & NETIF_F_GRO_HW) 12612 dev->features &= ~NETIF_F_LRO; 12613 dev->priv_flags |= IFF_UNICAST_FLT; 12614 12615 #ifdef CONFIG_BNXT_SRIOV 12616 init_waitqueue_head(&bp->sriov_cfg_wait); 12617 mutex_init(&bp->sriov_lock); 12618 #endif 12619 if (BNXT_SUPPORTS_TPA(bp)) { 12620 bp->gro_func = bnxt_gro_func_5730x; 12621 if (BNXT_CHIP_P4(bp)) 12622 bp->gro_func = bnxt_gro_func_5731x; 12623 else if (BNXT_CHIP_P5(bp)) 12624 bp->gro_func = bnxt_gro_func_5750x; 12625 } 12626 if (!BNXT_CHIP_P4_PLUS(bp)) 12627 bp->flags |= BNXT_FLAG_DOUBLE_DB; 12628 12629 bp->ulp_probe = bnxt_ulp_probe; 12630 12631 rc = bnxt_init_mac_addr(bp); 12632 if (rc) { 12633 dev_err(&pdev->dev, "Unable to 
initialize mac address.\n"); 12634 rc = -EADDRNOTAVAIL; 12635 goto init_err_pci_clean; 12636 } 12637 12638 if (BNXT_PF(bp)) { 12639 /* Read the adapter's DSN to use as the eswitch switch_id */ 12640 rc = bnxt_pcie_dsn_get(bp, bp->dsn); 12641 } 12642 12643 /* MTU range: 60 - FW defined max */ 12644 dev->min_mtu = ETH_ZLEN; 12645 dev->max_mtu = bp->max_mtu; 12646 12647 rc = bnxt_probe_phy(bp, true); 12648 if (rc) 12649 goto init_err_pci_clean; 12650 12651 bnxt_set_rx_skb_mode(bp, false); 12652 bnxt_set_tpa_flags(bp); 12653 bnxt_set_ring_params(bp); 12654 rc = bnxt_set_dflt_rings(bp, true); 12655 if (rc) { 12656 netdev_err(bp->dev, "Not enough rings available.\n"); 12657 rc = -ENOMEM; 12658 goto init_err_pci_clean; 12659 } 12660 12661 bnxt_fw_init_one_p3(bp); 12662 12663 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) 12664 bp->flags |= BNXT_FLAG_STRIP_VLAN; 12665 12666 rc = bnxt_init_int_mode(bp); 12667 if (rc) 12668 goto init_err_pci_clean; 12669 12670 /* No TC has been set yet and rings may have been trimmed due to 12671 * limited MSIX, so we re-initialize the TX rings per TC. 12672 */ 12673 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 12674 12675 if (BNXT_PF(bp)) { 12676 if (!bnxt_pf_wq) { 12677 bnxt_pf_wq = 12678 create_singlethread_workqueue("bnxt_pf_wq"); 12679 if (!bnxt_pf_wq) { 12680 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 12681 rc = -ENOMEM; 12682 goto init_err_pci_clean; 12683 } 12684 } 12685 rc = bnxt_init_tc(bp); 12686 if (rc) 12687 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", 12688 rc); 12689 } 12690 12691 bnxt_dl_register(bp); 12692 12693 rc = register_netdev(dev); 12694 if (rc) 12695 goto init_err_cleanup; 12696 12697 if (BNXT_PF(bp)) 12698 devlink_port_type_eth_set(&bp->dl_port, bp->dev); 12699 bnxt_dl_fw_reporters_create(bp); 12700 12701 netdev_info(dev, "%s found at mem %lx, node addr %pM\n", 12702 board_info[ent->driver_data].name, 12703 (long)pci_resource_start(pdev, 0), dev->dev_addr); 12704 pcie_print_link_status(pdev); 12705 12706 pci_save_state(pdev); 12707 return 0; 12708 12709 init_err_cleanup: 12710 bnxt_dl_unregister(bp); 12711 bnxt_shutdown_tc(bp); 12712 bnxt_clear_int_mode(bp); 12713 12714 init_err_pci_clean: 12715 bnxt_hwrm_func_drv_unrgtr(bp); 12716 bnxt_free_hwrm_short_cmd_req(bp); 12717 bnxt_free_hwrm_resources(bp); 12718 kfree(bp->fw_health); 12719 bp->fw_health = NULL; 12720 bnxt_cleanup_pci(bp); 12721 bnxt_free_ctx_mem(bp); 12722 kfree(bp->ctx); 12723 bp->ctx = NULL; 12724 kfree(bp->rss_indir_tbl); 12725 bp->rss_indir_tbl = NULL; 12726 12727 init_err_free: 12728 free_netdev(dev); 12729 return rc; 12730 } 12731 12732 static void bnxt_shutdown(struct pci_dev *pdev) 12733 { 12734 struct net_device *dev = pci_get_drvdata(pdev); 12735 struct bnxt *bp; 12736 12737 if (!dev) 12738 return; 12739 12740 rtnl_lock(); 12741 bp = netdev_priv(dev); 12742 if (!bp) 12743 goto shutdown_exit; 12744 12745 if (netif_running(dev)) 12746 dev_close(dev); 12747 12748 bnxt_ulp_shutdown(bp); 12749 bnxt_clear_int_mode(bp); 12750 pci_disable_device(pdev); 12751 12752 if (system_state == SYSTEM_POWER_OFF) { 12753 pci_wake_from_d3(pdev, bp->wol); 12754 pci_set_power_state(pdev, PCI_D3hot); 12755 } 12756 12757 shutdown_exit: 12758 rtnl_unlock(); 12759 } 12760 12761 #ifdef CONFIG_PM_SLEEP 12762 static int bnxt_suspend(struct device *device) 12763 { 12764 struct net_device *dev = dev_get_drvdata(device); 12765 struct bnxt *bp = netdev_priv(dev); 12766 int rc = 0; 12767 12768 rtnl_lock(); 12769 bnxt_ulp_stop(bp); 12770 if (netif_running(dev)) { 
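		/* System suspend: detach the net device and close the NIC so
		 * no traffic or NAPI activity is in flight while the driver
		 * deregisters from firmware and the PCI device is disabled
		 * below.
		 */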
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (state == pci_channel_io_frozen)
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0, off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is restored only by rewriting
		 * the BARs.
		 *
		 * As pci_restore_state() does not re-write a BAR whose value
		 * matches the previously saved value, the driver writes the
		 * BARs to 0 to force a restore after a fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	bnxt_ulp_start(bp, err);
	if (!err) {
		bnxt_reenable_sriov(bp);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);
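
/* Note on the error handlers above: in the standard PCI AER recovery flow,
 * the core first calls .error_detected (bnxt_io_error_detected); when that
 * returns PCI_ERS_RESULT_NEED_RESET, the slot is reset and .slot_reset
 * (bnxt_io_slot_reset) is invoked, and .resume (bnxt_io_resume) runs last
 * to reopen the interface once recovery has succeeded.
 */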