1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2019 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 13 #include <linux/stringify.h> 14 #include <linux/kernel.h> 15 #include <linux/timer.h> 16 #include <linux/errno.h> 17 #include <linux/ioport.h> 18 #include <linux/slab.h> 19 #include <linux/vmalloc.h> 20 #include <linux/interrupt.h> 21 #include <linux/pci.h> 22 #include <linux/netdevice.h> 23 #include <linux/etherdevice.h> 24 #include <linux/skbuff.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/bitops.h> 27 #include <linux/io.h> 28 #include <linux/irq.h> 29 #include <linux/delay.h> 30 #include <asm/byteorder.h> 31 #include <asm/page.h> 32 #include <linux/time.h> 33 #include <linux/mii.h> 34 #include <linux/mdio.h> 35 #include <linux/if.h> 36 #include <linux/if_vlan.h> 37 #include <linux/if_bridge.h> 38 #include <linux/rtc.h> 39 #include <linux/bpf.h> 40 #include <net/ip.h> 41 #include <net/tcp.h> 42 #include <net/udp.h> 43 #include <net/checksum.h> 44 #include <net/ip6_checksum.h> 45 #include <net/udp_tunnel.h> 46 #include <linux/workqueue.h> 47 #include <linux/prefetch.h> 48 #include <linux/cache.h> 49 #include <linux/log2.h> 50 #include <linux/aer.h> 51 #include <linux/bitmap.h> 52 #include <linux/cpu_rmap.h> 53 #include <linux/cpumask.h> 54 #include <net/pkt_cls.h> 55 #include <linux/hwmon.h> 56 #include <linux/hwmon-sysfs.h> 57 #include <net/page_pool.h> 58 59 #include "bnxt_hsi.h" 60 #include "bnxt.h" 61 #include "bnxt_ulp.h" 62 #include "bnxt_sriov.h" 63 #include "bnxt_ethtool.h" 64 #include "bnxt_dcb.h" 65 #include "bnxt_xdp.h" 66 #include "bnxt_vfr.h" 67 #include "bnxt_tc.h" 68 #include "bnxt_devlink.h" 69 #include "bnxt_debugfs.h" 70 71 #define BNXT_TX_TIMEOUT (5 * HZ) 72 73 MODULE_LICENSE("GPL"); 74 MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); 75 76 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) 77 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD 78 #define BNXT_RX_COPY_THRESH 256 79 80 #define BNXT_TX_PUSH_THRESH 164 81 82 enum board_idx { 83 BCM57301, 84 BCM57302, 85 BCM57304, 86 BCM57417_NPAR, 87 BCM58700, 88 BCM57311, 89 BCM57312, 90 BCM57402, 91 BCM57404, 92 BCM57406, 93 BCM57402_NPAR, 94 BCM57407, 95 BCM57412, 96 BCM57414, 97 BCM57416, 98 BCM57417, 99 BCM57412_NPAR, 100 BCM57314, 101 BCM57417_SFP, 102 BCM57416_SFP, 103 BCM57404_NPAR, 104 BCM57406_NPAR, 105 BCM57407_SFP, 106 BCM57407_NPAR, 107 BCM57414_NPAR, 108 BCM57416_NPAR, 109 BCM57452, 110 BCM57454, 111 BCM5745x_NPAR, 112 BCM57508, 113 BCM57504, 114 BCM57502, 115 BCM57508_NPAR, 116 BCM57504_NPAR, 117 BCM57502_NPAR, 118 BCM58802, 119 BCM58804, 120 BCM58808, 121 NETXTREME_E_VF, 122 NETXTREME_C_VF, 123 NETXTREME_S_VF, 124 NETXTREME_E_P5_VF, 125 }; 126 127 /* indexed by enum above */ 128 static const struct { 129 char *name; 130 } board_info[] = { 131 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" }, 132 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" }, 133 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 134 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, 135 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" }, 136 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" }, 137 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb 
Ethernet" }, 138 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, 139 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, 140 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, 141 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, 142 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, 143 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, 144 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, 145 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 146 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 147 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 148 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 149 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 150 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 151 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 152 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 153 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 154 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 155 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 156 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 157 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 158 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 159 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 160 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 161 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 162 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 163 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 164 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 165 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 166 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 167 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 168 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 169 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 170 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 171 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 172 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 173 }; 174 175 static const struct pci_device_id bnxt_pci_tbl[] = { 176 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 177 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 178 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 179 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 180 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 181 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 182 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, 183 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 184 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 185 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 186 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 
187 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 188 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 189 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 190 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 191 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 192 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 193 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 194 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 195 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 196 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 197 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 198 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 199 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 200 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 201 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 202 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 203 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 204 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 205 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 206 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 207 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 208 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 209 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 210 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 211 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 212 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 213 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 214 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR }, 215 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 216 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR }, 217 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR }, 218 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 219 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR }, 220 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 221 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 222 #ifdef CONFIG_BNXT_SRIOV 223 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 224 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 225 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 226 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 227 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 228 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 229 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 230 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 231 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 232 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 233 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 234 #endif 235 { 0 } 236 }; 237 238 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 239 240 static const u16 bnxt_vf_req_snif[] = { 241 HWRM_FUNC_CFG, 242 HWRM_FUNC_VF_CFG, 243 HWRM_PORT_PHY_QCFG, 244 HWRM_CFA_L2_FILTER_ALLOC, 245 }; 246 247 static const u16 bnxt_async_events_arr[] = { 248 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 249 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, 250 
ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 251 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 252 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 253 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 254 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, 255 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, 256 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, 257 }; 258 259 static struct workqueue_struct *bnxt_pf_wq; 260 261 static bool bnxt_vf_pciid(enum board_idx idx) 262 { 263 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || 264 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF); 265 } 266 267 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) 268 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) 269 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS) 270 271 #define BNXT_CP_DB_IRQ_DIS(db) \ 272 writel(DB_CP_IRQ_DIS_FLAGS, db) 273 274 #define BNXT_DB_CQ(db, idx) \ 275 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell) 276 277 #define BNXT_DB_NQ_P5(db, idx) \ 278 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell) 279 280 #define BNXT_DB_CQ_ARM(db, idx) \ 281 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell) 282 283 #define BNXT_DB_NQ_ARM_P5(db, idx) \ 284 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell) 285 286 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 287 { 288 if (bp->flags & BNXT_FLAG_CHIP_P5) 289 BNXT_DB_NQ_P5(db, idx); 290 else 291 BNXT_DB_CQ(db, idx); 292 } 293 294 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 295 { 296 if (bp->flags & BNXT_FLAG_CHIP_P5) 297 BNXT_DB_NQ_ARM_P5(db, idx); 298 else 299 BNXT_DB_CQ_ARM(db, idx); 300 } 301 302 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 303 { 304 if (bp->flags & BNXT_FLAG_CHIP_P5) 305 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx), 306 db->doorbell); 307 else 308 BNXT_DB_CQ(db, idx); 309 } 310 311 const u16 bnxt_lhint_arr[] = { 312 TX_BD_FLAGS_LHINT_512_AND_SMALLER, 313 TX_BD_FLAGS_LHINT_512_TO_1023, 314 TX_BD_FLAGS_LHINT_1024_TO_2047, 315 TX_BD_FLAGS_LHINT_1024_TO_2047, 316 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 317 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 318 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 319 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 320 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 321 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 322 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 323 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 324 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 325 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 326 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 327 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 328 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 329 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 330 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 331 }; 332 333 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) 334 { 335 struct metadata_dst *md_dst = skb_metadata_dst(skb); 336 337 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 338 return 0; 339 340 return md_dst->u.port_info.port_id; 341 } 342 343 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 344 { 345 struct bnxt *bp = netdev_priv(dev); 346 struct tx_bd *txbd; 347 struct tx_bd_ext *txbd1; 348 struct netdev_queue *txq; 349 int i; 350 dma_addr_t mapping; 351 unsigned int length, pad = 0; 352 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 353 u16 prod, last_frag; 354 struct pci_dev *pdev = bp->pdev; 355 struct bnxt_tx_ring_info *txr; 356 struct bnxt_sw_tx_bd *tx_buf; 357 358 i = skb_get_queue_mapping(skb); 359 if (unlikely(i >= bp->tx_nr_rings)) { 360 dev_kfree_skb_any(skb); 361 return 
NETDEV_TX_OK; 362 } 363 364 txq = netdev_get_tx_queue(dev, i); 365 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 366 prod = txr->tx_prod; 367 368 free_size = bnxt_tx_avail(bp, txr); 369 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 370 netif_tx_stop_queue(txq); 371 return NETDEV_TX_BUSY; 372 } 373 374 length = skb->len; 375 len = skb_headlen(skb); 376 last_frag = skb_shinfo(skb)->nr_frags; 377 378 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 379 380 txbd->tx_bd_opaque = prod; 381 382 tx_buf = &txr->tx_buf_ring[prod]; 383 tx_buf->skb = skb; 384 tx_buf->nr_frags = last_frag; 385 386 vlan_tag_flags = 0; 387 cfa_action = bnxt_xmit_get_cfa_action(skb); 388 if (skb_vlan_tag_present(skb)) { 389 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 390 skb_vlan_tag_get(skb); 391 /* Currently supports 8021Q, 8021AD vlan offloads 392 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 393 */ 394 if (skb->vlan_proto == htons(ETH_P_8021Q)) 395 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 396 } 397 398 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 399 struct tx_push_buffer *tx_push_buf = txr->tx_push; 400 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 401 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 402 void __iomem *db = txr->tx_db.doorbell; 403 void *pdata = tx_push_buf->data; 404 u64 *end; 405 int j, push_len; 406 407 /* Set COAL_NOW to be ready quickly for the next push */ 408 tx_push->tx_bd_len_flags_type = 409 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 410 TX_BD_TYPE_LONG_TX_BD | 411 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 412 TX_BD_FLAGS_COAL_NOW | 413 TX_BD_FLAGS_PACKET_END | 414 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 415 416 if (skb->ip_summed == CHECKSUM_PARTIAL) 417 tx_push1->tx_bd_hsize_lflags = 418 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 419 else 420 tx_push1->tx_bd_hsize_lflags = 0; 421 422 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 423 tx_push1->tx_bd_cfa_action = 424 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 425 426 end = pdata + length; 427 end = PTR_ALIGN(end, 8) - 1; 428 *end = 0; 429 430 skb_copy_from_linear_data(skb, pdata, len); 431 pdata += len; 432 for (j = 0; j < last_frag; j++) { 433 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 434 void *fptr; 435 436 fptr = skb_frag_address_safe(frag); 437 if (!fptr) 438 goto normal_tx; 439 440 memcpy(pdata, fptr, skb_frag_size(frag)); 441 pdata += skb_frag_size(frag); 442 } 443 444 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 445 txbd->tx_bd_haddr = txr->data_mapping; 446 prod = NEXT_TX(prod); 447 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 448 memcpy(txbd, tx_push1, sizeof(*txbd)); 449 prod = NEXT_TX(prod); 450 tx_push->doorbell = 451 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 452 txr->tx_prod = prod; 453 454 tx_buf->is_push = 1; 455 netdev_tx_sent_queue(txq, skb->len); 456 wmb(); /* Sync is_push and byte queue before pushing data */ 457 458 push_len = (length + sizeof(*tx_push) + 7) / 8; 459 if (push_len > 16) { 460 __iowrite64_copy(db, tx_push_buf, 16); 461 __iowrite32_copy(db + 4, tx_push_buf + 1, 462 (push_len - 16) << 1); 463 } else { 464 __iowrite64_copy(db, tx_push_buf, push_len); 465 } 466 467 goto tx_done; 468 } 469 470 normal_tx: 471 if (length < BNXT_MIN_PKT_SIZE) { 472 pad = BNXT_MIN_PKT_SIZE - length; 473 if (skb_pad(skb, pad)) { 474 /* SKB already freed. 
*/ 475 tx_buf->skb = NULL; 476 return NETDEV_TX_OK; 477 } 478 length = BNXT_MIN_PKT_SIZE; 479 } 480 481 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 482 483 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { 484 dev_kfree_skb_any(skb); 485 tx_buf->skb = NULL; 486 return NETDEV_TX_OK; 487 } 488 489 dma_unmap_addr_set(tx_buf, mapping, mapping); 490 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 491 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 492 493 txbd->tx_bd_haddr = cpu_to_le64(mapping); 494 495 prod = NEXT_TX(prod); 496 txbd1 = (struct tx_bd_ext *) 497 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 498 499 txbd1->tx_bd_hsize_lflags = 0; 500 if (skb_is_gso(skb)) { 501 u32 hdr_len; 502 503 if (skb->encapsulation) 504 hdr_len = skb_inner_network_offset(skb) + 505 skb_inner_network_header_len(skb) + 506 inner_tcp_hdrlen(skb); 507 else 508 hdr_len = skb_transport_offset(skb) + 509 tcp_hdrlen(skb); 510 511 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | 512 TX_BD_FLAGS_T_IPID | 513 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 514 length = skb_shinfo(skb)->gso_size; 515 txbd1->tx_bd_mss = cpu_to_le32(length); 516 length += hdr_len; 517 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 518 txbd1->tx_bd_hsize_lflags = 519 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 520 txbd1->tx_bd_mss = 0; 521 } 522 523 length >>= 9; 524 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 525 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 526 skb->len); 527 i = 0; 528 goto tx_dma_error; 529 } 530 flags |= bnxt_lhint_arr[length]; 531 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 532 533 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 534 txbd1->tx_bd_cfa_action = 535 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 536 for (i = 0; i < last_frag; i++) { 537 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 538 539 prod = NEXT_TX(prod); 540 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 541 542 len = skb_frag_size(frag); 543 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 544 DMA_TO_DEVICE); 545 546 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 547 goto tx_dma_error; 548 549 tx_buf = &txr->tx_buf_ring[prod]; 550 dma_unmap_addr_set(tx_buf, mapping, mapping); 551 552 txbd->tx_bd_haddr = cpu_to_le64(mapping); 553 554 flags = len << TX_BD_LEN_SHIFT; 555 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 556 } 557 558 flags &= ~TX_BD_LEN; 559 txbd->tx_bd_len_flags_type = 560 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 561 TX_BD_FLAGS_PACKET_END); 562 563 netdev_tx_sent_queue(txq, skb->len); 564 565 /* Sync BD data before updating doorbell */ 566 wmb(); 567 568 prod = NEXT_TX(prod); 569 txr->tx_prod = prod; 570 571 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) 572 bnxt_db_write(bp, &txr->tx_db, prod); 573 574 tx_done: 575 576 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 577 if (netdev_xmit_more() && !tx_buf->is_push) 578 bnxt_db_write(bp, &txr->tx_db, prod); 579 580 netif_tx_stop_queue(txq); 581 582 /* netif_tx_stop_queue() must be done before checking 583 * tx index in bnxt_tx_avail() below, because in 584 * bnxt_tx_int(), we update tx index before checking for 585 * netif_tx_queue_stopped(). 
586 */ 587 smp_mb(); 588 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) 589 netif_tx_wake_queue(txq); 590 } 591 return NETDEV_TX_OK; 592 593 tx_dma_error: 594 last_frag = i; 595 596 /* start back at beginning and unmap skb */ 597 prod = txr->tx_prod; 598 tx_buf = &txr->tx_buf_ring[prod]; 599 tx_buf->skb = NULL; 600 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 601 skb_headlen(skb), PCI_DMA_TODEVICE); 602 prod = NEXT_TX(prod); 603 604 /* unmap remaining mapped pages */ 605 for (i = 0; i < last_frag; i++) { 606 prod = NEXT_TX(prod); 607 tx_buf = &txr->tx_buf_ring[prod]; 608 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 609 skb_frag_size(&skb_shinfo(skb)->frags[i]), 610 PCI_DMA_TODEVICE); 611 } 612 613 dev_kfree_skb_any(skb); 614 return NETDEV_TX_OK; 615 } 616 617 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) 618 { 619 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 620 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 621 u16 cons = txr->tx_cons; 622 struct pci_dev *pdev = bp->pdev; 623 int i; 624 unsigned int tx_bytes = 0; 625 626 for (i = 0; i < nr_pkts; i++) { 627 struct bnxt_sw_tx_bd *tx_buf; 628 struct sk_buff *skb; 629 int j, last; 630 631 tx_buf = &txr->tx_buf_ring[cons]; 632 cons = NEXT_TX(cons); 633 skb = tx_buf->skb; 634 tx_buf->skb = NULL; 635 636 if (tx_buf->is_push) { 637 tx_buf->is_push = 0; 638 goto next_tx_int; 639 } 640 641 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 642 skb_headlen(skb), PCI_DMA_TODEVICE); 643 last = tx_buf->nr_frags; 644 645 for (j = 0; j < last; j++) { 646 cons = NEXT_TX(cons); 647 tx_buf = &txr->tx_buf_ring[cons]; 648 dma_unmap_page( 649 &pdev->dev, 650 dma_unmap_addr(tx_buf, mapping), 651 skb_frag_size(&skb_shinfo(skb)->frags[j]), 652 PCI_DMA_TODEVICE); 653 } 654 655 next_tx_int: 656 cons = NEXT_TX(cons); 657 658 tx_bytes += skb->len; 659 dev_kfree_skb_any(skb); 660 } 661 662 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); 663 txr->tx_cons = cons; 664 665 /* Need to make the tx_cons update visible to bnxt_start_xmit() 666 * before checking for netif_tx_queue_stopped(). Without the 667 * memory barrier, there is a small possibility that bnxt_start_xmit() 668 * will miss it and cause the queue to be stopped forever. 
669 */ 670 smp_mb(); 671 672 if (unlikely(netif_tx_queue_stopped(txq)) && 673 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { 674 __netif_tx_lock(txq, smp_processor_id()); 675 if (netif_tx_queue_stopped(txq) && 676 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 677 txr->dev_state != BNXT_DEV_STATE_CLOSING) 678 netif_tx_wake_queue(txq); 679 __netif_tx_unlock(txq); 680 } 681 } 682 683 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 684 struct bnxt_rx_ring_info *rxr, 685 gfp_t gfp) 686 { 687 struct device *dev = &bp->pdev->dev; 688 struct page *page; 689 690 page = page_pool_dev_alloc_pages(rxr->page_pool); 691 if (!page) 692 return NULL; 693 694 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, 695 DMA_ATTR_WEAK_ORDERING); 696 if (dma_mapping_error(dev, *mapping)) { 697 page_pool_recycle_direct(rxr->page_pool, page); 698 return NULL; 699 } 700 *mapping += bp->rx_dma_offset; 701 return page; 702 } 703 704 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, 705 gfp_t gfp) 706 { 707 u8 *data; 708 struct pci_dev *pdev = bp->pdev; 709 710 data = kmalloc(bp->rx_buf_size, gfp); 711 if (!data) 712 return NULL; 713 714 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 715 bp->rx_buf_use_size, bp->rx_dir, 716 DMA_ATTR_WEAK_ORDERING); 717 718 if (dma_mapping_error(&pdev->dev, *mapping)) { 719 kfree(data); 720 data = NULL; 721 } 722 return data; 723 } 724 725 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 726 u16 prod, gfp_t gfp) 727 { 728 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 729 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; 730 dma_addr_t mapping; 731 732 if (BNXT_RX_PAGE_MODE(bp)) { 733 struct page *page = 734 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); 735 736 if (!page) 737 return -ENOMEM; 738 739 rx_buf->data = page; 740 rx_buf->data_ptr = page_address(page) + bp->rx_offset; 741 } else { 742 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); 743 744 if (!data) 745 return -ENOMEM; 746 747 rx_buf->data = data; 748 rx_buf->data_ptr = data + bp->rx_offset; 749 } 750 rx_buf->mapping = mapping; 751 752 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 753 return 0; 754 } 755 756 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 757 { 758 u16 prod = rxr->rx_prod; 759 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 760 struct rx_bd *cons_bd, *prod_bd; 761 762 prod_rx_buf = &rxr->rx_buf_ring[prod]; 763 cons_rx_buf = &rxr->rx_buf_ring[cons]; 764 765 prod_rx_buf->data = data; 766 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 767 768 prod_rx_buf->mapping = cons_rx_buf->mapping; 769 770 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 771 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 772 773 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 774 } 775 776 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 777 { 778 u16 next, max = rxr->rx_agg_bmap_size; 779 780 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 781 if (next >= max) 782 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 783 return next; 784 } 785 786 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 787 struct bnxt_rx_ring_info *rxr, 788 u16 prod, gfp_t gfp) 789 { 790 struct rx_bd *rxbd = 791 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 792 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 793 struct pci_dev *pdev = bp->pdev; 794 struct page *page; 795 dma_addr_t mapping; 796 u16 sw_prod = rxr->rx_sw_agg_prod; 797 
unsigned int offset = 0; 798 799 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 800 page = rxr->rx_page; 801 if (!page) { 802 page = alloc_page(gfp); 803 if (!page) 804 return -ENOMEM; 805 rxr->rx_page = page; 806 rxr->rx_page_offset = 0; 807 } 808 offset = rxr->rx_page_offset; 809 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; 810 if (rxr->rx_page_offset == PAGE_SIZE) 811 rxr->rx_page = NULL; 812 else 813 get_page(page); 814 } else { 815 page = alloc_page(gfp); 816 if (!page) 817 return -ENOMEM; 818 } 819 820 mapping = dma_map_page_attrs(&pdev->dev, page, offset, 821 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, 822 DMA_ATTR_WEAK_ORDERING); 823 if (dma_mapping_error(&pdev->dev, mapping)) { 824 __free_page(page); 825 return -EIO; 826 } 827 828 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 829 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 830 831 __set_bit(sw_prod, rxr->rx_agg_bmap); 832 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 833 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 834 835 rx_agg_buf->page = page; 836 rx_agg_buf->offset = offset; 837 rx_agg_buf->mapping = mapping; 838 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 839 rxbd->rx_bd_opaque = sw_prod; 840 return 0; 841 } 842 843 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 844 struct bnxt_cp_ring_info *cpr, 845 u16 cp_cons, u16 curr) 846 { 847 struct rx_agg_cmp *agg; 848 849 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 850 agg = (struct rx_agg_cmp *) 851 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 852 return agg; 853 } 854 855 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 856 struct bnxt_rx_ring_info *rxr, 857 u16 agg_id, u16 curr) 858 { 859 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 860 861 return &tpa_info->agg_arr[curr]; 862 } 863 864 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 865 u16 start, u32 agg_bufs, bool tpa) 866 { 867 struct bnxt_napi *bnapi = cpr->bnapi; 868 struct bnxt *bp = bnapi->bp; 869 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 870 u16 prod = rxr->rx_agg_prod; 871 u16 sw_prod = rxr->rx_sw_agg_prod; 872 bool p5_tpa = false; 873 u32 i; 874 875 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 876 p5_tpa = true; 877 878 for (i = 0; i < agg_bufs; i++) { 879 u16 cons; 880 struct rx_agg_cmp *agg; 881 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 882 struct rx_bd *prod_bd; 883 struct page *page; 884 885 if (p5_tpa) 886 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 887 else 888 agg = bnxt_get_agg(bp, cpr, idx, start + i); 889 cons = agg->rx_agg_cmp_opaque; 890 __clear_bit(cons, rxr->rx_agg_bmap); 891 892 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 893 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 894 895 __set_bit(sw_prod, rxr->rx_agg_bmap); 896 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 897 cons_rx_buf = &rxr->rx_agg_ring[cons]; 898 899 /* It is possible for sw_prod to be equal to cons, so 900 * set cons_rx_buf->page to NULL first. 
901 */ 902 page = cons_rx_buf->page; 903 cons_rx_buf->page = NULL; 904 prod_rx_buf->page = page; 905 prod_rx_buf->offset = cons_rx_buf->offset; 906 907 prod_rx_buf->mapping = cons_rx_buf->mapping; 908 909 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 910 911 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 912 prod_bd->rx_bd_opaque = sw_prod; 913 914 prod = NEXT_RX_AGG(prod); 915 sw_prod = NEXT_RX_AGG(sw_prod); 916 } 917 rxr->rx_agg_prod = prod; 918 rxr->rx_sw_agg_prod = sw_prod; 919 } 920 921 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 922 struct bnxt_rx_ring_info *rxr, 923 u16 cons, void *data, u8 *data_ptr, 924 dma_addr_t dma_addr, 925 unsigned int offset_and_len) 926 { 927 unsigned int payload = offset_and_len >> 16; 928 unsigned int len = offset_and_len & 0xffff; 929 skb_frag_t *frag; 930 struct page *page = data; 931 u16 prod = rxr->rx_prod; 932 struct sk_buff *skb; 933 int off, err; 934 935 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 936 if (unlikely(err)) { 937 bnxt_reuse_rx_data(rxr, cons, data); 938 return NULL; 939 } 940 dma_addr -= bp->rx_dma_offset; 941 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, 942 DMA_ATTR_WEAK_ORDERING); 943 page_pool_release_page(rxr->page_pool, page); 944 945 if (unlikely(!payload)) 946 payload = eth_get_headlen(bp->dev, data_ptr, len); 947 948 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 949 if (!skb) { 950 __free_page(page); 951 return NULL; 952 } 953 954 off = (void *)data_ptr - page_address(page); 955 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); 956 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 957 payload + NET_IP_ALIGN); 958 959 frag = &skb_shinfo(skb)->frags[0]; 960 skb_frag_size_sub(frag, payload); 961 skb_frag_off_add(frag, payload); 962 skb->data_len -= payload; 963 skb->tail += payload; 964 965 return skb; 966 } 967 968 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 969 struct bnxt_rx_ring_info *rxr, u16 cons, 970 void *data, u8 *data_ptr, 971 dma_addr_t dma_addr, 972 unsigned int offset_and_len) 973 { 974 u16 prod = rxr->rx_prod; 975 struct sk_buff *skb; 976 int err; 977 978 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 979 if (unlikely(err)) { 980 bnxt_reuse_rx_data(rxr, cons, data); 981 return NULL; 982 } 983 984 skb = build_skb(data, 0); 985 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 986 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 987 if (!skb) { 988 kfree(data); 989 return NULL; 990 } 991 992 skb_reserve(skb, bp->rx_offset); 993 skb_put(skb, offset_and_len & 0xffff); 994 return skb; 995 } 996 997 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, 998 struct bnxt_cp_ring_info *cpr, 999 struct sk_buff *skb, u16 idx, 1000 u32 agg_bufs, bool tpa) 1001 { 1002 struct bnxt_napi *bnapi = cpr->bnapi; 1003 struct pci_dev *pdev = bp->pdev; 1004 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1005 u16 prod = rxr->rx_agg_prod; 1006 bool p5_tpa = false; 1007 u32 i; 1008 1009 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 1010 p5_tpa = true; 1011 1012 for (i = 0; i < agg_bufs; i++) { 1013 u16 cons, frag_len; 1014 struct rx_agg_cmp *agg; 1015 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1016 struct page *page; 1017 dma_addr_t mapping; 1018 1019 if (p5_tpa) 1020 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1021 else 1022 agg = bnxt_get_agg(bp, cpr, idx, i); 1023 cons = agg->rx_agg_cmp_opaque; 1024 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1025 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1026 1027 cons_rx_buf = 
&rxr->rx_agg_ring[cons]; 1028 skb_fill_page_desc(skb, i, cons_rx_buf->page, 1029 cons_rx_buf->offset, frag_len); 1030 __clear_bit(cons, rxr->rx_agg_bmap); 1031 1032 /* It is possible for bnxt_alloc_rx_page() to allocate 1033 * a sw_prod index that equals the cons index, so we 1034 * need to clear the cons entry now. 1035 */ 1036 mapping = cons_rx_buf->mapping; 1037 page = cons_rx_buf->page; 1038 cons_rx_buf->page = NULL; 1039 1040 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1041 struct skb_shared_info *shinfo; 1042 unsigned int nr_frags; 1043 1044 shinfo = skb_shinfo(skb); 1045 nr_frags = --shinfo->nr_frags; 1046 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); 1047 1048 dev_kfree_skb(skb); 1049 1050 cons_rx_buf->page = page; 1051 1052 /* Update prod since possibly some pages have been 1053 * allocated already. 1054 */ 1055 rxr->rx_agg_prod = prod; 1056 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1057 return NULL; 1058 } 1059 1060 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1061 PCI_DMA_FROMDEVICE, 1062 DMA_ATTR_WEAK_ORDERING); 1063 1064 skb->data_len += frag_len; 1065 skb->len += frag_len; 1066 skb->truesize += PAGE_SIZE; 1067 1068 prod = NEXT_RX_AGG(prod); 1069 } 1070 rxr->rx_agg_prod = prod; 1071 return skb; 1072 } 1073 1074 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1075 u8 agg_bufs, u32 *raw_cons) 1076 { 1077 u16 last; 1078 struct rx_agg_cmp *agg; 1079 1080 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1081 last = RING_CMP(*raw_cons); 1082 agg = (struct rx_agg_cmp *) 1083 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1084 return RX_AGG_CMP_VALID(agg, *raw_cons); 1085 } 1086 1087 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1088 unsigned int len, 1089 dma_addr_t mapping) 1090 { 1091 struct bnxt *bp = bnapi->bp; 1092 struct pci_dev *pdev = bp->pdev; 1093 struct sk_buff *skb; 1094 1095 skb = napi_alloc_skb(&bnapi->napi, len); 1096 if (!skb) 1097 return NULL; 1098 1099 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 1100 bp->rx_dir); 1101 1102 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1103 len + NET_IP_ALIGN); 1104 1105 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1106 bp->rx_dir); 1107 1108 skb_put(skb, len); 1109 return skb; 1110 } 1111 1112 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1113 u32 *raw_cons, void *cmp) 1114 { 1115 struct rx_cmp *rxcmp = cmp; 1116 u32 tmp_raw_cons = *raw_cons; 1117 u8 cmp_type, agg_bufs = 0; 1118 1119 cmp_type = RX_CMP_TYPE(rxcmp); 1120 1121 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1122 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1123 RX_CMP_AGG_BUFS) >> 1124 RX_CMP_AGG_BUFS_SHIFT; 1125 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1126 struct rx_tpa_end_cmp *tpa_end = cmp; 1127 1128 if (bp->flags & BNXT_FLAG_CHIP_P5) 1129 return 0; 1130 1131 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1132 } 1133 1134 if (agg_bufs) { 1135 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1136 return -EBUSY; 1137 } 1138 *raw_cons = tmp_raw_cons; 1139 return 0; 1140 } 1141 1142 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) 1143 { 1144 if (BNXT_PF(bp)) 1145 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); 1146 else 1147 schedule_delayed_work(&bp->fw_reset_task, delay); 1148 } 1149 1150 static void bnxt_queue_sp_work(struct bnxt *bp) 1151 { 1152 if (BNXT_PF(bp)) 1153 queue_work(bnxt_pf_wq, &bp->sp_task); 1154 else 1155 
schedule_work(&bp->sp_task); 1156 } 1157 1158 static void bnxt_cancel_sp_work(struct bnxt *bp) 1159 { 1160 if (BNXT_PF(bp)) 1161 flush_workqueue(bnxt_pf_wq); 1162 else 1163 cancel_work_sync(&bp->sp_task); 1164 } 1165 1166 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1167 { 1168 if (!rxr->bnapi->in_reset) { 1169 rxr->bnapi->in_reset = true; 1170 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1171 bnxt_queue_sp_work(bp); 1172 } 1173 rxr->rx_next_cons = 0xffff; 1174 } 1175 1176 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1177 { 1178 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1179 u16 idx = agg_id & MAX_TPA_P5_MASK; 1180 1181 if (test_bit(idx, map->agg_idx_bmap)) 1182 idx = find_first_zero_bit(map->agg_idx_bmap, 1183 BNXT_AGG_IDX_BMAP_SIZE); 1184 __set_bit(idx, map->agg_idx_bmap); 1185 map->agg_id_tbl[agg_id] = idx; 1186 return idx; 1187 } 1188 1189 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1190 { 1191 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1192 1193 __clear_bit(idx, map->agg_idx_bmap); 1194 } 1195 1196 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1197 { 1198 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1199 1200 return map->agg_id_tbl[agg_id]; 1201 } 1202 1203 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1204 struct rx_tpa_start_cmp *tpa_start, 1205 struct rx_tpa_start_cmp_ext *tpa_start1) 1206 { 1207 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1208 struct bnxt_tpa_info *tpa_info; 1209 u16 cons, prod, agg_id; 1210 struct rx_bd *prod_bd; 1211 dma_addr_t mapping; 1212 1213 if (bp->flags & BNXT_FLAG_CHIP_P5) { 1214 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1215 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1216 } else { 1217 agg_id = TPA_START_AGG_ID(tpa_start); 1218 } 1219 cons = tpa_start->rx_tpa_start_cmp_opaque; 1220 prod = rxr->rx_prod; 1221 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1222 prod_rx_buf = &rxr->rx_buf_ring[prod]; 1223 tpa_info = &rxr->rx_tpa[agg_id]; 1224 1225 if (unlikely(cons != rxr->rx_next_cons || 1226 TPA_START_ERROR(tpa_start))) { 1227 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1228 cons, rxr->rx_next_cons, 1229 TPA_START_ERROR_CODE(tpa_start1)); 1230 bnxt_sched_reset(bp, rxr); 1231 return; 1232 } 1233 /* Store cfa_code in tpa_info to use in tpa_end 1234 * completion processing. 
1235 */ 1236 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1237 prod_rx_buf->data = tpa_info->data; 1238 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1239 1240 mapping = tpa_info->mapping; 1241 prod_rx_buf->mapping = mapping; 1242 1243 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 1244 1245 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1246 1247 tpa_info->data = cons_rx_buf->data; 1248 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1249 cons_rx_buf->data = NULL; 1250 tpa_info->mapping = cons_rx_buf->mapping; 1251 1252 tpa_info->len = 1253 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1254 RX_TPA_START_CMP_LEN_SHIFT; 1255 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1256 u32 hash_type = TPA_START_HASH_TYPE(tpa_start); 1257 1258 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1259 tpa_info->gso_type = SKB_GSO_TCPV4; 1260 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1261 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1)) 1262 tpa_info->gso_type = SKB_GSO_TCPV6; 1263 tpa_info->rss_hash = 1264 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1265 } else { 1266 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1267 tpa_info->gso_type = 0; 1268 if (netif_msg_rx_err(bp)) 1269 netdev_warn(bp->dev, "TPA packet without valid hash\n"); 1270 } 1271 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1272 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1273 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1274 tpa_info->agg_count = 0; 1275 1276 rxr->rx_prod = NEXT_RX(prod); 1277 cons = NEXT_RX(cons); 1278 rxr->rx_next_cons = NEXT_RX(cons); 1279 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1280 1281 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1282 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1283 cons_rx_buf->data = NULL; 1284 } 1285 1286 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1287 { 1288 if (agg_bufs) 1289 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1290 } 1291 1292 #ifdef CONFIG_INET 1293 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1294 { 1295 struct udphdr *uh = NULL; 1296 1297 if (ip_proto == htons(ETH_P_IP)) { 1298 struct iphdr *iph = (struct iphdr *)skb->data; 1299 1300 if (iph->protocol == IPPROTO_UDP) 1301 uh = (struct udphdr *)(iph + 1); 1302 } else { 1303 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1304 1305 if (iph->nexthdr == IPPROTO_UDP) 1306 uh = (struct udphdr *)(iph + 1); 1307 } 1308 if (uh) { 1309 if (uh->check) 1310 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1311 else 1312 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1313 } 1314 } 1315 #endif 1316 1317 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1318 int payload_off, int tcp_ts, 1319 struct sk_buff *skb) 1320 { 1321 #ifdef CONFIG_INET 1322 struct tcphdr *th; 1323 int len, nw_off; 1324 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1325 u32 hdr_info = tpa_info->hdr_info; 1326 bool loopback = false; 1327 1328 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1329 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1330 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1331 1332 /* If the packet is an internal loopback packet, the offsets will 1333 * have an extra 4 bytes. 1334 */ 1335 if (inner_mac_off == 4) { 1336 loopback = true; 1337 } else if (inner_mac_off > 4) { 1338 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1339 ETH_HLEN - 2)); 1340 1341 /* We only support inner iPv4/ipv6. 
If we don't see the 1342 * correct protocol ID, it must be a loopback packet where 1343 * the offsets are off by 4. 1344 */ 1345 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1346 loopback = true; 1347 } 1348 if (loopback) { 1349 /* internal loopback packet, subtract all offsets by 4 */ 1350 inner_ip_off -= 4; 1351 inner_mac_off -= 4; 1352 outer_ip_off -= 4; 1353 } 1354 1355 nw_off = inner_ip_off - ETH_HLEN; 1356 skb_set_network_header(skb, nw_off); 1357 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1358 struct ipv6hdr *iph = ipv6_hdr(skb); 1359 1360 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1361 len = skb->len - skb_transport_offset(skb); 1362 th = tcp_hdr(skb); 1363 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1364 } else { 1365 struct iphdr *iph = ip_hdr(skb); 1366 1367 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1368 len = skb->len - skb_transport_offset(skb); 1369 th = tcp_hdr(skb); 1370 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1371 } 1372 1373 if (inner_mac_off) { /* tunnel */ 1374 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1375 ETH_HLEN - 2)); 1376 1377 bnxt_gro_tunnel(skb, proto); 1378 } 1379 #endif 1380 return skb; 1381 } 1382 1383 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, 1384 int payload_off, int tcp_ts, 1385 struct sk_buff *skb) 1386 { 1387 #ifdef CONFIG_INET 1388 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1389 u32 hdr_info = tpa_info->hdr_info; 1390 int iphdr_len, nw_off; 1391 1392 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1393 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1394 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1395 1396 nw_off = inner_ip_off - ETH_HLEN; 1397 skb_set_network_header(skb, nw_off); 1398 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
1399 sizeof(struct ipv6hdr) : sizeof(struct iphdr); 1400 skb_set_transport_header(skb, nw_off + iphdr_len); 1401 1402 if (inner_mac_off) { /* tunnel */ 1403 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1404 ETH_HLEN - 2)); 1405 1406 bnxt_gro_tunnel(skb, proto); 1407 } 1408 #endif 1409 return skb; 1410 } 1411 1412 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1413 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1414 1415 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1416 int payload_off, int tcp_ts, 1417 struct sk_buff *skb) 1418 { 1419 #ifdef CONFIG_INET 1420 struct tcphdr *th; 1421 int len, nw_off, tcp_opt_len = 0; 1422 1423 if (tcp_ts) 1424 tcp_opt_len = 12; 1425 1426 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1427 struct iphdr *iph; 1428 1429 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - 1430 ETH_HLEN; 1431 skb_set_network_header(skb, nw_off); 1432 iph = ip_hdr(skb); 1433 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1434 len = skb->len - skb_transport_offset(skb); 1435 th = tcp_hdr(skb); 1436 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1437 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { 1438 struct ipv6hdr *iph; 1439 1440 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - 1441 ETH_HLEN; 1442 skb_set_network_header(skb, nw_off); 1443 iph = ipv6_hdr(skb); 1444 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1445 len = skb->len - skb_transport_offset(skb); 1446 th = tcp_hdr(skb); 1447 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1448 } else { 1449 dev_kfree_skb_any(skb); 1450 return NULL; 1451 } 1452 1453 if (nw_off) /* tunnel */ 1454 bnxt_gro_tunnel(skb, skb->protocol); 1455 #endif 1456 return skb; 1457 } 1458 1459 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1460 struct bnxt_tpa_info *tpa_info, 1461 struct rx_tpa_end_cmp *tpa_end, 1462 struct rx_tpa_end_cmp_ext *tpa_end1, 1463 struct sk_buff *skb) 1464 { 1465 #ifdef CONFIG_INET 1466 int payload_off; 1467 u16 segs; 1468 1469 segs = TPA_END_TPA_SEGS(tpa_end); 1470 if (segs == 1) 1471 return skb; 1472 1473 NAPI_GRO_CB(skb)->count = segs; 1474 skb_shinfo(skb)->gso_size = 1475 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1476 skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1477 if (bp->flags & BNXT_FLAG_CHIP_P5) 1478 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); 1479 else 1480 payload_off = TPA_END_PAYLOAD_OFF(tpa_end); 1481 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1482 if (likely(skb)) 1483 tcp_gro_complete(skb); 1484 #endif 1485 return skb; 1486 } 1487 1488 /* Given the cfa_code of a received packet determine which 1489 * netdev (vf-rep or PF) the packet is destined to. 1490 */ 1491 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) 1492 { 1493 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); 1494 1495 /* if vf-rep dev is NULL, the must belongs to the PF */ 1496 return dev ? 
dev : bp->dev; 1497 } 1498 1499 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1500 struct bnxt_cp_ring_info *cpr, 1501 u32 *raw_cons, 1502 struct rx_tpa_end_cmp *tpa_end, 1503 struct rx_tpa_end_cmp_ext *tpa_end1, 1504 u8 *event) 1505 { 1506 struct bnxt_napi *bnapi = cpr->bnapi; 1507 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1508 u8 *data_ptr, agg_bufs; 1509 unsigned int len; 1510 struct bnxt_tpa_info *tpa_info; 1511 dma_addr_t mapping; 1512 struct sk_buff *skb; 1513 u16 idx = 0, agg_id; 1514 void *data; 1515 bool gro; 1516 1517 if (unlikely(bnapi->in_reset)) { 1518 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1519 1520 if (rc < 0) 1521 return ERR_PTR(-EBUSY); 1522 return NULL; 1523 } 1524 1525 if (bp->flags & BNXT_FLAG_CHIP_P5) { 1526 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1527 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1528 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1529 tpa_info = &rxr->rx_tpa[agg_id]; 1530 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1531 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1532 agg_bufs, tpa_info->agg_count); 1533 agg_bufs = tpa_info->agg_count; 1534 } 1535 tpa_info->agg_count = 0; 1536 *event |= BNXT_AGG_EVENT; 1537 bnxt_free_agg_idx(rxr, agg_id); 1538 idx = agg_id; 1539 gro = !!(bp->flags & BNXT_FLAG_GRO); 1540 } else { 1541 agg_id = TPA_END_AGG_ID(tpa_end); 1542 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1543 tpa_info = &rxr->rx_tpa[agg_id]; 1544 idx = RING_CMP(*raw_cons); 1545 if (agg_bufs) { 1546 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1547 return ERR_PTR(-EBUSY); 1548 1549 *event |= BNXT_AGG_EVENT; 1550 idx = NEXT_CMP(idx); 1551 } 1552 gro = !!TPA_END_GRO(tpa_end); 1553 } 1554 data = tpa_info->data; 1555 data_ptr = tpa_info->data_ptr; 1556 prefetch(data_ptr); 1557 len = tpa_info->len; 1558 mapping = tpa_info->mapping; 1559 1560 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1561 bnxt_abort_tpa(cpr, idx, agg_bufs); 1562 if (agg_bufs > MAX_SKB_FRAGS) 1563 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1564 agg_bufs, (int)MAX_SKB_FRAGS); 1565 return NULL; 1566 } 1567 1568 if (len <= bp->rx_copy_thresh) { 1569 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1570 if (!skb) { 1571 bnxt_abort_tpa(cpr, idx, agg_bufs); 1572 return NULL; 1573 } 1574 } else { 1575 u8 *new_data; 1576 dma_addr_t new_mapping; 1577 1578 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); 1579 if (!new_data) { 1580 bnxt_abort_tpa(cpr, idx, agg_bufs); 1581 return NULL; 1582 } 1583 1584 tpa_info->data = new_data; 1585 tpa_info->data_ptr = new_data + bp->rx_offset; 1586 tpa_info->mapping = new_mapping; 1587 1588 skb = build_skb(data, 0); 1589 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1590 bp->rx_buf_use_size, bp->rx_dir, 1591 DMA_ATTR_WEAK_ORDERING); 1592 1593 if (!skb) { 1594 kfree(data); 1595 bnxt_abort_tpa(cpr, idx, agg_bufs); 1596 return NULL; 1597 } 1598 skb_reserve(skb, bp->rx_offset); 1599 skb_put(skb, len); 1600 } 1601 1602 if (agg_bufs) { 1603 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); 1604 if (!skb) { 1605 /* Page reuse already handled by bnxt_rx_pages(). 
*/ 1606 return NULL; 1607 } 1608 } 1609 1610 skb->protocol = 1611 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); 1612 1613 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1614 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1615 1616 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1617 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1618 u16 vlan_proto = tpa_info->metadata >> 1619 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1620 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1621 1622 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1623 } 1624 1625 skb_checksum_none_assert(skb); 1626 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1627 skb->ip_summed = CHECKSUM_UNNECESSARY; 1628 skb->csum_level = 1629 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1630 } 1631 1632 if (gro) 1633 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1634 1635 return skb; 1636 } 1637 1638 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1639 struct rx_agg_cmp *rx_agg) 1640 { 1641 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1642 struct bnxt_tpa_info *tpa_info; 1643 1644 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1645 tpa_info = &rxr->rx_tpa[agg_id]; 1646 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1647 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1648 } 1649 1650 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1651 struct sk_buff *skb) 1652 { 1653 if (skb->dev != bp->dev) { 1654 /* this packet belongs to a vf-rep */ 1655 bnxt_vf_rep_rx(bp, skb); 1656 return; 1657 } 1658 skb_record_rx_queue(skb, bnapi->index); 1659 napi_gro_receive(&bnapi->napi, skb); 1660 } 1661 1662 /* returns the following: 1663 * 1 - 1 packet successfully received 1664 * 0 - successful TPA_START, packet not completed yet 1665 * -EBUSY - completion ring does not have all the agg buffers yet 1666 * -ENOMEM - packet aborted due to out of memory 1667 * -EIO - packet aborted due to hw error indicated in BD 1668 */ 1669 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1670 u32 *raw_cons, u8 *event) 1671 { 1672 struct bnxt_napi *bnapi = cpr->bnapi; 1673 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1674 struct net_device *dev = bp->dev; 1675 struct rx_cmp *rxcmp; 1676 struct rx_cmp_ext *rxcmp1; 1677 u32 tmp_raw_cons = *raw_cons; 1678 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1679 struct bnxt_sw_rx_bd *rx_buf; 1680 unsigned int len; 1681 u8 *data_ptr, agg_bufs, cmp_type; 1682 dma_addr_t dma_addr; 1683 struct sk_buff *skb; 1684 void *data; 1685 int rc = 0; 1686 u32 misc; 1687 1688 rxcmp = (struct rx_cmp *) 1689 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1690 1691 cmp_type = RX_CMP_TYPE(rxcmp); 1692 1693 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 1694 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 1695 goto next_rx_no_prod_no_len; 1696 } 1697 1698 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1699 cp_cons = RING_CMP(tmp_raw_cons); 1700 rxcmp1 = (struct rx_cmp_ext *) 1701 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1702 1703 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1704 return -EBUSY; 1705 1706 prod = rxr->rx_prod; 1707 1708 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { 1709 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, 1710 (struct rx_tpa_start_cmp_ext *)rxcmp1); 1711 1712 *event |= BNXT_RX_EVENT; 1713 goto next_rx_no_prod_no_len; 1714 1715 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1716 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 
1717 (struct rx_tpa_end_cmp *)rxcmp, 1718 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1719 1720 if (IS_ERR(skb)) 1721 return -EBUSY; 1722 1723 rc = -ENOMEM; 1724 if (likely(skb)) { 1725 bnxt_deliver_skb(bp, bnapi, skb); 1726 rc = 1; 1727 } 1728 *event |= BNXT_RX_EVENT; 1729 goto next_rx_no_prod_no_len; 1730 } 1731 1732 cons = rxcmp->rx_cmp_opaque; 1733 if (unlikely(cons != rxr->rx_next_cons)) { 1734 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); 1735 1736 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 1737 cons, rxr->rx_next_cons); 1738 bnxt_sched_reset(bp, rxr); 1739 return rc1; 1740 } 1741 rx_buf = &rxr->rx_buf_ring[cons]; 1742 data = rx_buf->data; 1743 data_ptr = rx_buf->data_ptr; 1744 prefetch(data_ptr); 1745 1746 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1747 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1748 1749 if (agg_bufs) { 1750 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1751 return -EBUSY; 1752 1753 cp_cons = NEXT_CMP(cp_cons); 1754 *event |= BNXT_AGG_EVENT; 1755 } 1756 *event |= BNXT_RX_EVENT; 1757 1758 rx_buf->data = NULL; 1759 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1760 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 1761 1762 bnxt_reuse_rx_data(rxr, cons, data); 1763 if (agg_bufs) 1764 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 1765 false); 1766 1767 rc = -EIO; 1768 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 1769 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; 1770 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { 1771 netdev_warn(bp->dev, "RX buffer error %x\n", 1772 rx_err); 1773 bnxt_sched_reset(bp, rxr); 1774 } 1775 } 1776 goto next_rx_no_len; 1777 } 1778 1779 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 1780 dma_addr = rx_buf->mapping; 1781 1782 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { 1783 rc = 1; 1784 goto next_rx; 1785 } 1786 1787 if (len <= bp->rx_copy_thresh) { 1788 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 1789 bnxt_reuse_rx_data(rxr, cons, data); 1790 if (!skb) { 1791 if (agg_bufs) 1792 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 1793 agg_bufs, false); 1794 rc = -ENOMEM; 1795 goto next_rx; 1796 } 1797 } else { 1798 u32 payload; 1799 1800 if (rx_buf->data_ptr == data_ptr) 1801 payload = misc & RX_CMP_PAYLOAD_OFFSET; 1802 else 1803 payload = 0; 1804 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 1805 payload | len); 1806 if (!skb) { 1807 rc = -ENOMEM; 1808 goto next_rx; 1809 } 1810 } 1811 1812 if (agg_bufs) { 1813 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); 1814 if (!skb) { 1815 rc = -ENOMEM; 1816 goto next_rx; 1817 } 1818 } 1819 1820 if (RX_CMP_HASH_VALID(rxcmp)) { 1821 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 1822 enum pkt_hash_types type = PKT_HASH_TYPE_L4; 1823 1824 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1825 if (hash_type != 1 && hash_type != 3) 1826 type = PKT_HASH_TYPE_L3; 1827 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 1828 } 1829 1830 cfa_code = RX_CMP_CFA_CODE(rxcmp1); 1831 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); 1832 1833 if ((rxcmp1->rx_cmp_flags2 & 1834 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1835 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1836 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1837 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1838 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1839 1840 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1841 } 
1842 1843 skb_checksum_none_assert(skb); 1844 if (RX_CMP_L4_CS_OK(rxcmp1)) { 1845 if (dev->features & NETIF_F_RXCSUM) { 1846 skb->ip_summed = CHECKSUM_UNNECESSARY; 1847 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 1848 } 1849 } else { 1850 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1851 if (dev->features & NETIF_F_RXCSUM) 1852 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; 1853 } 1854 } 1855 1856 bnxt_deliver_skb(bp, bnapi, skb); 1857 rc = 1; 1858 1859 next_rx: 1860 cpr->rx_packets += 1; 1861 cpr->rx_bytes += len; 1862 1863 next_rx_no_len: 1864 rxr->rx_prod = NEXT_RX(prod); 1865 rxr->rx_next_cons = NEXT_RX(cons); 1866 1867 next_rx_no_prod_no_len: 1868 *raw_cons = tmp_raw_cons; 1869 1870 return rc; 1871 } 1872 1873 /* In netpoll mode, if we are using a combined completion ring, we need to 1874 * discard the rx packets and recycle the buffers. 1875 */ 1876 static int bnxt_force_rx_discard(struct bnxt *bp, 1877 struct bnxt_cp_ring_info *cpr, 1878 u32 *raw_cons, u8 *event) 1879 { 1880 u32 tmp_raw_cons = *raw_cons; 1881 struct rx_cmp_ext *rxcmp1; 1882 struct rx_cmp *rxcmp; 1883 u16 cp_cons; 1884 u8 cmp_type; 1885 1886 cp_cons = RING_CMP(tmp_raw_cons); 1887 rxcmp = (struct rx_cmp *) 1888 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1889 1890 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1891 cp_cons = RING_CMP(tmp_raw_cons); 1892 rxcmp1 = (struct rx_cmp_ext *) 1893 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1894 1895 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1896 return -EBUSY; 1897 1898 cmp_type = RX_CMP_TYPE(rxcmp); 1899 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1900 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1901 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1902 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1903 struct rx_tpa_end_cmp_ext *tpa_end1; 1904 1905 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 1906 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 1907 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 1908 } 1909 return bnxt_rx_pkt(bp, cpr, raw_cons, event); 1910 } 1911 1912 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 1913 { 1914 struct bnxt_fw_health *fw_health = bp->fw_health; 1915 u32 reg = fw_health->regs[reg_idx]; 1916 u32 reg_type, reg_off, val = 0; 1917 1918 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 1919 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 1920 switch (reg_type) { 1921 case BNXT_FW_HEALTH_REG_TYPE_CFG: 1922 pci_read_config_dword(bp->pdev, reg_off, &val); 1923 break; 1924 case BNXT_FW_HEALTH_REG_TYPE_GRC: 1925 reg_off = fw_health->mapped_regs[reg_idx]; 1926 /* fall through */ 1927 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 1928 val = readl(bp->bar0 + reg_off); 1929 break; 1930 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 1931 val = readl(bp->bar1 + reg_off); 1932 break; 1933 } 1934 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 1935 val &= fw_health->fw_reset_inprog_reg_mask; 1936 return val; 1937 } 1938 1939 #define BNXT_GET_EVENT_PORT(data) \ 1940 ((data) & \ 1941 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 1942 1943 static int bnxt_async_event_process(struct bnxt *bp, 1944 struct hwrm_async_event_cmpl *cmpl) 1945 { 1946 u16 event_id = le16_to_cpu(cmpl->event_id); 1947 1948 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 1949 switch (event_id) { 1950 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 1951 u32 data1 = le32_to_cpu(cmpl->event_data1); 1952 struct bnxt_link_info *link_info = &bp->link_info; 1953 1954 if (BNXT_VF(bp)) 1955 goto async_event_process_exit; 1956 1957 /* print unsupported speed warning in forced speed mode only */ 
1958 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 1959 (data1 & 0x20000)) { 1960 u16 fw_speed = link_info->force_link_speed; 1961 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 1962 1963 if (speed != SPEED_UNKNOWN) 1964 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 1965 speed); 1966 } 1967 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 1968 } 1969 /* fall through */ 1970 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 1971 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 1972 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 1973 /* fall through */ 1974 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 1975 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 1976 break; 1977 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 1978 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 1979 break; 1980 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 1981 u32 data1 = le32_to_cpu(cmpl->event_data1); 1982 u16 port_id = BNXT_GET_EVENT_PORT(data1); 1983 1984 if (BNXT_VF(bp)) 1985 break; 1986 1987 if (bp->pf.port_id != port_id) 1988 break; 1989 1990 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 1991 break; 1992 } 1993 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 1994 if (BNXT_PF(bp)) 1995 goto async_event_process_exit; 1996 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 1997 break; 1998 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 1999 u32 data1 = le32_to_cpu(cmpl->event_data1); 2000 2001 if (!bp->fw_health) 2002 goto async_event_process_exit; 2003 2004 bp->fw_reset_timestamp = jiffies; 2005 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2006 if (!bp->fw_reset_min_dsecs) 2007 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2008 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2009 if (!bp->fw_reset_max_dsecs) 2010 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2011 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2012 netdev_warn(bp->dev, "Firmware fatal reset event received\n"); 2013 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2014 } else { 2015 netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n", 2016 bp->fw_reset_max_dsecs * 100); 2017 } 2018 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2019 break; 2020 } 2021 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2022 struct bnxt_fw_health *fw_health = bp->fw_health; 2023 u32 data1 = le32_to_cpu(cmpl->event_data1); 2024 2025 if (!fw_health) 2026 goto async_event_process_exit; 2027 2028 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1); 2029 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2030 if (!fw_health->enabled) 2031 break; 2032 2033 if (netif_msg_drv(bp)) 2034 netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n", 2035 fw_health->enabled, fw_health->master, 2036 bnxt_fw_health_readl(bp, 2037 BNXT_FW_RESET_CNT_REG), 2038 bnxt_fw_health_readl(bp, 2039 BNXT_FW_HEALTH_REG)); 2040 fw_health->tmr_multiplier = 2041 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2042 bp->current_interval * 10); 2043 fw_health->tmr_counter = fw_health->tmr_multiplier; 2044 fw_health->last_fw_heartbeat = 2045 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2046 fw_health->last_fw_reset_cnt = 2047 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2048 goto async_event_process_exit; 2049 } 2050 default: 2051 goto async_event_process_exit; 2052 } 2053 bnxt_queue_sp_work(bp); 2054 async_event_process_exit: 2055 bnxt_ulp_async_events(bp, cmpl); 2056 return 0; 2057 } 2058 
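/* Dispatch HWRM-related completions found on the completion ring:
 * HWRM_DONE acknowledges the sequence id of a command issued in
 * interrupt mode, FWD_REQ queues forwarding work for a message from a
 * valid VF, and ASYNC_EVENT is handed off to bnxt_async_event_process().
 */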
2059 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2060 { 2061 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2062 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2063 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2064 (struct hwrm_fwd_req_cmpl *)txcmp; 2065 2066 switch (cmpl_type) { 2067 case CMPL_BASE_TYPE_HWRM_DONE: 2068 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2069 if (seq_id == bp->hwrm_intr_seq_id) 2070 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; 2071 else 2072 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); 2073 break; 2074 2075 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2076 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2077 2078 if ((vf_id < bp->pf.first_vf_id) || 2079 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2080 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2081 vf_id); 2082 return -EINVAL; 2083 } 2084 2085 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2086 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 2087 bnxt_queue_sp_work(bp); 2088 break; 2089 2090 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2091 bnxt_async_event_process(bp, 2092 (struct hwrm_async_event_cmpl *)txcmp); 2093 2094 default: 2095 break; 2096 } 2097 2098 return 0; 2099 } 2100 2101 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2102 { 2103 struct bnxt_napi *bnapi = dev_instance; 2104 struct bnxt *bp = bnapi->bp; 2105 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2106 u32 cons = RING_CMP(cpr->cp_raw_cons); 2107 2108 cpr->event_ctr++; 2109 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2110 napi_schedule(&bnapi->napi); 2111 return IRQ_HANDLED; 2112 } 2113 2114 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2115 { 2116 u32 raw_cons = cpr->cp_raw_cons; 2117 u16 cons = RING_CMP(raw_cons); 2118 struct tx_cmp *txcmp; 2119 2120 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2121 2122 return TX_CMP_VALID(txcmp, raw_cons); 2123 } 2124 2125 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 2126 { 2127 struct bnxt_napi *bnapi = dev_instance; 2128 struct bnxt *bp = bnapi->bp; 2129 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2130 u32 cons = RING_CMP(cpr->cp_raw_cons); 2131 u32 int_status; 2132 2133 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2134 2135 if (!bnxt_has_work(bp, cpr)) { 2136 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 2137 /* return if erroneous interrupt */ 2138 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 2139 return IRQ_NONE; 2140 } 2141 2142 /* disable ring IRQ */ 2143 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); 2144 2145 /* Return here if interrupt is shared and is disabled. */ 2146 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2147 return IRQ_HANDLED; 2148 2149 napi_schedule(&bnapi->napi); 2150 return IRQ_HANDLED; 2151 } 2152 2153 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2154 int budget) 2155 { 2156 struct bnxt_napi *bnapi = cpr->bnapi; 2157 u32 raw_cons = cpr->cp_raw_cons; 2158 u32 cons; 2159 int tx_pkts = 0; 2160 int rx_pkts = 0; 2161 u8 event = 0; 2162 struct tx_cmp *txcmp; 2163 2164 cpr->has_more_work = 0; 2165 cpr->had_work_done = 1; 2166 while (1) { 2167 int rc; 2168 2169 cons = RING_CMP(raw_cons); 2170 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2171 2172 if (!TX_CMP_VALID(txcmp, raw_cons)) 2173 break; 2174 2175 /* The valid test of the entry must be done first before 2176 * reading any further. 
2177 */ 2178 dma_rmb(); 2179 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 2180 tx_pkts++; 2181 /* return full budget so NAPI will complete. */ 2182 if (unlikely(tx_pkts > bp->tx_wake_thresh)) { 2183 rx_pkts = budget; 2184 raw_cons = NEXT_RAW_CMP(raw_cons); 2185 if (budget) 2186 cpr->has_more_work = 1; 2187 break; 2188 } 2189 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2190 if (likely(budget)) 2191 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2192 else 2193 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2194 &event); 2195 if (likely(rc >= 0)) 2196 rx_pkts += rc; 2197 /* Increment rx_pkts when rc is -ENOMEM to count towards 2198 * the NAPI budget. Otherwise, we may potentially loop 2199 * here forever if we consistently cannot allocate 2200 * buffers. 2201 */ 2202 else if (rc == -ENOMEM && budget) 2203 rx_pkts++; 2204 else if (rc == -EBUSY) /* partial completion */ 2205 break; 2206 } else if (unlikely((TX_CMP_TYPE(txcmp) == 2207 CMPL_BASE_TYPE_HWRM_DONE) || 2208 (TX_CMP_TYPE(txcmp) == 2209 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 2210 (TX_CMP_TYPE(txcmp) == 2211 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 2212 bnxt_hwrm_handler(bp, txcmp); 2213 } 2214 raw_cons = NEXT_RAW_CMP(raw_cons); 2215 2216 if (rx_pkts && rx_pkts == budget) { 2217 cpr->has_more_work = 1; 2218 break; 2219 } 2220 } 2221 2222 if (event & BNXT_REDIRECT_EVENT) 2223 xdp_do_flush_map(); 2224 2225 if (event & BNXT_TX_EVENT) { 2226 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 2227 u16 prod = txr->tx_prod; 2228 2229 /* Sync BD data before updating doorbell */ 2230 wmb(); 2231 2232 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2233 } 2234 2235 cpr->cp_raw_cons = raw_cons; 2236 bnapi->tx_pkts += tx_pkts; 2237 bnapi->events |= event; 2238 return rx_pkts; 2239 } 2240 2241 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) 2242 { 2243 if (bnapi->tx_pkts) { 2244 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); 2245 bnapi->tx_pkts = 0; 2246 } 2247 2248 if (bnapi->events & BNXT_RX_EVENT) { 2249 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2250 2251 if (bnapi->events & BNXT_AGG_EVENT) 2252 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2253 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2254 } 2255 bnapi->events = 0; 2256 } 2257 2258 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2259 int budget) 2260 { 2261 struct bnxt_napi *bnapi = cpr->bnapi; 2262 int rx_pkts; 2263 2264 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2265 2266 /* ACK completion ring before freeing tx ring and producing new 2267 * buffers in rx/agg rings to prevent overflowing the completion 2268 * ring. 
2269 */ 2270 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2271 2272 __bnxt_poll_work_done(bp, bnapi); 2273 return rx_pkts; 2274 } 2275 2276 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2277 { 2278 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2279 struct bnxt *bp = bnapi->bp; 2280 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2281 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2282 struct tx_cmp *txcmp; 2283 struct rx_cmp_ext *rxcmp1; 2284 u32 cp_cons, tmp_raw_cons; 2285 u32 raw_cons = cpr->cp_raw_cons; 2286 u32 rx_pkts = 0; 2287 u8 event = 0; 2288 2289 while (1) { 2290 int rc; 2291 2292 cp_cons = RING_CMP(raw_cons); 2293 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2294 2295 if (!TX_CMP_VALID(txcmp, raw_cons)) 2296 break; 2297 2298 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2299 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2300 cp_cons = RING_CMP(tmp_raw_cons); 2301 rxcmp1 = (struct rx_cmp_ext *) 2302 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2303 2304 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2305 break; 2306 2307 /* force an error to recycle the buffer */ 2308 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2309 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2310 2311 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2312 if (likely(rc == -EIO) && budget) 2313 rx_pkts++; 2314 else if (rc == -EBUSY) /* partial completion */ 2315 break; 2316 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2317 CMPL_BASE_TYPE_HWRM_DONE)) { 2318 bnxt_hwrm_handler(bp, txcmp); 2319 } else { 2320 netdev_err(bp->dev, 2321 "Invalid completion received on special ring\n"); 2322 } 2323 raw_cons = NEXT_RAW_CMP(raw_cons); 2324 2325 if (rx_pkts == budget) 2326 break; 2327 } 2328 2329 cpr->cp_raw_cons = raw_cons; 2330 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2331 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2332 2333 if (event & BNXT_AGG_EVENT) 2334 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2335 2336 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2337 napi_complete_done(napi, rx_pkts); 2338 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2339 } 2340 return rx_pkts; 2341 } 2342 2343 static int bnxt_poll(struct napi_struct *napi, int budget) 2344 { 2345 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2346 struct bnxt *bp = bnapi->bp; 2347 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2348 int work_done = 0; 2349 2350 while (1) { 2351 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 2352 2353 if (work_done >= budget) { 2354 if (!budget) 2355 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2356 break; 2357 } 2358 2359 if (!bnxt_has_work(bp, cpr)) { 2360 if (napi_complete_done(napi, work_done)) 2361 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2362 break; 2363 } 2364 } 2365 if (bp->flags & BNXT_FLAG_DIM) { 2366 struct dim_sample dim_sample = {}; 2367 2368 dim_update_sample(cpr->event_ctr, 2369 cpr->rx_packets, 2370 cpr->rx_bytes, 2371 &dim_sample); 2372 net_dim(&cpr->dim, dim_sample); 2373 } 2374 return work_done; 2375 } 2376 2377 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 2378 { 2379 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2380 int i, work_done = 0; 2381 2382 for (i = 0; i < 2; i++) { 2383 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2384 2385 if (cpr2) { 2386 work_done += __bnxt_poll_work(bp, cpr2, 2387 budget - work_done); 2388 cpr->has_more_work |= cpr2->has_more_work; 2389 } 2390 } 2391 return work_done; 2392 } 2393 2394 static void __bnxt_poll_cqs_done(struct bnxt 
*bp, struct bnxt_napi *bnapi, 2395 u64 dbr_type) 2396 { 2397 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2398 int i; 2399 2400 for (i = 0; i < 2; i++) { 2401 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2402 struct bnxt_db_info *db; 2403 2404 if (cpr2 && cpr2->had_work_done) { 2405 db = &cpr2->cp_db; 2406 writeq(db->db_key64 | dbr_type | 2407 RING_CMP(cpr2->cp_raw_cons), db->doorbell); 2408 cpr2->had_work_done = 0; 2409 } 2410 } 2411 __bnxt_poll_work_done(bp, bnapi); 2412 } 2413 2414 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 2415 { 2416 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2417 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2418 u32 raw_cons = cpr->cp_raw_cons; 2419 struct bnxt *bp = bnapi->bp; 2420 struct nqe_cn *nqcmp; 2421 int work_done = 0; 2422 u32 cons; 2423 2424 if (cpr->has_more_work) { 2425 cpr->has_more_work = 0; 2426 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 2427 } 2428 while (1) { 2429 cons = RING_CMP(raw_cons); 2430 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2431 2432 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 2433 if (cpr->has_more_work) 2434 break; 2435 2436 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); 2437 cpr->cp_raw_cons = raw_cons; 2438 if (napi_complete_done(napi, work_done)) 2439 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 2440 cpr->cp_raw_cons); 2441 return work_done; 2442 } 2443 2444 /* The valid test of the entry must be done first before 2445 * reading any further. 2446 */ 2447 dma_rmb(); 2448 2449 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { 2450 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 2451 struct bnxt_cp_ring_info *cpr2; 2452 2453 cpr2 = cpr->cp_ring_arr[idx]; 2454 work_done += __bnxt_poll_work(bp, cpr2, 2455 budget - work_done); 2456 cpr->has_more_work |= cpr2->has_more_work; 2457 } else { 2458 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 2459 } 2460 raw_cons = NEXT_RAW_CMP(raw_cons); 2461 } 2462 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); 2463 if (raw_cons != cpr->cp_raw_cons) { 2464 cpr->cp_raw_cons = raw_cons; 2465 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 2466 } 2467 return work_done; 2468 } 2469 2470 static void bnxt_free_tx_skbs(struct bnxt *bp) 2471 { 2472 int i, max_idx; 2473 struct pci_dev *pdev = bp->pdev; 2474 2475 if (!bp->tx_ring) 2476 return; 2477 2478 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2479 for (i = 0; i < bp->tx_nr_rings; i++) { 2480 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2481 int j; 2482 2483 for (j = 0; j < max_idx;) { 2484 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2485 struct sk_buff *skb; 2486 int k, last; 2487 2488 if (i < bp->tx_nr_rings_xdp && 2489 tx_buf->action == XDP_REDIRECT) { 2490 dma_unmap_single(&pdev->dev, 2491 dma_unmap_addr(tx_buf, mapping), 2492 dma_unmap_len(tx_buf, len), 2493 PCI_DMA_TODEVICE); 2494 xdp_return_frame(tx_buf->xdpf); 2495 tx_buf->action = 0; 2496 tx_buf->xdpf = NULL; 2497 j++; 2498 continue; 2499 } 2500 2501 skb = tx_buf->skb; 2502 if (!skb) { 2503 j++; 2504 continue; 2505 } 2506 2507 tx_buf->skb = NULL; 2508 2509 if (tx_buf->is_push) { 2510 dev_kfree_skb(skb); 2511 j += 2; 2512 continue; 2513 } 2514 2515 dma_unmap_single(&pdev->dev, 2516 dma_unmap_addr(tx_buf, mapping), 2517 skb_headlen(skb), 2518 PCI_DMA_TODEVICE); 2519 2520 last = tx_buf->nr_frags; 2521 j += 2; 2522 for (k = 0; k < last; k++, j++) { 2523 int ring_idx = j & bp->tx_ring_mask; 2524 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2525 2526 tx_buf = &txr->tx_buf_ring[ring_idx]; 2527 dma_unmap_page( 2528 &pdev->dev, 2529 
dma_unmap_addr(tx_buf, mapping), 2530 skb_frag_size(frag), PCI_DMA_TODEVICE); 2531 } 2532 dev_kfree_skb(skb); 2533 } 2534 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 2535 } 2536 } 2537 2538 static void bnxt_free_rx_skbs(struct bnxt *bp) 2539 { 2540 int i, max_idx, max_agg_idx; 2541 struct pci_dev *pdev = bp->pdev; 2542 2543 if (!bp->rx_ring) 2544 return; 2545 2546 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 2547 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 2548 for (i = 0; i < bp->rx_nr_rings; i++) { 2549 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2550 struct bnxt_tpa_idx_map *map; 2551 int j; 2552 2553 if (rxr->rx_tpa) { 2554 for (j = 0; j < bp->max_tpa; j++) { 2555 struct bnxt_tpa_info *tpa_info = 2556 &rxr->rx_tpa[j]; 2557 u8 *data = tpa_info->data; 2558 2559 if (!data) 2560 continue; 2561 2562 dma_unmap_single_attrs(&pdev->dev, 2563 tpa_info->mapping, 2564 bp->rx_buf_use_size, 2565 bp->rx_dir, 2566 DMA_ATTR_WEAK_ORDERING); 2567 2568 tpa_info->data = NULL; 2569 2570 kfree(data); 2571 } 2572 } 2573 2574 for (j = 0; j < max_idx; j++) { 2575 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 2576 dma_addr_t mapping = rx_buf->mapping; 2577 void *data = rx_buf->data; 2578 2579 if (!data) 2580 continue; 2581 2582 rx_buf->data = NULL; 2583 2584 if (BNXT_RX_PAGE_MODE(bp)) { 2585 mapping -= bp->rx_dma_offset; 2586 dma_unmap_page_attrs(&pdev->dev, mapping, 2587 PAGE_SIZE, bp->rx_dir, 2588 DMA_ATTR_WEAK_ORDERING); 2589 page_pool_recycle_direct(rxr->page_pool, data); 2590 } else { 2591 dma_unmap_single_attrs(&pdev->dev, mapping, 2592 bp->rx_buf_use_size, 2593 bp->rx_dir, 2594 DMA_ATTR_WEAK_ORDERING); 2595 kfree(data); 2596 } 2597 } 2598 2599 for (j = 0; j < max_agg_idx; j++) { 2600 struct bnxt_sw_rx_agg_bd *rx_agg_buf = 2601 &rxr->rx_agg_ring[j]; 2602 struct page *page = rx_agg_buf->page; 2603 2604 if (!page) 2605 continue; 2606 2607 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, 2608 BNXT_RX_PAGE_SIZE, 2609 PCI_DMA_FROMDEVICE, 2610 DMA_ATTR_WEAK_ORDERING); 2611 2612 rx_agg_buf->page = NULL; 2613 __clear_bit(j, rxr->rx_agg_bmap); 2614 2615 __free_page(page); 2616 } 2617 if (rxr->rx_page) { 2618 __free_page(rxr->rx_page); 2619 rxr->rx_page = NULL; 2620 } 2621 map = rxr->rx_tpa_idx_map; 2622 if (map) 2623 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 2624 } 2625 } 2626 2627 static void bnxt_free_skbs(struct bnxt *bp) 2628 { 2629 bnxt_free_tx_skbs(bp); 2630 bnxt_free_rx_skbs(bp); 2631 } 2632 2633 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2634 { 2635 struct pci_dev *pdev = bp->pdev; 2636 int i; 2637 2638 for (i = 0; i < rmem->nr_pages; i++) { 2639 if (!rmem->pg_arr[i]) 2640 continue; 2641 2642 dma_free_coherent(&pdev->dev, rmem->page_size, 2643 rmem->pg_arr[i], rmem->dma_arr[i]); 2644 2645 rmem->pg_arr[i] = NULL; 2646 } 2647 if (rmem->pg_tbl) { 2648 size_t pg_tbl_size = rmem->nr_pages * 8; 2649 2650 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2651 pg_tbl_size = rmem->page_size; 2652 dma_free_coherent(&pdev->dev, pg_tbl_size, 2653 rmem->pg_tbl, rmem->pg_tbl_map); 2654 rmem->pg_tbl = NULL; 2655 } 2656 if (rmem->vmem_size && *rmem->vmem) { 2657 vfree(*rmem->vmem); 2658 *rmem->vmem = NULL; 2659 } 2660 } 2661 2662 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2663 { 2664 struct pci_dev *pdev = bp->pdev; 2665 u64 valid_bit = 0; 2666 int i; 2667 2668 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 2669 valid_bit = PTU_PTE_VALID; 2670 if ((rmem->nr_pages > 1 || rmem->depth > 0) 
&& !rmem->pg_tbl) { 2671 size_t pg_tbl_size = rmem->nr_pages * 8; 2672 2673 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2674 pg_tbl_size = rmem->page_size; 2675 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 2676 &rmem->pg_tbl_map, 2677 GFP_KERNEL); 2678 if (!rmem->pg_tbl) 2679 return -ENOMEM; 2680 } 2681 2682 for (i = 0; i < rmem->nr_pages; i++) { 2683 u64 extra_bits = valid_bit; 2684 2685 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2686 rmem->page_size, 2687 &rmem->dma_arr[i], 2688 GFP_KERNEL); 2689 if (!rmem->pg_arr[i]) 2690 return -ENOMEM; 2691 2692 if (rmem->init_val) 2693 memset(rmem->pg_arr[i], rmem->init_val, 2694 rmem->page_size); 2695 if (rmem->nr_pages > 1 || rmem->depth > 0) { 2696 if (i == rmem->nr_pages - 2 && 2697 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2698 extra_bits |= PTU_PTE_NEXT_TO_LAST; 2699 else if (i == rmem->nr_pages - 1 && 2700 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2701 extra_bits |= PTU_PTE_LAST; 2702 rmem->pg_tbl[i] = 2703 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 2704 } 2705 } 2706 2707 if (rmem->vmem_size) { 2708 *rmem->vmem = vzalloc(rmem->vmem_size); 2709 if (!(*rmem->vmem)) 2710 return -ENOMEM; 2711 } 2712 return 0; 2713 } 2714 2715 static void bnxt_free_tpa_info(struct bnxt *bp) 2716 { 2717 int i; 2718 2719 for (i = 0; i < bp->rx_nr_rings; i++) { 2720 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2721 2722 kfree(rxr->rx_tpa_idx_map); 2723 rxr->rx_tpa_idx_map = NULL; 2724 if (rxr->rx_tpa) { 2725 kfree(rxr->rx_tpa[0].agg_arr); 2726 rxr->rx_tpa[0].agg_arr = NULL; 2727 } 2728 kfree(rxr->rx_tpa); 2729 rxr->rx_tpa = NULL; 2730 } 2731 } 2732 2733 static int bnxt_alloc_tpa_info(struct bnxt *bp) 2734 { 2735 int i, j, total_aggs = 0; 2736 2737 bp->max_tpa = MAX_TPA; 2738 if (bp->flags & BNXT_FLAG_CHIP_P5) { 2739 if (!bp->max_tpa_v2) 2740 return 0; 2741 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 2742 total_aggs = bp->max_tpa * MAX_SKB_FRAGS; 2743 } 2744 2745 for (i = 0; i < bp->rx_nr_rings; i++) { 2746 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2747 struct rx_agg_cmp *agg; 2748 2749 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 2750 GFP_KERNEL); 2751 if (!rxr->rx_tpa) 2752 return -ENOMEM; 2753 2754 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 2755 continue; 2756 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); 2757 rxr->rx_tpa[0].agg_arr = agg; 2758 if (!agg) 2759 return -ENOMEM; 2760 for (j = 1; j < bp->max_tpa; j++) 2761 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; 2762 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 2763 GFP_KERNEL); 2764 if (!rxr->rx_tpa_idx_map) 2765 return -ENOMEM; 2766 } 2767 return 0; 2768 } 2769 2770 static void bnxt_free_rx_rings(struct bnxt *bp) 2771 { 2772 int i; 2773 2774 if (!bp->rx_ring) 2775 return; 2776 2777 bnxt_free_tpa_info(bp); 2778 for (i = 0; i < bp->rx_nr_rings; i++) { 2779 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2780 struct bnxt_ring_struct *ring; 2781 2782 if (rxr->xdp_prog) 2783 bpf_prog_put(rxr->xdp_prog); 2784 2785 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 2786 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2787 2788 page_pool_destroy(rxr->page_pool); 2789 rxr->page_pool = NULL; 2790 2791 kfree(rxr->rx_agg_bmap); 2792 rxr->rx_agg_bmap = NULL; 2793 2794 ring = &rxr->rx_ring_struct; 2795 bnxt_free_ring(bp, &ring->ring_mem); 2796 2797 ring = &rxr->rx_agg_ring_struct; 2798 bnxt_free_ring(bp, &ring->ring_mem); 2799 } 2800 } 2801 2802 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 2803 struct bnxt_rx_ring_info *rxr) 2804 { 2805 struct 
page_pool_params pp = { 0 }; 2806 2807 pp.pool_size = bp->rx_ring_size; 2808 pp.nid = dev_to_node(&bp->pdev->dev); 2809 pp.dev = &bp->pdev->dev; 2810 pp.dma_dir = DMA_BIDIRECTIONAL; 2811 2812 rxr->page_pool = page_pool_create(&pp); 2813 if (IS_ERR(rxr->page_pool)) { 2814 int err = PTR_ERR(rxr->page_pool); 2815 2816 rxr->page_pool = NULL; 2817 return err; 2818 } 2819 return 0; 2820 } 2821 2822 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2823 { 2824 int i, rc = 0, agg_rings = 0; 2825 2826 if (!bp->rx_ring) 2827 return -ENOMEM; 2828 2829 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2830 agg_rings = 1; 2831 2832 for (i = 0; i < bp->rx_nr_rings; i++) { 2833 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2834 struct bnxt_ring_struct *ring; 2835 2836 ring = &rxr->rx_ring_struct; 2837 2838 rc = bnxt_alloc_rx_page_pool(bp, rxr); 2839 if (rc) 2840 return rc; 2841 2842 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); 2843 if (rc < 0) 2844 return rc; 2845 2846 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 2847 MEM_TYPE_PAGE_POOL, 2848 rxr->page_pool); 2849 if (rc) { 2850 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2851 return rc; 2852 } 2853 2854 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2855 if (rc) 2856 return rc; 2857 2858 ring->grp_idx = i; 2859 if (agg_rings) { 2860 u16 mem_size; 2861 2862 ring = &rxr->rx_agg_ring_struct; 2863 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2864 if (rc) 2865 return rc; 2866 2867 ring->grp_idx = i; 2868 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2869 mem_size = rxr->rx_agg_bmap_size / 8; 2870 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2871 if (!rxr->rx_agg_bmap) 2872 return -ENOMEM; 2873 } 2874 } 2875 if (bp->flags & BNXT_FLAG_TPA) 2876 rc = bnxt_alloc_tpa_info(bp); 2877 return rc; 2878 } 2879 2880 static void bnxt_free_tx_rings(struct bnxt *bp) 2881 { 2882 int i; 2883 struct pci_dev *pdev = bp->pdev; 2884 2885 if (!bp->tx_ring) 2886 return; 2887 2888 for (i = 0; i < bp->tx_nr_rings; i++) { 2889 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2890 struct bnxt_ring_struct *ring; 2891 2892 if (txr->tx_push) { 2893 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2894 txr->tx_push, txr->tx_push_mapping); 2895 txr->tx_push = NULL; 2896 } 2897 2898 ring = &txr->tx_ring_struct; 2899 2900 bnxt_free_ring(bp, &ring->ring_mem); 2901 } 2902 } 2903 2904 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2905 { 2906 int i, j, rc; 2907 struct pci_dev *pdev = bp->pdev; 2908 2909 bp->tx_push_size = 0; 2910 if (bp->tx_push_thresh) { 2911 int push_size; 2912 2913 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2914 bp->tx_push_thresh); 2915 2916 if (push_size > 256) { 2917 push_size = 0; 2918 bp->tx_push_thresh = 0; 2919 } 2920 2921 bp->tx_push_size = push_size; 2922 } 2923 2924 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2925 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2926 struct bnxt_ring_struct *ring; 2927 u8 qidx; 2928 2929 ring = &txr->tx_ring_struct; 2930 2931 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2932 if (rc) 2933 return rc; 2934 2935 ring->grp_idx = txr->bnapi->index; 2936 if (bp->tx_push_size) { 2937 dma_addr_t mapping; 2938 2939 /* One pre-allocated DMA buffer to backup 2940 * TX push operation 2941 */ 2942 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2943 bp->tx_push_size, 2944 &txr->tx_push_mapping, 2945 GFP_KERNEL); 2946 2947 if (!txr->tx_push) 2948 return -ENOMEM; 2949 2950 mapping = txr->tx_push_mapping + 2951 sizeof(struct tx_push_bd); 2952 txr->data_mapping = cpu_to_le64(mapping); 2953 } 2954 qidx = bp->tc_to_qidx[j]; 2955 ring->queue_id = 
bp->q_info[qidx].queue_id; 2956 if (i < bp->tx_nr_rings_xdp) 2957 continue; 2958 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2959 j++; 2960 } 2961 return 0; 2962 } 2963 2964 static void bnxt_free_cp_rings(struct bnxt *bp) 2965 { 2966 int i; 2967 2968 if (!bp->bnapi) 2969 return; 2970 2971 for (i = 0; i < bp->cp_nr_rings; i++) { 2972 struct bnxt_napi *bnapi = bp->bnapi[i]; 2973 struct bnxt_cp_ring_info *cpr; 2974 struct bnxt_ring_struct *ring; 2975 int j; 2976 2977 if (!bnapi) 2978 continue; 2979 2980 cpr = &bnapi->cp_ring; 2981 ring = &cpr->cp_ring_struct; 2982 2983 bnxt_free_ring(bp, &ring->ring_mem); 2984 2985 for (j = 0; j < 2; j++) { 2986 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 2987 2988 if (cpr2) { 2989 ring = &cpr2->cp_ring_struct; 2990 bnxt_free_ring(bp, &ring->ring_mem); 2991 kfree(cpr2); 2992 cpr->cp_ring_arr[j] = NULL; 2993 } 2994 } 2995 } 2996 } 2997 2998 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) 2999 { 3000 struct bnxt_ring_mem_info *rmem; 3001 struct bnxt_ring_struct *ring; 3002 struct bnxt_cp_ring_info *cpr; 3003 int rc; 3004 3005 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); 3006 if (!cpr) 3007 return NULL; 3008 3009 ring = &cpr->cp_ring_struct; 3010 rmem = &ring->ring_mem; 3011 rmem->nr_pages = bp->cp_nr_pages; 3012 rmem->page_size = HW_CMPD_RING_SIZE; 3013 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3014 rmem->dma_arr = cpr->cp_desc_mapping; 3015 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3016 rc = bnxt_alloc_ring(bp, rmem); 3017 if (rc) { 3018 bnxt_free_ring(bp, rmem); 3019 kfree(cpr); 3020 cpr = NULL; 3021 } 3022 return cpr; 3023 } 3024 3025 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3026 { 3027 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3028 int i, rc, ulp_base_vec, ulp_msix; 3029 3030 ulp_msix = bnxt_get_ulp_msix_num(bp); 3031 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 3032 for (i = 0; i < bp->cp_nr_rings; i++) { 3033 struct bnxt_napi *bnapi = bp->bnapi[i]; 3034 struct bnxt_cp_ring_info *cpr; 3035 struct bnxt_ring_struct *ring; 3036 3037 if (!bnapi) 3038 continue; 3039 3040 cpr = &bnapi->cp_ring; 3041 cpr->bnapi = bnapi; 3042 ring = &cpr->cp_ring_struct; 3043 3044 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3045 if (rc) 3046 return rc; 3047 3048 if (ulp_msix && i >= ulp_base_vec) 3049 ring->map_idx = i + ulp_msix; 3050 else 3051 ring->map_idx = i; 3052 3053 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 3054 continue; 3055 3056 if (i < bp->rx_nr_rings) { 3057 struct bnxt_cp_ring_info *cpr2 = 3058 bnxt_alloc_cp_sub_ring(bp); 3059 3060 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; 3061 if (!cpr2) 3062 return -ENOMEM; 3063 cpr2->bnapi = bnapi; 3064 } 3065 if ((sh && i < bp->tx_nr_rings) || 3066 (!sh && i >= bp->rx_nr_rings)) { 3067 struct bnxt_cp_ring_info *cpr2 = 3068 bnxt_alloc_cp_sub_ring(bp); 3069 3070 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; 3071 if (!cpr2) 3072 return -ENOMEM; 3073 cpr2->bnapi = bnapi; 3074 } 3075 } 3076 return 0; 3077 } 3078 3079 static void bnxt_init_ring_struct(struct bnxt *bp) 3080 { 3081 int i; 3082 3083 for (i = 0; i < bp->cp_nr_rings; i++) { 3084 struct bnxt_napi *bnapi = bp->bnapi[i]; 3085 struct bnxt_ring_mem_info *rmem; 3086 struct bnxt_cp_ring_info *cpr; 3087 struct bnxt_rx_ring_info *rxr; 3088 struct bnxt_tx_ring_info *txr; 3089 struct bnxt_ring_struct *ring; 3090 3091 if (!bnapi) 3092 continue; 3093 3094 cpr = &bnapi->cp_ring; 3095 ring = &cpr->cp_ring_struct; 3096 rmem = &ring->ring_mem; 3097 rmem->nr_pages = bp->cp_nr_pages; 3098 rmem->page_size = HW_CMPD_RING_SIZE; 3099 rmem->pg_arr = 
(void **)cpr->cp_desc_ring; 3100 rmem->dma_arr = cpr->cp_desc_mapping; 3101 rmem->vmem_size = 0; 3102 3103 rxr = bnapi->rx_ring; 3104 if (!rxr) 3105 goto skip_rx; 3106 3107 ring = &rxr->rx_ring_struct; 3108 rmem = &ring->ring_mem; 3109 rmem->nr_pages = bp->rx_nr_pages; 3110 rmem->page_size = HW_RXBD_RING_SIZE; 3111 rmem->pg_arr = (void **)rxr->rx_desc_ring; 3112 rmem->dma_arr = rxr->rx_desc_mapping; 3113 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 3114 rmem->vmem = (void **)&rxr->rx_buf_ring; 3115 3116 ring = &rxr->rx_agg_ring_struct; 3117 rmem = &ring->ring_mem; 3118 rmem->nr_pages = bp->rx_agg_nr_pages; 3119 rmem->page_size = HW_RXBD_RING_SIZE; 3120 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 3121 rmem->dma_arr = rxr->rx_agg_desc_mapping; 3122 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 3123 rmem->vmem = (void **)&rxr->rx_agg_ring; 3124 3125 skip_rx: 3126 txr = bnapi->tx_ring; 3127 if (!txr) 3128 continue; 3129 3130 ring = &txr->tx_ring_struct; 3131 rmem = &ring->ring_mem; 3132 rmem->nr_pages = bp->tx_nr_pages; 3133 rmem->page_size = HW_RXBD_RING_SIZE; 3134 rmem->pg_arr = (void **)txr->tx_desc_ring; 3135 rmem->dma_arr = txr->tx_desc_mapping; 3136 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 3137 rmem->vmem = (void **)&txr->tx_buf_ring; 3138 } 3139 } 3140 3141 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 3142 { 3143 int i; 3144 u32 prod; 3145 struct rx_bd **rx_buf_ring; 3146 3147 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 3148 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 3149 int j; 3150 struct rx_bd *rxbd; 3151 3152 rxbd = rx_buf_ring[i]; 3153 if (!rxbd) 3154 continue; 3155 3156 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 3157 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 3158 rxbd->rx_bd_opaque = prod; 3159 } 3160 } 3161 } 3162 3163 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 3164 { 3165 struct net_device *dev = bp->dev; 3166 struct bnxt_rx_ring_info *rxr; 3167 struct bnxt_ring_struct *ring; 3168 u32 prod, type; 3169 int i; 3170 3171 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 3172 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 3173 3174 if (NET_IP_ALIGN == 2) 3175 type |= RX_BD_FLAGS_SOP; 3176 3177 rxr = &bp->rx_ring[ring_nr]; 3178 ring = &rxr->rx_ring_struct; 3179 bnxt_init_rxbd_pages(ring, type); 3180 3181 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 3182 bpf_prog_add(bp->xdp_prog, 1); 3183 rxr->xdp_prog = bp->xdp_prog; 3184 } 3185 prod = rxr->rx_prod; 3186 for (i = 0; i < bp->rx_ring_size; i++) { 3187 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { 3188 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 3189 ring_nr, i, bp->rx_ring_size); 3190 break; 3191 } 3192 prod = NEXT_RX(prod); 3193 } 3194 rxr->rx_prod = prod; 3195 ring->fw_ring_id = INVALID_HW_RING_ID; 3196 3197 ring = &rxr->rx_agg_ring_struct; 3198 ring->fw_ring_id = INVALID_HW_RING_ID; 3199 3200 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 3201 return 0; 3202 3203 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 3204 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 3205 3206 bnxt_init_rxbd_pages(ring, type); 3207 3208 prod = rxr->rx_agg_prod; 3209 for (i = 0; i < bp->rx_agg_ring_size; i++) { 3210 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { 3211 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 3212 ring_nr, i, bp->rx_ring_size); 3213 break; 3214 } 3215 prod = NEXT_RX_AGG(prod); 3216 } 3217 rxr->rx_agg_prod = prod; 3218 3219 if (bp->flags & BNXT_FLAG_TPA) { 3220 
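/* Pre-allocate one receive buffer per TPA slot; these buffers are used
 * when the hardware starts an LRO/GRO_HW aggregation on this ring.
 */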
if (rxr->rx_tpa) { 3221 u8 *data; 3222 dma_addr_t mapping; 3223 3224 for (i = 0; i < bp->max_tpa; i++) { 3225 data = __bnxt_alloc_rx_data(bp, &mapping, 3226 GFP_KERNEL); 3227 if (!data) 3228 return -ENOMEM; 3229 3230 rxr->rx_tpa[i].data = data; 3231 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 3232 rxr->rx_tpa[i].mapping = mapping; 3233 } 3234 } else { 3235 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); 3236 return -ENOMEM; 3237 } 3238 } 3239 3240 return 0; 3241 } 3242 3243 static void bnxt_init_cp_rings(struct bnxt *bp) 3244 { 3245 int i, j; 3246 3247 for (i = 0; i < bp->cp_nr_rings; i++) { 3248 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 3249 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3250 3251 ring->fw_ring_id = INVALID_HW_RING_ID; 3252 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3253 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3254 for (j = 0; j < 2; j++) { 3255 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 3256 3257 if (!cpr2) 3258 continue; 3259 3260 ring = &cpr2->cp_ring_struct; 3261 ring->fw_ring_id = INVALID_HW_RING_ID; 3262 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3263 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3264 } 3265 } 3266 } 3267 3268 static int bnxt_init_rx_rings(struct bnxt *bp) 3269 { 3270 int i, rc = 0; 3271 3272 if (BNXT_RX_PAGE_MODE(bp)) { 3273 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 3274 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 3275 } else { 3276 bp->rx_offset = BNXT_RX_OFFSET; 3277 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 3278 } 3279 3280 for (i = 0; i < bp->rx_nr_rings; i++) { 3281 rc = bnxt_init_one_rx_ring(bp, i); 3282 if (rc) 3283 break; 3284 } 3285 3286 return rc; 3287 } 3288 3289 static int bnxt_init_tx_rings(struct bnxt *bp) 3290 { 3291 u16 i; 3292 3293 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 3294 MAX_SKB_FRAGS + 1); 3295 3296 for (i = 0; i < bp->tx_nr_rings; i++) { 3297 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3298 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 3299 3300 ring->fw_ring_id = INVALID_HW_RING_ID; 3301 } 3302 3303 return 0; 3304 } 3305 3306 static void bnxt_free_ring_grps(struct bnxt *bp) 3307 { 3308 kfree(bp->grp_info); 3309 bp->grp_info = NULL; 3310 } 3311 3312 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 3313 { 3314 int i; 3315 3316 if (irq_re_init) { 3317 bp->grp_info = kcalloc(bp->cp_nr_rings, 3318 sizeof(struct bnxt_ring_grp_info), 3319 GFP_KERNEL); 3320 if (!bp->grp_info) 3321 return -ENOMEM; 3322 } 3323 for (i = 0; i < bp->cp_nr_rings; i++) { 3324 if (irq_re_init) 3325 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 3326 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 3327 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 3328 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 3329 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 3330 } 3331 return 0; 3332 } 3333 3334 static void bnxt_free_vnics(struct bnxt *bp) 3335 { 3336 kfree(bp->vnic_info); 3337 bp->vnic_info = NULL; 3338 bp->nr_vnics = 0; 3339 } 3340 3341 static int bnxt_alloc_vnics(struct bnxt *bp) 3342 { 3343 int num_vnics = 1; 3344 3345 #ifdef CONFIG_RFS_ACCEL 3346 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 3347 num_vnics += bp->rx_nr_rings; 3348 #endif 3349 3350 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3351 num_vnics++; 3352 3353 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 3354 GFP_KERNEL); 3355 if (!bp->vnic_info) 3356 return -ENOMEM; 3357 3358 bp->nr_vnics = num_vnics; 
3359 return 0; 3360 } 3361 3362 static void bnxt_init_vnics(struct bnxt *bp) 3363 { 3364 int i; 3365 3366 for (i = 0; i < bp->nr_vnics; i++) { 3367 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3368 int j; 3369 3370 vnic->fw_vnic_id = INVALID_HW_RING_ID; 3371 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 3372 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 3373 3374 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 3375 3376 if (bp->vnic_info[i].rss_hash_key) { 3377 if (i == 0) 3378 prandom_bytes(vnic->rss_hash_key, 3379 HW_HASH_KEY_SIZE); 3380 else 3381 memcpy(vnic->rss_hash_key, 3382 bp->vnic_info[0].rss_hash_key, 3383 HW_HASH_KEY_SIZE); 3384 } 3385 } 3386 } 3387 3388 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 3389 { 3390 int pages; 3391 3392 pages = ring_size / desc_per_pg; 3393 3394 if (!pages) 3395 return 1; 3396 3397 pages++; 3398 3399 while (pages & (pages - 1)) 3400 pages++; 3401 3402 return pages; 3403 } 3404 3405 void bnxt_set_tpa_flags(struct bnxt *bp) 3406 { 3407 bp->flags &= ~BNXT_FLAG_TPA; 3408 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 3409 return; 3410 if (bp->dev->features & NETIF_F_LRO) 3411 bp->flags |= BNXT_FLAG_LRO; 3412 else if (bp->dev->features & NETIF_F_GRO_HW) 3413 bp->flags |= BNXT_FLAG_GRO; 3414 } 3415 3416 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 3417 * be set on entry. 3418 */ 3419 void bnxt_set_ring_params(struct bnxt *bp) 3420 { 3421 u32 ring_size, rx_size, rx_space; 3422 u32 agg_factor = 0, agg_ring_size = 0; 3423 3424 /* 8 for CRC and VLAN */ 3425 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 3426 3427 rx_space = rx_size + NET_SKB_PAD + 3428 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3429 3430 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 3431 ring_size = bp->rx_ring_size; 3432 bp->rx_agg_ring_size = 0; 3433 bp->rx_agg_nr_pages = 0; 3434 3435 if (bp->flags & BNXT_FLAG_TPA) 3436 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 3437 3438 bp->flags &= ~BNXT_FLAG_JUMBO; 3439 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 3440 u32 jumbo_factor; 3441 3442 bp->flags |= BNXT_FLAG_JUMBO; 3443 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 3444 if (jumbo_factor > agg_factor) 3445 agg_factor = jumbo_factor; 3446 } 3447 agg_ring_size = ring_size * agg_factor; 3448 3449 if (agg_ring_size) { 3450 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 3451 RX_DESC_CNT); 3452 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 3453 u32 tmp = agg_ring_size; 3454 3455 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 3456 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 3457 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 3458 tmp, agg_ring_size); 3459 } 3460 bp->rx_agg_ring_size = agg_ring_size; 3461 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 3462 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 3463 rx_space = rx_size + NET_SKB_PAD + 3464 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3465 } 3466 3467 bp->rx_buf_use_size = rx_size; 3468 bp->rx_buf_size = rx_space; 3469 3470 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 3471 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 3472 3473 ring_size = bp->tx_ring_size; 3474 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 3475 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 3476 3477 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 3478 bp->cp_ring_size = ring_size; 3479 3480 bp->cp_nr_pages = 
bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 3481 if (bp->cp_nr_pages > MAX_CP_PAGES) { 3482 bp->cp_nr_pages = MAX_CP_PAGES; 3483 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 3484 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 3485 ring_size, bp->cp_ring_size); 3486 } 3487 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 3488 bp->cp_ring_mask = bp->cp_bit - 1; 3489 } 3490 3491 /* Changing allocation mode of RX rings. 3492 * TODO: Update when extending xdp_rxq_info to support allocation modes. 3493 */ 3494 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 3495 { 3496 if (page_mode) { 3497 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 3498 return -EOPNOTSUPP; 3499 bp->dev->max_mtu = 3500 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 3501 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 3502 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 3503 bp->rx_dir = DMA_BIDIRECTIONAL; 3504 bp->rx_skb_func = bnxt_rx_page_skb; 3505 /* Disable LRO or GRO_HW */ 3506 netdev_update_features(bp->dev); 3507 } else { 3508 bp->dev->max_mtu = bp->max_mtu; 3509 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 3510 bp->rx_dir = DMA_FROM_DEVICE; 3511 bp->rx_skb_func = bnxt_rx_skb; 3512 } 3513 return 0; 3514 } 3515 3516 static void bnxt_free_vnic_attributes(struct bnxt *bp) 3517 { 3518 int i; 3519 struct bnxt_vnic_info *vnic; 3520 struct pci_dev *pdev = bp->pdev; 3521 3522 if (!bp->vnic_info) 3523 return; 3524 3525 for (i = 0; i < bp->nr_vnics; i++) { 3526 vnic = &bp->vnic_info[i]; 3527 3528 kfree(vnic->fw_grp_ids); 3529 vnic->fw_grp_ids = NULL; 3530 3531 kfree(vnic->uc_list); 3532 vnic->uc_list = NULL; 3533 3534 if (vnic->mc_list) { 3535 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 3536 vnic->mc_list, vnic->mc_list_mapping); 3537 vnic->mc_list = NULL; 3538 } 3539 3540 if (vnic->rss_table) { 3541 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3542 vnic->rss_table, 3543 vnic->rss_table_dma_addr); 3544 vnic->rss_table = NULL; 3545 } 3546 3547 vnic->rss_hash_key = NULL; 3548 vnic->flags = 0; 3549 } 3550 } 3551 3552 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 3553 { 3554 int i, rc = 0, size; 3555 struct bnxt_vnic_info *vnic; 3556 struct pci_dev *pdev = bp->pdev; 3557 int max_rings; 3558 3559 for (i = 0; i < bp->nr_vnics; i++) { 3560 vnic = &bp->vnic_info[i]; 3561 3562 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 3563 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 3564 3565 if (mem_size > 0) { 3566 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 3567 if (!vnic->uc_list) { 3568 rc = -ENOMEM; 3569 goto out; 3570 } 3571 } 3572 } 3573 3574 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 3575 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 3576 vnic->mc_list = 3577 dma_alloc_coherent(&pdev->dev, 3578 vnic->mc_list_size, 3579 &vnic->mc_list_mapping, 3580 GFP_KERNEL); 3581 if (!vnic->mc_list) { 3582 rc = -ENOMEM; 3583 goto out; 3584 } 3585 } 3586 3587 if (bp->flags & BNXT_FLAG_CHIP_P5) 3588 goto vnic_skip_grps; 3589 3590 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3591 max_rings = bp->rx_nr_rings; 3592 else 3593 max_rings = 1; 3594 3595 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 3596 if (!vnic->fw_grp_ids) { 3597 rc = -ENOMEM; 3598 goto out; 3599 } 3600 vnic_skip_grps: 3601 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 3602 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 3603 continue; 3604 3605 /* Allocate rss table and hash key */ 3606 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3607 &vnic->rss_table_dma_addr, 3608 GFP_KERNEL); 3609 if (!vnic->rss_table) { 3610 rc = -ENOMEM; 3611 
goto out; 3612 } 3613 3614 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 3615 3616 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 3617 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 3618 } 3619 return 0; 3620 3621 out: 3622 return rc; 3623 } 3624 3625 static void bnxt_free_hwrm_resources(struct bnxt *bp) 3626 { 3627 struct pci_dev *pdev = bp->pdev; 3628 3629 if (bp->hwrm_cmd_resp_addr) { 3630 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3631 bp->hwrm_cmd_resp_dma_addr); 3632 bp->hwrm_cmd_resp_addr = NULL; 3633 } 3634 3635 if (bp->hwrm_cmd_kong_resp_addr) { 3636 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3637 bp->hwrm_cmd_kong_resp_addr, 3638 bp->hwrm_cmd_kong_resp_dma_addr); 3639 bp->hwrm_cmd_kong_resp_addr = NULL; 3640 } 3641 } 3642 3643 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) 3644 { 3645 struct pci_dev *pdev = bp->pdev; 3646 3647 if (bp->hwrm_cmd_kong_resp_addr) 3648 return 0; 3649 3650 bp->hwrm_cmd_kong_resp_addr = 3651 dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3652 &bp->hwrm_cmd_kong_resp_dma_addr, 3653 GFP_KERNEL); 3654 if (!bp->hwrm_cmd_kong_resp_addr) 3655 return -ENOMEM; 3656 3657 return 0; 3658 } 3659 3660 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 3661 { 3662 struct pci_dev *pdev = bp->pdev; 3663 3664 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3665 &bp->hwrm_cmd_resp_dma_addr, 3666 GFP_KERNEL); 3667 if (!bp->hwrm_cmd_resp_addr) 3668 return -ENOMEM; 3669 3670 return 0; 3671 } 3672 3673 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) 3674 { 3675 if (bp->hwrm_short_cmd_req_addr) { 3676 struct pci_dev *pdev = bp->pdev; 3677 3678 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3679 bp->hwrm_short_cmd_req_addr, 3680 bp->hwrm_short_cmd_req_dma_addr); 3681 bp->hwrm_short_cmd_req_addr = NULL; 3682 } 3683 } 3684 3685 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) 3686 { 3687 struct pci_dev *pdev = bp->pdev; 3688 3689 if (bp->hwrm_short_cmd_req_addr) 3690 return 0; 3691 3692 bp->hwrm_short_cmd_req_addr = 3693 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3694 &bp->hwrm_short_cmd_req_dma_addr, 3695 GFP_KERNEL); 3696 if (!bp->hwrm_short_cmd_req_addr) 3697 return -ENOMEM; 3698 3699 return 0; 3700 } 3701 3702 static void bnxt_free_port_stats(struct bnxt *bp) 3703 { 3704 struct pci_dev *pdev = bp->pdev; 3705 3706 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3707 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 3708 3709 if (bp->hw_rx_port_stats) { 3710 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 3711 bp->hw_rx_port_stats, 3712 bp->hw_rx_port_stats_map); 3713 bp->hw_rx_port_stats = NULL; 3714 } 3715 3716 if (bp->hw_tx_port_stats_ext) { 3717 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), 3718 bp->hw_tx_port_stats_ext, 3719 bp->hw_tx_port_stats_ext_map); 3720 bp->hw_tx_port_stats_ext = NULL; 3721 } 3722 3723 if (bp->hw_rx_port_stats_ext) { 3724 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3725 bp->hw_rx_port_stats_ext, 3726 bp->hw_rx_port_stats_ext_map); 3727 bp->hw_rx_port_stats_ext = NULL; 3728 } 3729 3730 if (bp->hw_pcie_stats) { 3731 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), 3732 bp->hw_pcie_stats, bp->hw_pcie_stats_map); 3733 bp->hw_pcie_stats = NULL; 3734 } 3735 } 3736 3737 static void bnxt_free_ring_stats(struct bnxt *bp) 3738 { 3739 struct pci_dev *pdev = bp->pdev; 3740 int size, i; 3741 3742 if (!bp->bnapi) 3743 return; 3744 3745 size = bp->hw_ring_stats_size; 3746 3747 for (i = 0; i < 
bp->cp_nr_rings; i++) { 3748 struct bnxt_napi *bnapi = bp->bnapi[i]; 3749 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3750 3751 if (cpr->hw_stats) { 3752 dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 3753 cpr->hw_stats_map); 3754 cpr->hw_stats = NULL; 3755 } 3756 } 3757 } 3758 3759 static int bnxt_alloc_stats(struct bnxt *bp) 3760 { 3761 u32 size, i; 3762 struct pci_dev *pdev = bp->pdev; 3763 3764 size = bp->hw_ring_stats_size; 3765 3766 for (i = 0; i < bp->cp_nr_rings; i++) { 3767 struct bnxt_napi *bnapi = bp->bnapi[i]; 3768 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3769 3770 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 3771 &cpr->hw_stats_map, 3772 GFP_KERNEL); 3773 if (!cpr->hw_stats) 3774 return -ENOMEM; 3775 3776 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 3777 } 3778 3779 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 3780 return 0; 3781 3782 if (bp->hw_rx_port_stats) 3783 goto alloc_ext_stats; 3784 3785 bp->hw_port_stats_size = sizeof(struct rx_port_stats) + 3786 sizeof(struct tx_port_stats) + 1024; 3787 3788 bp->hw_rx_port_stats = 3789 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, 3790 &bp->hw_rx_port_stats_map, 3791 GFP_KERNEL); 3792 if (!bp->hw_rx_port_stats) 3793 return -ENOMEM; 3794 3795 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512; 3796 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + 3797 sizeof(struct rx_port_stats) + 512; 3798 bp->flags |= BNXT_FLAG_PORT_STATS; 3799 3800 alloc_ext_stats: 3801 /* Display extended statistics only if FW supports it */ 3802 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 3803 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 3804 return 0; 3805 3806 if (bp->hw_rx_port_stats_ext) 3807 goto alloc_tx_ext_stats; 3808 3809 bp->hw_rx_port_stats_ext = 3810 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3811 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL); 3812 if (!bp->hw_rx_port_stats_ext) 3813 return 0; 3814 3815 alloc_tx_ext_stats: 3816 if (bp->hw_tx_port_stats_ext) 3817 goto alloc_pcie_stats; 3818 3819 if (bp->hwrm_spec_code >= 0x10902 || 3820 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 3821 bp->hw_tx_port_stats_ext = 3822 dma_alloc_coherent(&pdev->dev, 3823 sizeof(struct tx_port_stats_ext), 3824 &bp->hw_tx_port_stats_ext_map, 3825 GFP_KERNEL); 3826 } 3827 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3828 3829 alloc_pcie_stats: 3830 if (bp->hw_pcie_stats || 3831 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 3832 return 0; 3833 3834 bp->hw_pcie_stats = 3835 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), 3836 &bp->hw_pcie_stats_map, GFP_KERNEL); 3837 if (!bp->hw_pcie_stats) 3838 return 0; 3839 3840 bp->flags |= BNXT_FLAG_PCIE_STATS; 3841 return 0; 3842 } 3843 3844 static void bnxt_clear_ring_indices(struct bnxt *bp) 3845 { 3846 int i; 3847 3848 if (!bp->bnapi) 3849 return; 3850 3851 for (i = 0; i < bp->cp_nr_rings; i++) { 3852 struct bnxt_napi *bnapi = bp->bnapi[i]; 3853 struct bnxt_cp_ring_info *cpr; 3854 struct bnxt_rx_ring_info *rxr; 3855 struct bnxt_tx_ring_info *txr; 3856 3857 if (!bnapi) 3858 continue; 3859 3860 cpr = &bnapi->cp_ring; 3861 cpr->cp_raw_cons = 0; 3862 3863 txr = bnapi->tx_ring; 3864 if (txr) { 3865 txr->tx_prod = 0; 3866 txr->tx_cons = 0; 3867 } 3868 3869 rxr = bnapi->rx_ring; 3870 if (rxr) { 3871 rxr->rx_prod = 0; 3872 rxr->rx_agg_prod = 0; 3873 rxr->rx_sw_agg_prod = 0; 3874 rxr->rx_next_cons = 0; 3875 } 3876 } 3877 } 3878 3879 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 3880 { 3881 
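/* The ntuple filter table only exists when aRFS (CONFIG_RFS_ACCEL) is
 * compiled in; without it this function is a no-op.
 */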
#ifdef CONFIG_RFS_ACCEL 3882 int i; 3883 3884 /* Under rtnl_lock and all our NAPIs have been disabled. It's 3885 * safe to delete the hash table. 3886 */ 3887 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 3888 struct hlist_head *head; 3889 struct hlist_node *tmp; 3890 struct bnxt_ntuple_filter *fltr; 3891 3892 head = &bp->ntp_fltr_hash_tbl[i]; 3893 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 3894 hlist_del(&fltr->hash); 3895 kfree(fltr); 3896 } 3897 } 3898 if (irq_reinit) { 3899 kfree(bp->ntp_fltr_bmap); 3900 bp->ntp_fltr_bmap = NULL; 3901 } 3902 bp->ntp_fltr_count = 0; 3903 #endif 3904 } 3905 3906 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 3907 { 3908 #ifdef CONFIG_RFS_ACCEL 3909 int i, rc = 0; 3910 3911 if (!(bp->flags & BNXT_FLAG_RFS)) 3912 return 0; 3913 3914 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 3915 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 3916 3917 bp->ntp_fltr_count = 0; 3918 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 3919 sizeof(long), 3920 GFP_KERNEL); 3921 3922 if (!bp->ntp_fltr_bmap) 3923 rc = -ENOMEM; 3924 3925 return rc; 3926 #else 3927 return 0; 3928 #endif 3929 } 3930 3931 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 3932 { 3933 bnxt_free_vnic_attributes(bp); 3934 bnxt_free_tx_rings(bp); 3935 bnxt_free_rx_rings(bp); 3936 bnxt_free_cp_rings(bp); 3937 bnxt_free_ntp_fltrs(bp, irq_re_init); 3938 if (irq_re_init) { 3939 bnxt_free_ring_stats(bp); 3940 bnxt_free_ring_grps(bp); 3941 bnxt_free_vnics(bp); 3942 kfree(bp->tx_ring_map); 3943 bp->tx_ring_map = NULL; 3944 kfree(bp->tx_ring); 3945 bp->tx_ring = NULL; 3946 kfree(bp->rx_ring); 3947 bp->rx_ring = NULL; 3948 kfree(bp->bnapi); 3949 bp->bnapi = NULL; 3950 } else { 3951 bnxt_clear_ring_indices(bp); 3952 } 3953 } 3954 3955 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 3956 { 3957 int i, j, rc, size, arr_size; 3958 void *bnapi; 3959 3960 if (irq_re_init) { 3961 /* Allocate bnapi mem pointer array and mem block for 3962 * all queues 3963 */ 3964 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 3965 bp->cp_nr_rings); 3966 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 3967 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 3968 if (!bnapi) 3969 return -ENOMEM; 3970 3971 bp->bnapi = bnapi; 3972 bnapi += arr_size; 3973 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 3974 bp->bnapi[i] = bnapi; 3975 bp->bnapi[i]->index = i; 3976 bp->bnapi[i]->bp = bp; 3977 if (bp->flags & BNXT_FLAG_CHIP_P5) { 3978 struct bnxt_cp_ring_info *cpr = 3979 &bp->bnapi[i]->cp_ring; 3980 3981 cpr->cp_ring_struct.ring_mem.flags = 3982 BNXT_RMEM_RING_PTE_FLAG; 3983 } 3984 } 3985 3986 bp->rx_ring = kcalloc(bp->rx_nr_rings, 3987 sizeof(struct bnxt_rx_ring_info), 3988 GFP_KERNEL); 3989 if (!bp->rx_ring) 3990 return -ENOMEM; 3991 3992 for (i = 0; i < bp->rx_nr_rings; i++) { 3993 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3994 3995 if (bp->flags & BNXT_FLAG_CHIP_P5) { 3996 rxr->rx_ring_struct.ring_mem.flags = 3997 BNXT_RMEM_RING_PTE_FLAG; 3998 rxr->rx_agg_ring_struct.ring_mem.flags = 3999 BNXT_RMEM_RING_PTE_FLAG; 4000 } 4001 rxr->bnapi = bp->bnapi[i]; 4002 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 4003 } 4004 4005 bp->tx_ring = kcalloc(bp->tx_nr_rings, 4006 sizeof(struct bnxt_tx_ring_info), 4007 GFP_KERNEL); 4008 if (!bp->tx_ring) 4009 return -ENOMEM; 4010 4011 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 4012 GFP_KERNEL); 4013 4014 if (!bp->tx_ring_map) 4015 return -ENOMEM; 4016 4017 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4018 j = 0; 4019 else 
4020 j = bp->rx_nr_rings; 4021 4022 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 4023 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4024 4025 if (bp->flags & BNXT_FLAG_CHIP_P5) 4026 txr->tx_ring_struct.ring_mem.flags = 4027 BNXT_RMEM_RING_PTE_FLAG; 4028 txr->bnapi = bp->bnapi[j]; 4029 bp->bnapi[j]->tx_ring = txr; 4030 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 4031 if (i >= bp->tx_nr_rings_xdp) { 4032 txr->txq_index = i - bp->tx_nr_rings_xdp; 4033 bp->bnapi[j]->tx_int = bnxt_tx_int; 4034 } else { 4035 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 4036 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 4037 } 4038 } 4039 4040 rc = bnxt_alloc_stats(bp); 4041 if (rc) 4042 goto alloc_mem_err; 4043 4044 rc = bnxt_alloc_ntp_fltrs(bp); 4045 if (rc) 4046 goto alloc_mem_err; 4047 4048 rc = bnxt_alloc_vnics(bp); 4049 if (rc) 4050 goto alloc_mem_err; 4051 } 4052 4053 bnxt_init_ring_struct(bp); 4054 4055 rc = bnxt_alloc_rx_rings(bp); 4056 if (rc) 4057 goto alloc_mem_err; 4058 4059 rc = bnxt_alloc_tx_rings(bp); 4060 if (rc) 4061 goto alloc_mem_err; 4062 4063 rc = bnxt_alloc_cp_rings(bp); 4064 if (rc) 4065 goto alloc_mem_err; 4066 4067 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 4068 BNXT_VNIC_UCAST_FLAG; 4069 rc = bnxt_alloc_vnic_attributes(bp); 4070 if (rc) 4071 goto alloc_mem_err; 4072 return 0; 4073 4074 alloc_mem_err: 4075 bnxt_free_mem(bp, true); 4076 return rc; 4077 } 4078 4079 static void bnxt_disable_int(struct bnxt *bp) 4080 { 4081 int i; 4082 4083 if (!bp->bnapi) 4084 return; 4085 4086 for (i = 0; i < bp->cp_nr_rings; i++) { 4087 struct bnxt_napi *bnapi = bp->bnapi[i]; 4088 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4089 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4090 4091 if (ring->fw_ring_id != INVALID_HW_RING_ID) 4092 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4093 } 4094 } 4095 4096 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 4097 { 4098 struct bnxt_napi *bnapi = bp->bnapi[n]; 4099 struct bnxt_cp_ring_info *cpr; 4100 4101 cpr = &bnapi->cp_ring; 4102 return cpr->cp_ring_struct.map_idx; 4103 } 4104 4105 static void bnxt_disable_int_sync(struct bnxt *bp) 4106 { 4107 int i; 4108 4109 atomic_inc(&bp->intr_sem); 4110 4111 bnxt_disable_int(bp); 4112 for (i = 0; i < bp->cp_nr_rings; i++) { 4113 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 4114 4115 synchronize_irq(bp->irq_tbl[map_idx].vector); 4116 } 4117 } 4118 4119 static void bnxt_enable_int(struct bnxt *bp) 4120 { 4121 int i; 4122 4123 atomic_set(&bp->intr_sem, 0); 4124 for (i = 0; i < bp->cp_nr_rings; i++) { 4125 struct bnxt_napi *bnapi = bp->bnapi[i]; 4126 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4127 4128 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 4129 } 4130 } 4131 4132 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 4133 u16 cmpl_ring, u16 target_id) 4134 { 4135 struct input *req = request; 4136 4137 req->req_type = cpu_to_le16(req_type); 4138 req->cmpl_ring = cpu_to_le16(cmpl_ring); 4139 req->target_id = cpu_to_le16(target_id); 4140 if (bnxt_kong_hwrm_message(bp, req)) 4141 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 4142 else 4143 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 4144 } 4145 4146 static int bnxt_hwrm_to_stderr(u32 hwrm_err) 4147 { 4148 switch (hwrm_err) { 4149 case HWRM_ERR_CODE_SUCCESS: 4150 return 0; 4151 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: 4152 return -EACCES; 4153 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: 4154 return -ENOSPC; 4155 case HWRM_ERR_CODE_INVALID_PARAMS: 4156 case 
HWRM_ERR_CODE_INVALID_FLAGS: 4157 case HWRM_ERR_CODE_INVALID_ENABLES: 4158 case HWRM_ERR_CODE_UNSUPPORTED_TLV: 4159 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: 4160 return -EINVAL; 4161 case HWRM_ERR_CODE_NO_BUFFER: 4162 return -ENOMEM; 4163 case HWRM_ERR_CODE_HOT_RESET_PROGRESS: 4164 case HWRM_ERR_CODE_BUSY: 4165 return -EAGAIN; 4166 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: 4167 return -EOPNOTSUPP; 4168 default: 4169 return -EIO; 4170 } 4171 } 4172 4173 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 4174 int timeout, bool silent) 4175 { 4176 int i, intr_process, rc, tmo_count; 4177 struct input *req = msg; 4178 u32 *data = msg; 4179 u8 *valid; 4180 u16 cp_ring_id, len = 0; 4181 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4182 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4183 struct hwrm_short_input short_input = {0}; 4184 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4185 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4186 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4187 4188 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4189 return -EBUSY; 4190 4191 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4192 if (msg_len > bp->hwrm_max_ext_req_len || 4193 !bp->hwrm_short_cmd_req_addr) 4194 return -EINVAL; 4195 } 4196 4197 if (bnxt_hwrm_kong_chnl(bp, req)) { 4198 dst = BNXT_HWRM_CHNL_KONG; 4199 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4200 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4201 resp = bp->hwrm_cmd_kong_resp_addr; 4202 } 4203 4204 memset(resp, 0, PAGE_SIZE); 4205 cp_ring_id = le16_to_cpu(req->cmpl_ring); 4206 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; 4207 4208 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); 4209 /* currently supports only one outstanding message */ 4210 if (intr_process) 4211 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 4212 4213 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 4214 msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4215 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 4216 u16 max_msg_len; 4217 4218 /* Set boundary for maximum extended request length for short 4219 * cmd format. If passed up from device use the max supported 4220 * internal req length. 4221 */ 4222 max_msg_len = bp->hwrm_max_ext_req_len; 4223 4224 memcpy(short_cmd_req, req, msg_len); 4225 if (msg_len < max_msg_len) 4226 memset(short_cmd_req + msg_len, 0, 4227 max_msg_len - msg_len); 4228 4229 short_input.req_type = req->req_type; 4230 short_input.signature = 4231 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); 4232 short_input.size = cpu_to_le16(msg_len); 4233 short_input.req_addr = 4234 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); 4235 4236 data = (u32 *)&short_input; 4237 msg_len = sizeof(short_input); 4238 4239 /* Sync memory write before updating doorbell */ 4240 wmb(); 4241 4242 max_req_len = BNXT_HWRM_SHORT_REQ_LEN; 4243 } 4244 4245 /* Write request msg to hwrm channel */ 4246 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); 4247 4248 for (i = msg_len; i < max_req_len; i += 4) 4249 writel(0, bp->bar0 + bar_offset + i); 4250 4251 /* Ring channel doorbell */ 4252 writel(1, bp->bar0 + doorbell_offset); 4253 4254 if (!pci_is_enabled(bp->pdev)) 4255 return 0; 4256 4257 if (!timeout) 4258 timeout = DFLT_HWRM_CMD_TIMEOUT; 4259 /* convert timeout to usec */ 4260 timeout *= 1000; 4261 4262 i = 0; 4263 /* Short timeout for the first few iterations: 4264 * number of loops = number of loops for short timeout + 4265 * number of loops for standard timeout. 
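	 * Illustrative numbers (defaults from bnxt.h, which may differ by
	 * kernel version): a 500 ms command timeout becomes 500000 usec;
	 * the first HWRM_SHORT_TIMEOUT_COUNTER polls sleep for the short
	 * interval, and the remaining budget is divided by HWRM_MIN_TIMEOUT
	 * to size the longer polls, so tmo_count caps the total wait near
	 * the requested timeout.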
4266 */ 4267 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4268 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4269 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4270 4271 if (intr_process) { 4272 u16 seq_id = bp->hwrm_intr_seq_id; 4273 4274 /* Wait until hwrm response cmpl interrupt is processed */ 4275 while (bp->hwrm_intr_seq_id != (u16)~seq_id && 4276 i++ < tmo_count) { 4277 /* Abort the wait for completion if the FW health 4278 * check has failed. 4279 */ 4280 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4281 return -EBUSY; 4282 /* on first few passes, just barely sleep */ 4283 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4284 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4285 HWRM_SHORT_MAX_TIMEOUT); 4286 else 4287 usleep_range(HWRM_MIN_TIMEOUT, 4288 HWRM_MAX_TIMEOUT); 4289 } 4290 4291 if (bp->hwrm_intr_seq_id != (u16)~seq_id) { 4292 if (!silent) 4293 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 4294 le16_to_cpu(req->req_type)); 4295 return -EBUSY; 4296 } 4297 len = le16_to_cpu(resp->resp_len); 4298 valid = ((u8 *)resp) + len - 1; 4299 } else { 4300 int j; 4301 4302 /* Check if response len is updated */ 4303 for (i = 0; i < tmo_count; i++) { 4304 /* Abort the wait for completion if the FW health 4305 * check has failed. 4306 */ 4307 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4308 return -EBUSY; 4309 len = le16_to_cpu(resp->resp_len); 4310 if (len) 4311 break; 4312 /* on first few passes, just barely sleep */ 4313 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4314 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4315 HWRM_SHORT_MAX_TIMEOUT); 4316 else 4317 usleep_range(HWRM_MIN_TIMEOUT, 4318 HWRM_MAX_TIMEOUT); 4319 } 4320 4321 if (i >= tmo_count) { 4322 if (!silent) 4323 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 4324 HWRM_TOTAL_TIMEOUT(i), 4325 le16_to_cpu(req->req_type), 4326 le16_to_cpu(req->seq_id), len); 4327 return -EBUSY; 4328 } 4329 4330 /* Last byte of resp contains valid bit */ 4331 valid = ((u8 *)resp) + len - 1; 4332 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4333 /* make sure we read from updated DMA memory */ 4334 dma_rmb(); 4335 if (*valid) 4336 break; 4337 usleep_range(1, 5); 4338 } 4339 4340 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 4341 if (!silent) 4342 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 4343 HWRM_TOTAL_TIMEOUT(i), 4344 le16_to_cpu(req->req_type), 4345 le16_to_cpu(req->seq_id), len, 4346 *valid); 4347 return -EBUSY; 4348 } 4349 } 4350 4351 /* Zero valid bit for compatibility. Valid bit in an older spec 4352 * may become a new field in a newer spec. We must make sure that 4353 * a new field not implemented by old spec will read zero. 
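	 * (The valid byte is simply the last byte of the response, at
	 * offset resp_len - 1, as computed for "valid" above.)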
4354 */ 4355 *valid = 0; 4356 rc = le16_to_cpu(resp->error_code); 4357 if (rc && !silent) 4358 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 4359 le16_to_cpu(resp->req_type), 4360 le16_to_cpu(resp->seq_id), rc); 4361 return bnxt_hwrm_to_stderr(rc); 4362 } 4363 4364 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4365 { 4366 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 4367 } 4368 4369 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4370 int timeout) 4371 { 4372 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4373 } 4374 4375 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4376 { 4377 int rc; 4378 4379 mutex_lock(&bp->hwrm_cmd_lock); 4380 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 4381 mutex_unlock(&bp->hwrm_cmd_lock); 4382 return rc; 4383 } 4384 4385 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4386 int timeout) 4387 { 4388 int rc; 4389 4390 mutex_lock(&bp->hwrm_cmd_lock); 4391 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4392 mutex_unlock(&bp->hwrm_cmd_lock); 4393 return rc; 4394 } 4395 4396 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 4397 bool async_only) 4398 { 4399 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; 4400 struct hwrm_func_drv_rgtr_input req = {0}; 4401 DECLARE_BITMAP(async_events_bmap, 256); 4402 u32 *events = (u32 *)async_events_bmap; 4403 u32 flags; 4404 int rc, i; 4405 4406 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 4407 4408 req.enables = 4409 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 4410 FUNC_DRV_RGTR_REQ_ENABLES_VER | 4411 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4412 4413 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 4414 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 4415 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 4416 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 4417 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 4418 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 4419 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 4420 req.flags = cpu_to_le32(flags); 4421 req.ver_maj_8b = DRV_VER_MAJ; 4422 req.ver_min_8b = DRV_VER_MIN; 4423 req.ver_upd_8b = DRV_VER_UPD; 4424 req.ver_maj = cpu_to_le16(DRV_VER_MAJ); 4425 req.ver_min = cpu_to_le16(DRV_VER_MIN); 4426 req.ver_upd = cpu_to_le16(DRV_VER_UPD); 4427 4428 if (BNXT_PF(bp)) { 4429 u32 data[8]; 4430 int i; 4431 4432 memset(data, 0, sizeof(data)); 4433 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 4434 u16 cmd = bnxt_vf_req_snif[i]; 4435 unsigned int bit, idx; 4436 4437 idx = cmd / 32; 4438 bit = cmd % 32; 4439 data[idx] |= 1 << bit; 4440 } 4441 4442 for (i = 0; i < 8; i++) 4443 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 4444 4445 req.enables |= 4446 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 4447 } 4448 4449 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 4450 req.flags |= cpu_to_le32( 4451 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 4452 4453 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 4454 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 4455 u16 event_id = bnxt_async_events_arr[i]; 4456 4457 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 4458 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4459 continue; 4460 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 4461 } 4462 if (bmap && bmap_size) { 4463 for (i = 0; i < bmap_size; i++) { 4464 if (test_bit(i, bmap)) 4465 __set_bit(i, 
async_events_bmap); 4466 } 4467 } 4468 for (i = 0; i < 8; i++) 4469 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 4470 4471 if (async_only) 4472 req.enables = 4473 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4474 4475 mutex_lock(&bp->hwrm_cmd_lock); 4476 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4477 if (!rc) { 4478 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 4479 if (resp->flags & 4480 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 4481 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 4482 } 4483 mutex_unlock(&bp->hwrm_cmd_lock); 4484 return rc; 4485 } 4486 4487 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 4488 { 4489 struct hwrm_func_drv_unrgtr_input req = {0}; 4490 4491 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 4492 return 0; 4493 4494 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 4495 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4496 } 4497 4498 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 4499 { 4500 u32 rc = 0; 4501 struct hwrm_tunnel_dst_port_free_input req = {0}; 4502 4503 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 4504 req.tunnel_type = tunnel_type; 4505 4506 switch (tunnel_type) { 4507 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 4508 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; 4509 break; 4510 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 4511 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; 4512 break; 4513 default: 4514 break; 4515 } 4516 4517 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4518 if (rc) 4519 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 4520 rc); 4521 return rc; 4522 } 4523 4524 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 4525 u8 tunnel_type) 4526 { 4527 u32 rc = 0; 4528 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 4529 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4530 4531 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 4532 4533 req.tunnel_type = tunnel_type; 4534 req.tunnel_dst_port_val = port; 4535 4536 mutex_lock(&bp->hwrm_cmd_lock); 4537 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4538 if (rc) { 4539 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", 4540 rc); 4541 goto err_out; 4542 } 4543 4544 switch (tunnel_type) { 4545 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 4546 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 4547 break; 4548 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 4549 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 4550 break; 4551 default: 4552 break; 4553 } 4554 4555 err_out: 4556 mutex_unlock(&bp->hwrm_cmd_lock); 4557 return rc; 4558 } 4559 4560 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 4561 { 4562 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 4563 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4564 4565 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 4566 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4567 4568 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 4569 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 4570 req.mask = cpu_to_le32(vnic->rx_mask); 4571 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4572 } 4573 4574 #ifdef CONFIG_RFS_ACCEL 4575 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 4576 struct bnxt_ntuple_filter *fltr) 4577 { 4578 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 4579 4580 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 4581 req.ntuple_filter_id = fltr->filter_id; 4582 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4583 } 4584 4585 #define BNXT_NTP_FLTR_FLAGS \ 4586 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 4587 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 4588 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 4589 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 4590 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 4591 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 4592 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 4593 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 4594 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 4595 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 4596 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 4597 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 4598 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 4599 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 4600 4601 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 4602 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 4603 4604 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 4605 struct bnxt_ntuple_filter *fltr) 4606 { 4607 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 4608 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 4609 struct flow_keys *keys = &fltr->fkeys; 4610 struct bnxt_vnic_info *vnic; 4611 u32 flags = 0; 4612 int rc = 0; 4613 4614 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 4615 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 4616 4617 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 4618 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 4619 req.dst_id = cpu_to_le16(fltr->rxq); 4620 } else { 4621 vnic = &bp->vnic_info[fltr->rxq + 1]; 4622 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 4623 } 4624 req.flags = cpu_to_le32(flags); 4625 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 4626 4627 req.ethertype = htons(ETH_P_IP); 4628 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 4629 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 4630 req.ip_protocol = keys->basic.ip_proto; 4631 4632 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 4633 int i; 4634 4635 req.ethertype = 
htons(ETH_P_IPV6); 4636 req.ip_addr_type = 4637 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 4638 *(struct in6_addr *)&req.src_ipaddr[0] = 4639 keys->addrs.v6addrs.src; 4640 *(struct in6_addr *)&req.dst_ipaddr[0] = 4641 keys->addrs.v6addrs.dst; 4642 for (i = 0; i < 4; i++) { 4643 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4644 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4645 } 4646 } else { 4647 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 4648 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4649 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 4650 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4651 } 4652 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 4653 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 4654 req.tunnel_type = 4655 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 4656 } 4657 4658 req.src_port = keys->ports.src; 4659 req.src_port_mask = cpu_to_be16(0xffff); 4660 req.dst_port = keys->ports.dst; 4661 req.dst_port_mask = cpu_to_be16(0xffff); 4662 4663 mutex_lock(&bp->hwrm_cmd_lock); 4664 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4665 if (!rc) { 4666 resp = bnxt_get_hwrm_resp_addr(bp, &req); 4667 fltr->filter_id = resp->ntuple_filter_id; 4668 } 4669 mutex_unlock(&bp->hwrm_cmd_lock); 4670 return rc; 4671 } 4672 #endif 4673 4674 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 4675 u8 *mac_addr) 4676 { 4677 u32 rc = 0; 4678 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 4679 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4680 4681 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 4682 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 4683 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 4684 req.flags |= 4685 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 4686 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 4687 req.enables = 4688 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 4689 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 4690 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 4691 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 4692 req.l2_addr_mask[0] = 0xff; 4693 req.l2_addr_mask[1] = 0xff; 4694 req.l2_addr_mask[2] = 0xff; 4695 req.l2_addr_mask[3] = 0xff; 4696 req.l2_addr_mask[4] = 0xff; 4697 req.l2_addr_mask[5] = 0xff; 4698 4699 mutex_lock(&bp->hwrm_cmd_lock); 4700 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4701 if (!rc) 4702 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 4703 resp->l2_filter_id; 4704 mutex_unlock(&bp->hwrm_cmd_lock); 4705 return rc; 4706 } 4707 4708 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 4709 { 4710 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 4711 int rc = 0; 4712 4713 /* Any associated ntuple filters will also be cleared by firmware. 
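	 * Only VNIC 0 is walked below because, per num_of_vnics above, it
	 * is the only VNIC the driver programs unicast L2 filters on.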
	 */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
	struct hwrm_vnic_tpa_cfg_input req = {0};

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* The number of aggregation segments is in log2 units, and
		 * the first packet is not counted in these units.
		 */
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			segs = MAX_TPA_SEGS_P5;
			max_aggs = bp->max_tpa;
		} else {
			segs = ilog2(nsegs);
		}
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(max_aggs);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct bnxt_ring_grp_info *grp_info;

	grp_info = &bp->grp_info[ring->grp_idx];
	return grp_info->cp_fw_ring_id;
}

static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_napi *bnapi = rxr->bnapi;
		struct bnxt_cp_ring_info *cpr;

		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
		return cpr->cp_ring_struct.fw_ring_id;
	} else {
		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
	}
}

static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_napi *bnapi = txr->bnapi;
		struct bnxt_cp_ring_info *cpr;

		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
		return cpr->cp_ring_struct.fw_ring_id;
	} else {
		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
	}
}

static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct
bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4833 struct hwrm_vnic_rss_cfg_input req = {0}; 4834 4835 if ((bp->flags & BNXT_FLAG_CHIP_P5) || 4836 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 4837 return 0; 4838 4839 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 4840 if (set_rss) { 4841 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 4842 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 4843 if (vnic->flags & BNXT_VNIC_RSS_FLAG) { 4844 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4845 max_rings = bp->rx_nr_rings - 1; 4846 else 4847 max_rings = bp->rx_nr_rings; 4848 } else { 4849 max_rings = 1; 4850 } 4851 4852 /* Fill the RSS indirection table with ring group ids */ 4853 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { 4854 if (j == max_rings) 4855 j = 0; 4856 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 4857 } 4858 4859 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 4860 req.hash_key_tbl_addr = 4861 cpu_to_le64(vnic->rss_hash_key_dma_addr); 4862 } 4863 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 4864 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4865 } 4866 4867 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) 4868 { 4869 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4870 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings; 4871 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 4872 struct hwrm_vnic_rss_cfg_input req = {0}; 4873 4874 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 4875 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4876 if (!set_rss) { 4877 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4878 return 0; 4879 } 4880 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 4881 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 4882 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 4883 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 4884 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); 4885 for (i = 0, k = 0; i < nr_ctxs; i++) { 4886 __le16 *ring_tbl = vnic->rss_table; 4887 int rc; 4888 4889 req.ring_table_pair_index = i; 4890 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 4891 for (j = 0; j < 64; j++) { 4892 u16 ring_id; 4893 4894 ring_id = rxr->rx_ring_struct.fw_ring_id; 4895 *ring_tbl++ = cpu_to_le16(ring_id); 4896 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 4897 *ring_tbl++ = cpu_to_le16(ring_id); 4898 rxr++; 4899 k++; 4900 if (k == max_rings) { 4901 k = 0; 4902 rxr = &bp->rx_ring[0]; 4903 } 4904 } 4905 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4906 if (rc) 4907 return rc; 4908 } 4909 return 0; 4910 } 4911 4912 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 4913 { 4914 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4915 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 4916 4917 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 4918 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 4919 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 4920 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 4921 req.enables = 4922 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 4923 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 4924 /* thresholds not implemented in firmware yet */ 4925 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 4926 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 4927 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4928 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4929 } 
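/* RSS context notes (summary of the code above and below): each VNIC may
 * own up to BNXT_MAX_CTX_PER_VNIC RSS/COS/LB contexts, which are freed one
 * by one in bnxt_hwrm_vnic_ctx_free().  On P5 chips,
 * bnxt_hwrm_vnic_set_rss_p5() programs one context per 64 RX rings,
 * writing a (RX ring id, companion completion ring id) pair into each of
 * the 64 table slots of a context.
 */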
4930 4931 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 4932 u16 ctx_idx) 4933 { 4934 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 4935 4936 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 4937 req.rss_cos_lb_ctx_id = 4938 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 4939 4940 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4941 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 4942 } 4943 4944 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 4945 { 4946 int i, j; 4947 4948 for (i = 0; i < bp->nr_vnics; i++) { 4949 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4950 4951 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 4952 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 4953 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 4954 } 4955 } 4956 bp->rsscos_nr_ctxs = 0; 4957 } 4958 4959 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 4960 { 4961 int rc; 4962 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 4963 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 4964 bp->hwrm_cmd_resp_addr; 4965 4966 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 4967 -1); 4968 4969 mutex_lock(&bp->hwrm_cmd_lock); 4970 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4971 if (!rc) 4972 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 4973 le16_to_cpu(resp->rss_cos_lb_ctx_id); 4974 mutex_unlock(&bp->hwrm_cmd_lock); 4975 4976 return rc; 4977 } 4978 4979 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 4980 { 4981 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 4982 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 4983 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 4984 } 4985 4986 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 4987 { 4988 unsigned int ring = 0, grp_idx; 4989 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4990 struct hwrm_vnic_cfg_input req = {0}; 4991 u16 def_vlan = 0; 4992 4993 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 4994 4995 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4996 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 4997 4998 req.default_rx_ring_id = 4999 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 5000 req.default_cmpl_ring_id = 5001 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 5002 req.enables = 5003 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 5004 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 5005 goto vnic_mru; 5006 } 5007 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 5008 /* Only RSS support for now TBD: COS & LB */ 5009 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 5010 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5011 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5012 VNIC_CFG_REQ_ENABLES_MRU); 5013 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 5014 req.rss_rule = 5015 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 5016 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5017 VNIC_CFG_REQ_ENABLES_MRU); 5018 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 5019 } else { 5020 req.rss_rule = cpu_to_le16(0xffff); 5021 } 5022 5023 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 5024 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 5025 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 5026 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 5027 } else { 5028 req.cos_rule = cpu_to_le16(0xffff); 5029 } 5030 5031 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 5032 ring = 0; 5033 
else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 5034 ring = vnic_id - 1; 5035 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 5036 ring = bp->rx_nr_rings - 1; 5037 5038 grp_idx = bp->rx_ring[ring].bnapi->index; 5039 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 5040 req.lb_rule = cpu_to_le16(0xffff); 5041 vnic_mru: 5042 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); 5043 5044 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5045 #ifdef CONFIG_BNXT_SRIOV 5046 if (BNXT_VF(bp)) 5047 def_vlan = bp->vf.vlan; 5048 #endif 5049 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 5050 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 5051 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 5052 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 5053 5054 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5055 } 5056 5057 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 5058 { 5059 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 5060 struct hwrm_vnic_free_input req = {0}; 5061 5062 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 5063 req.vnic_id = 5064 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 5065 5066 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5067 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 5068 } 5069 } 5070 5071 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 5072 { 5073 u16 i; 5074 5075 for (i = 0; i < bp->nr_vnics; i++) 5076 bnxt_hwrm_vnic_free_one(bp, i); 5077 } 5078 5079 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 5080 unsigned int start_rx_ring_idx, 5081 unsigned int nr_rings) 5082 { 5083 int rc = 0; 5084 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 5085 struct hwrm_vnic_alloc_input req = {0}; 5086 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5087 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5088 5089 if (bp->flags & BNXT_FLAG_CHIP_P5) 5090 goto vnic_no_ring_grps; 5091 5092 /* map ring groups to this vnic */ 5093 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 5094 grp_idx = bp->rx_ring[i].bnapi->index; 5095 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 5096 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 5097 j, nr_rings); 5098 break; 5099 } 5100 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 5101 } 5102 5103 vnic_no_ring_grps: 5104 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 5105 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 5106 if (vnic_id == 0) 5107 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 5108 5109 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 5110 5111 mutex_lock(&bp->hwrm_cmd_lock); 5112 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5113 if (!rc) 5114 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 5115 mutex_unlock(&bp->hwrm_cmd_lock); 5116 return rc; 5117 } 5118 5119 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 5120 { 5121 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5122 struct hwrm_vnic_qcaps_input req = {0}; 5123 int rc; 5124 5125 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 5126 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); 5127 if (bp->hwrm_spec_code < 0x10600) 5128 return 0; 5129 5130 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 5131 mutex_lock(&bp->hwrm_cmd_lock); 5132 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5133 if (!rc) { 5134 u32 flags = le32_to_cpu(resp->flags); 
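		/* The checks that follow pick up: the newer RSS default-CR
		 * capability (non-P5 chips only), RoCE mirroring support,
		 * and TPA v2 aggregation support, which also switches the
		 * per-ring stats to the larger ctx_hw_stats_ext layout.
		 */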
5135 5136 if (!(bp->flags & BNXT_FLAG_CHIP_P5) && 5137 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 5138 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 5139 if (flags & 5140 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 5141 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 5142 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 5143 if (bp->max_tpa_v2) 5144 bp->hw_ring_stats_size = 5145 sizeof(struct ctx_hw_stats_ext); 5146 } 5147 mutex_unlock(&bp->hwrm_cmd_lock); 5148 return rc; 5149 } 5150 5151 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 5152 { 5153 u16 i; 5154 u32 rc = 0; 5155 5156 if (bp->flags & BNXT_FLAG_CHIP_P5) 5157 return 0; 5158 5159 mutex_lock(&bp->hwrm_cmd_lock); 5160 for (i = 0; i < bp->rx_nr_rings; i++) { 5161 struct hwrm_ring_grp_alloc_input req = {0}; 5162 struct hwrm_ring_grp_alloc_output *resp = 5163 bp->hwrm_cmd_resp_addr; 5164 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 5165 5166 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 5167 5168 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 5169 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 5170 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 5171 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 5172 5173 rc = _hwrm_send_message(bp, &req, sizeof(req), 5174 HWRM_CMD_TIMEOUT); 5175 if (rc) 5176 break; 5177 5178 bp->grp_info[grp_idx].fw_grp_id = 5179 le32_to_cpu(resp->ring_group_id); 5180 } 5181 mutex_unlock(&bp->hwrm_cmd_lock); 5182 return rc; 5183 } 5184 5185 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 5186 { 5187 u16 i; 5188 struct hwrm_ring_grp_free_input req = {0}; 5189 5190 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) 5191 return; 5192 5193 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 5194 5195 mutex_lock(&bp->hwrm_cmd_lock); 5196 for (i = 0; i < bp->cp_nr_rings; i++) { 5197 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 5198 continue; 5199 req.ring_group_id = 5200 cpu_to_le32(bp->grp_info[i].fw_grp_id); 5201 5202 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5203 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 5204 } 5205 mutex_unlock(&bp->hwrm_cmd_lock); 5206 } 5207 5208 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 5209 struct bnxt_ring_struct *ring, 5210 u32 ring_type, u32 map_index) 5211 { 5212 int rc = 0, err = 0; 5213 struct hwrm_ring_alloc_input req = {0}; 5214 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5215 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 5216 struct bnxt_ring_grp_info *grp_info; 5217 u16 ring_id; 5218 5219 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 5220 5221 req.enables = 0; 5222 if (rmem->nr_pages > 1) { 5223 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 5224 /* Page size is in log2 units */ 5225 req.page_size = BNXT_PAGE_SHIFT; 5226 req.page_tbl_depth = 1; 5227 } else { 5228 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 5229 } 5230 req.fbo = 0; 5231 /* Association of ring index with doorbell index and MSIX number */ 5232 req.logical_id = cpu_to_le16(map_index); 5233 5234 switch (ring_type) { 5235 case HWRM_RING_ALLOC_TX: { 5236 struct bnxt_tx_ring_info *txr; 5237 5238 txr = container_of(ring, struct bnxt_tx_ring_info, 5239 tx_ring_struct); 5240 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 5241 /* Association of transmit ring with completion ring */ 5242 grp_info = &bp->grp_info[ring->grp_idx]; 5243 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 5244 req.length = cpu_to_le32(bp->tx_ring_mask + 1); 
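		/* Ring sizes are given to firmware in entries; the driver
		 * stores them as masks (size - 1), hence the mask + 1 above
		 * and in the RX/AGG/CMPL cases below.
		 */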
5245 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5246 req.queue_id = cpu_to_le16(ring->queue_id); 5247 break; 5248 } 5249 case HWRM_RING_ALLOC_RX: 5250 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5251 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 5252 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5253 u16 flags = 0; 5254 5255 /* Association of rx ring with stats context */ 5256 grp_info = &bp->grp_info[ring->grp_idx]; 5257 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 5258 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5259 req.enables |= cpu_to_le32( 5260 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5261 if (NET_IP_ALIGN == 2) 5262 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 5263 req.flags = cpu_to_le16(flags); 5264 } 5265 break; 5266 case HWRM_RING_ALLOC_AGG: 5267 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5268 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 5269 /* Association of agg ring with rx ring */ 5270 grp_info = &bp->grp_info[ring->grp_idx]; 5271 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 5272 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 5273 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5274 req.enables |= cpu_to_le32( 5275 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 5276 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5277 } else { 5278 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5279 } 5280 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 5281 break; 5282 case HWRM_RING_ALLOC_CMPL: 5283 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 5284 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5285 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5286 /* Association of cp ring with nq */ 5287 grp_info = &bp->grp_info[map_index]; 5288 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 5289 req.cq_handle = cpu_to_le64(ring->handle); 5290 req.enables |= cpu_to_le32( 5291 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 5292 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 5293 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5294 } 5295 break; 5296 case HWRM_RING_ALLOC_NQ: 5297 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 5298 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5299 if (bp->flags & BNXT_FLAG_USING_MSIX) 5300 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5301 break; 5302 default: 5303 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 5304 ring_type); 5305 return -1; 5306 } 5307 5308 mutex_lock(&bp->hwrm_cmd_lock); 5309 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5310 err = le16_to_cpu(resp->error_code); 5311 ring_id = le16_to_cpu(resp->ring_id); 5312 mutex_unlock(&bp->hwrm_cmd_lock); 5313 5314 if (rc || err) { 5315 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. 
rc:%x err:%x\n", 5316 ring_type, rc, err); 5317 return -EIO; 5318 } 5319 ring->fw_ring_id = ring_id; 5320 return rc; 5321 } 5322 5323 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 5324 { 5325 int rc; 5326 5327 if (BNXT_PF(bp)) { 5328 struct hwrm_func_cfg_input req = {0}; 5329 5330 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 5331 req.fid = cpu_to_le16(0xffff); 5332 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5333 req.async_event_cr = cpu_to_le16(idx); 5334 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5335 } else { 5336 struct hwrm_func_vf_cfg_input req = {0}; 5337 5338 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 5339 req.enables = 5340 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5341 req.async_event_cr = cpu_to_le16(idx); 5342 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5343 } 5344 return rc; 5345 } 5346 5347 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 5348 u32 map_idx, u32 xid) 5349 { 5350 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5351 if (BNXT_PF(bp)) 5352 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5; 5353 else 5354 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5; 5355 switch (ring_type) { 5356 case HWRM_RING_ALLOC_TX: 5357 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 5358 break; 5359 case HWRM_RING_ALLOC_RX: 5360 case HWRM_RING_ALLOC_AGG: 5361 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 5362 break; 5363 case HWRM_RING_ALLOC_CMPL: 5364 db->db_key64 = DBR_PATH_L2; 5365 break; 5366 case HWRM_RING_ALLOC_NQ: 5367 db->db_key64 = DBR_PATH_L2; 5368 break; 5369 } 5370 db->db_key64 |= (u64)xid << DBR_XID_SFT; 5371 } else { 5372 db->doorbell = bp->bar1 + map_idx * 0x80; 5373 switch (ring_type) { 5374 case HWRM_RING_ALLOC_TX: 5375 db->db_key32 = DB_KEY_TX; 5376 break; 5377 case HWRM_RING_ALLOC_RX: 5378 case HWRM_RING_ALLOC_AGG: 5379 db->db_key32 = DB_KEY_RX; 5380 break; 5381 case HWRM_RING_ALLOC_CMPL: 5382 db->db_key32 = DB_KEY_CP; 5383 break; 5384 } 5385 } 5386 } 5387 5388 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 5389 { 5390 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 5391 int i, rc = 0; 5392 u32 type; 5393 5394 if (bp->flags & BNXT_FLAG_CHIP_P5) 5395 type = HWRM_RING_ALLOC_NQ; 5396 else 5397 type = HWRM_RING_ALLOC_CMPL; 5398 for (i = 0; i < bp->cp_nr_rings; i++) { 5399 struct bnxt_napi *bnapi = bp->bnapi[i]; 5400 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5401 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5402 u32 map_idx = ring->map_idx; 5403 unsigned int vector; 5404 5405 vector = bp->irq_tbl[map_idx].vector; 5406 disable_irq_nosync(vector); 5407 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5408 if (rc) { 5409 enable_irq(vector); 5410 goto err_out; 5411 } 5412 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 5413 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5414 enable_irq(vector); 5415 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 5416 5417 if (!i) { 5418 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 5419 if (rc) 5420 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 5421 } 5422 } 5423 5424 type = HWRM_RING_ALLOC_TX; 5425 for (i = 0; i < bp->tx_nr_rings; i++) { 5426 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5427 struct bnxt_ring_struct *ring; 5428 u32 map_idx; 5429 5430 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5431 struct bnxt_napi *bnapi = txr->bnapi; 5432 struct bnxt_cp_ring_info *cpr, *cpr2; 5433 u32 type2 = HWRM_RING_ALLOC_CMPL; 5434 5435 cpr = 
&bnapi->cp_ring; 5436 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL]; 5437 ring = &cpr2->cp_ring_struct; 5438 ring->handle = BNXT_TX_HDL; 5439 map_idx = bnapi->index; 5440 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5441 if (rc) 5442 goto err_out; 5443 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5444 ring->fw_ring_id); 5445 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5446 } 5447 ring = &txr->tx_ring_struct; 5448 map_idx = i; 5449 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5450 if (rc) 5451 goto err_out; 5452 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 5453 } 5454 5455 type = HWRM_RING_ALLOC_RX; 5456 for (i = 0; i < bp->rx_nr_rings; i++) { 5457 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5458 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5459 struct bnxt_napi *bnapi = rxr->bnapi; 5460 u32 map_idx = bnapi->index; 5461 5462 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5463 if (rc) 5464 goto err_out; 5465 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 5466 /* If we have agg rings, post agg buffers first. */ 5467 if (!agg_rings) 5468 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5469 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 5470 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5471 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5472 u32 type2 = HWRM_RING_ALLOC_CMPL; 5473 struct bnxt_cp_ring_info *cpr2; 5474 5475 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; 5476 ring = &cpr2->cp_ring_struct; 5477 ring->handle = BNXT_RX_HDL; 5478 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5479 if (rc) 5480 goto err_out; 5481 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5482 ring->fw_ring_id); 5483 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5484 } 5485 } 5486 5487 if (agg_rings) { 5488 type = HWRM_RING_ALLOC_AGG; 5489 for (i = 0; i < bp->rx_nr_rings; i++) { 5490 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5491 struct bnxt_ring_struct *ring = 5492 &rxr->rx_agg_ring_struct; 5493 u32 grp_idx = ring->grp_idx; 5494 u32 map_idx = grp_idx + bp->rx_nr_rings; 5495 5496 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5497 if (rc) 5498 goto err_out; 5499 5500 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 5501 ring->fw_ring_id); 5502 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 5503 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5504 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 5505 } 5506 } 5507 err_out: 5508 return rc; 5509 } 5510 5511 static int hwrm_ring_free_send_msg(struct bnxt *bp, 5512 struct bnxt_ring_struct *ring, 5513 u32 ring_type, int cmpl_ring_id) 5514 { 5515 int rc; 5516 struct hwrm_ring_free_input req = {0}; 5517 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 5518 u16 error_code; 5519 5520 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 5521 return 0; 5522 5523 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 5524 req.ring_type = ring_type; 5525 req.ring_id = cpu_to_le16(ring->fw_ring_id); 5526 5527 mutex_lock(&bp->hwrm_cmd_lock); 5528 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5529 error_code = le16_to_cpu(resp->error_code); 5530 mutex_unlock(&bp->hwrm_cmd_lock); 5531 5532 if (rc || error_code) { 5533 netdev_err(bp->dev, "hwrm_ring_free type %d failed. 
rc:%x err:%x\n", 5534 ring_type, rc, error_code); 5535 return -EIO; 5536 } 5537 return 0; 5538 } 5539 5540 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 5541 { 5542 u32 type; 5543 int i; 5544 5545 if (!bp->bnapi) 5546 return; 5547 5548 for (i = 0; i < bp->tx_nr_rings; i++) { 5549 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5550 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 5551 5552 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5553 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 5554 5555 hwrm_ring_free_send_msg(bp, ring, 5556 RING_FREE_REQ_RING_TYPE_TX, 5557 close_path ? cmpl_ring_id : 5558 INVALID_HW_RING_ID); 5559 ring->fw_ring_id = INVALID_HW_RING_ID; 5560 } 5561 } 5562 5563 for (i = 0; i < bp->rx_nr_rings; i++) { 5564 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5565 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5566 u32 grp_idx = rxr->bnapi->index; 5567 5568 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5569 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5570 5571 hwrm_ring_free_send_msg(bp, ring, 5572 RING_FREE_REQ_RING_TYPE_RX, 5573 close_path ? cmpl_ring_id : 5574 INVALID_HW_RING_ID); 5575 ring->fw_ring_id = INVALID_HW_RING_ID; 5576 bp->grp_info[grp_idx].rx_fw_ring_id = 5577 INVALID_HW_RING_ID; 5578 } 5579 } 5580 5581 if (bp->flags & BNXT_FLAG_CHIP_P5) 5582 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 5583 else 5584 type = RING_FREE_REQ_RING_TYPE_RX; 5585 for (i = 0; i < bp->rx_nr_rings; i++) { 5586 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5587 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 5588 u32 grp_idx = rxr->bnapi->index; 5589 5590 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5591 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5592 5593 hwrm_ring_free_send_msg(bp, ring, type, 5594 close_path ? cmpl_ring_id : 5595 INVALID_HW_RING_ID); 5596 ring->fw_ring_id = INVALID_HW_RING_ID; 5597 bp->grp_info[grp_idx].agg_fw_ring_id = 5598 INVALID_HW_RING_ID; 5599 } 5600 } 5601 5602 /* The completion rings are about to be freed. After that the 5603 * IRQ doorbell will not work anymore. So we need to disable 5604 * IRQ here. 
5605 */ 5606 bnxt_disable_int_sync(bp); 5607 5608 if (bp->flags & BNXT_FLAG_CHIP_P5) 5609 type = RING_FREE_REQ_RING_TYPE_NQ; 5610 else 5611 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 5612 for (i = 0; i < bp->cp_nr_rings; i++) { 5613 struct bnxt_napi *bnapi = bp->bnapi[i]; 5614 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5615 struct bnxt_ring_struct *ring; 5616 int j; 5617 5618 for (j = 0; j < 2; j++) { 5619 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 5620 5621 if (cpr2) { 5622 ring = &cpr2->cp_ring_struct; 5623 if (ring->fw_ring_id == INVALID_HW_RING_ID) 5624 continue; 5625 hwrm_ring_free_send_msg(bp, ring, 5626 RING_FREE_REQ_RING_TYPE_L2_CMPL, 5627 INVALID_HW_RING_ID); 5628 ring->fw_ring_id = INVALID_HW_RING_ID; 5629 } 5630 } 5631 ring = &cpr->cp_ring_struct; 5632 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5633 hwrm_ring_free_send_msg(bp, ring, type, 5634 INVALID_HW_RING_ID); 5635 ring->fw_ring_id = INVALID_HW_RING_ID; 5636 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 5637 } 5638 } 5639 } 5640 5641 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5642 bool shared); 5643 5644 static int bnxt_hwrm_get_rings(struct bnxt *bp) 5645 { 5646 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5647 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5648 struct hwrm_func_qcfg_input req = {0}; 5649 int rc; 5650 5651 if (bp->hwrm_spec_code < 0x10601) 5652 return 0; 5653 5654 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5655 req.fid = cpu_to_le16(0xffff); 5656 mutex_lock(&bp->hwrm_cmd_lock); 5657 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5658 if (rc) { 5659 mutex_unlock(&bp->hwrm_cmd_lock); 5660 return rc; 5661 } 5662 5663 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5664 if (BNXT_NEW_RM(bp)) { 5665 u16 cp, stats; 5666 5667 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 5668 hw_resc->resv_hw_ring_grps = 5669 le32_to_cpu(resp->alloc_hw_ring_grps); 5670 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 5671 cp = le16_to_cpu(resp->alloc_cmpl_rings); 5672 stats = le16_to_cpu(resp->alloc_stat_ctx); 5673 hw_resc->resv_irqs = cp; 5674 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5675 int rx = hw_resc->resv_rx_rings; 5676 int tx = hw_resc->resv_tx_rings; 5677 5678 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5679 rx >>= 1; 5680 if (cp < (rx + tx)) { 5681 bnxt_trim_rings(bp, &rx, &tx, cp, false); 5682 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5683 rx <<= 1; 5684 hw_resc->resv_rx_rings = rx; 5685 hw_resc->resv_tx_rings = tx; 5686 } 5687 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 5688 hw_resc->resv_hw_ring_grps = rx; 5689 } 5690 hw_resc->resv_cp_rings = cp; 5691 hw_resc->resv_stat_ctxs = stats; 5692 } 5693 mutex_unlock(&bp->hwrm_cmd_lock); 5694 return 0; 5695 } 5696 5697 /* Caller must hold bp->hwrm_cmd_lock */ 5698 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 5699 { 5700 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5701 struct hwrm_func_qcfg_input req = {0}; 5702 int rc; 5703 5704 if (bp->hwrm_spec_code < 0x10601) 5705 return 0; 5706 5707 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5708 req.fid = cpu_to_le16(fid); 5709 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5710 if (!rc) 5711 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5712 5713 return rc; 5714 } 5715 5716 static bool bnxt_rfs_supported(struct bnxt *bp); 5717 5718 static void 5719 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, 5720 int 
tx_rings, int rx_rings, int ring_grps, 5721 int cp_rings, int stats, int vnics) 5722 { 5723 u32 enables = 0; 5724 5725 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); 5726 req->fid = cpu_to_le16(0xffff); 5727 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5728 req->num_tx_rings = cpu_to_le16(tx_rings); 5729 if (BNXT_NEW_RM(bp)) { 5730 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 5731 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5732 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5733 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 5734 enables |= tx_rings + ring_grps ? 5735 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5736 enables |= rx_rings ? 5737 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5738 } else { 5739 enables |= cp_rings ? 5740 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5741 enables |= ring_grps ? 5742 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 5743 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5744 } 5745 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 5746 5747 req->num_rx_rings = cpu_to_le16(rx_rings); 5748 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5749 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5750 req->num_msix = cpu_to_le16(cp_rings); 5751 req->num_rsscos_ctxs = 5752 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5753 } else { 5754 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5755 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5756 req->num_rsscos_ctxs = cpu_to_le16(1); 5757 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 5758 bnxt_rfs_supported(bp)) 5759 req->num_rsscos_ctxs = 5760 cpu_to_le16(ring_grps + 1); 5761 } 5762 req->num_stat_ctxs = cpu_to_le16(stats); 5763 req->num_vnics = cpu_to_le16(vnics); 5764 } 5765 req->enables = cpu_to_le32(enables); 5766 } 5767 5768 static void 5769 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, 5770 struct hwrm_func_vf_cfg_input *req, int tx_rings, 5771 int rx_rings, int ring_grps, int cp_rings, 5772 int stats, int vnics) 5773 { 5774 u32 enables = 0; 5775 5776 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); 5777 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5778 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 5779 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5780 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5781 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5782 enables |= tx_rings + ring_grps ? 5783 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5784 } else { 5785 enables |= cp_rings ? 5786 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5787 enables |= ring_grps ? 5788 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 5789 } 5790 enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 5791 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 5792 5793 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 5794 req->num_tx_rings = cpu_to_le16(tx_rings); 5795 req->num_rx_rings = cpu_to_le16(rx_rings); 5796 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5797 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5798 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5799 } else { 5800 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5801 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5802 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 5803 } 5804 req->num_stat_ctxs = cpu_to_le16(stats); 5805 req->num_vnics = cpu_to_le16(vnics); 5806 5807 req->enables = cpu_to_le32(enables); 5808 } 5809 5810 static int 5811 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5812 int ring_grps, int cp_rings, int stats, int vnics) 5813 { 5814 struct hwrm_func_cfg_input req = {0}; 5815 int rc; 5816 5817 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5818 cp_rings, stats, vnics); 5819 if (!req.enables) 5820 return 0; 5821 5822 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5823 if (rc) 5824 return rc; 5825 5826 if (bp->hwrm_spec_code < 0x10601) 5827 bp->hw_resc.resv_tx_rings = tx_rings; 5828 5829 return bnxt_hwrm_get_rings(bp); 5830 } 5831 5832 static int 5833 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5834 int ring_grps, int cp_rings, int stats, int vnics) 5835 { 5836 struct hwrm_func_vf_cfg_input req = {0}; 5837 int rc; 5838 5839 if (!BNXT_NEW_RM(bp)) { 5840 bp->hw_resc.resv_tx_rings = tx_rings; 5841 return 0; 5842 } 5843 5844 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5845 cp_rings, stats, vnics); 5846 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5847 if (rc) 5848 return rc; 5849 5850 return bnxt_hwrm_get_rings(bp); 5851 } 5852 5853 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 5854 int cp, int stat, int vnic) 5855 { 5856 if (BNXT_PF(bp)) 5857 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 5858 vnic); 5859 else 5860 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 5861 vnic); 5862 } 5863 5864 int bnxt_nq_rings_in_use(struct bnxt *bp) 5865 { 5866 int cp = bp->cp_nr_rings; 5867 int ulp_msix, ulp_base; 5868 5869 ulp_msix = bnxt_get_ulp_msix_num(bp); 5870 if (ulp_msix) { 5871 ulp_base = bnxt_get_ulp_msix_base(bp); 5872 cp += ulp_msix; 5873 if ((ulp_base + ulp_msix) > cp) 5874 cp = ulp_base + ulp_msix; 5875 } 5876 return cp; 5877 } 5878 5879 static int bnxt_cp_rings_in_use(struct bnxt *bp) 5880 { 5881 int cp; 5882 5883 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 5884 return bnxt_nq_rings_in_use(bp); 5885 5886 cp = bp->tx_nr_rings + bp->rx_nr_rings; 5887 return cp; 5888 } 5889 5890 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 5891 { 5892 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); 5893 int cp = bp->cp_nr_rings; 5894 5895 if (!ulp_stat) 5896 return cp; 5897 5898 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) 5899 return bnxt_get_ulp_msix_base(bp) + ulp_stat; 5900 5901 return cp + ulp_stat; 5902 } 5903 5904 static bool bnxt_need_reserve_rings(struct bnxt *bp) 5905 { 5906 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5907 int cp = bnxt_cp_rings_in_use(bp); 5908 int nq = bnxt_nq_rings_in_use(bp); 5909 int rx = bp->rx_nr_rings, stat; 5910 int vnic = 1, grp = rx; 5911 5912 if (bp->hwrm_spec_code < 0x10601) 5913 return false; 5914 5915 if 
(hw_resc->resv_tx_rings != bp->tx_nr_rings) 5916 return true; 5917 5918 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5919 vnic = rx + 1; 5920 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5921 rx <<= 1; 5922 stat = bnxt_get_func_stat_ctxs(bp); 5923 if (BNXT_NEW_RM(bp) && 5924 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 5925 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 5926 (hw_resc->resv_hw_ring_grps != grp && 5927 !(bp->flags & BNXT_FLAG_CHIP_P5)))) 5928 return true; 5929 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && 5930 hw_resc->resv_irqs != nq) 5931 return true; 5932 return false; 5933 } 5934 5935 static int __bnxt_reserve_rings(struct bnxt *bp) 5936 { 5937 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5938 int cp = bnxt_nq_rings_in_use(bp); 5939 int tx = bp->tx_nr_rings; 5940 int rx = bp->rx_nr_rings; 5941 int grp, rx_rings, rc; 5942 int vnic = 1, stat; 5943 bool sh = false; 5944 5945 if (!bnxt_need_reserve_rings(bp)) 5946 return 0; 5947 5948 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5949 sh = true; 5950 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5951 vnic = rx + 1; 5952 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5953 rx <<= 1; 5954 grp = bp->rx_nr_rings; 5955 stat = bnxt_get_func_stat_ctxs(bp); 5956 5957 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 5958 if (rc) 5959 return rc; 5960 5961 tx = hw_resc->resv_tx_rings; 5962 if (BNXT_NEW_RM(bp)) { 5963 rx = hw_resc->resv_rx_rings; 5964 cp = hw_resc->resv_irqs; 5965 grp = hw_resc->resv_hw_ring_grps; 5966 vnic = hw_resc->resv_vnics; 5967 stat = hw_resc->resv_stat_ctxs; 5968 } 5969 5970 rx_rings = rx; 5971 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5972 if (rx >= 2) { 5973 rx_rings = rx >> 1; 5974 } else { 5975 if (netif_running(bp->dev)) 5976 return -ENOMEM; 5977 5978 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 5979 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 5980 bp->dev->hw_features &= ~NETIF_F_LRO; 5981 bp->dev->features &= ~NETIF_F_LRO; 5982 bnxt_set_ring_params(bp); 5983 } 5984 } 5985 rx_rings = min_t(int, rx_rings, grp); 5986 cp = min_t(int, cp, bp->cp_nr_rings); 5987 if (stat > bnxt_get_ulp_stat_ctxs(bp)) 5988 stat -= bnxt_get_ulp_stat_ctxs(bp); 5989 cp = min_t(int, cp, stat); 5990 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 5991 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5992 rx = rx_rings << 1; 5993 cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; 5994 bp->tx_nr_rings = tx; 5995 bp->rx_nr_rings = rx_rings; 5996 bp->cp_nr_rings = cp; 5997 5998 if (!tx || !rx || !cp || !grp || !vnic || !stat) 5999 return -ENOMEM; 6000 6001 return rc; 6002 } 6003 6004 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6005 int ring_grps, int cp_rings, int stats, 6006 int vnics) 6007 { 6008 struct hwrm_func_vf_cfg_input req = {0}; 6009 u32 flags; 6010 6011 if (!BNXT_NEW_RM(bp)) 6012 return 0; 6013 6014 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6015 cp_rings, stats, vnics); 6016 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 6017 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6018 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6019 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6020 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 6021 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 6022 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6023 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6024 6025 req.flags = cpu_to_le32(flags); 6026 return hwrm_send_message_silent(bp, &req, sizeof(req), 6027 HWRM_CMD_TIMEOUT); 6028 } 6029 6030 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6031 int ring_grps, int cp_rings, int stats, 6032 int vnics) 6033 { 6034 struct hwrm_func_cfg_input req = {0}; 6035 u32 flags; 6036 6037 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6038 cp_rings, stats, vnics); 6039 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 6040 if (BNXT_NEW_RM(bp)) { 6041 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6042 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6043 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6044 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 6045 if (bp->flags & BNXT_FLAG_CHIP_P5) 6046 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 6047 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 6048 else 6049 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6050 } 6051 6052 req.flags = cpu_to_le32(flags); 6053 return hwrm_send_message_silent(bp, &req, sizeof(req), 6054 HWRM_CMD_TIMEOUT); 6055 } 6056 6057 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6058 int ring_grps, int cp_rings, int stats, 6059 int vnics) 6060 { 6061 if (bp->hwrm_spec_code < 0x10801) 6062 return 0; 6063 6064 if (BNXT_PF(bp)) 6065 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 6066 ring_grps, cp_rings, stats, 6067 vnics); 6068 6069 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6070 cp_rings, stats, vnics); 6071 } 6072 6073 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 6074 { 6075 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6076 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6077 struct hwrm_ring_aggint_qcaps_input req = {0}; 6078 int rc; 6079 6080 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 6081 coal_cap->num_cmpl_dma_aggr_max = 63; 6082 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 6083 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 6084 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 6085 coal_cap->int_lat_tmr_min_max = 65535; 6086 coal_cap->int_lat_tmr_max_max = 65535; 6087 coal_cap->num_cmpl_aggr_int_max = 65535; 6088 coal_cap->timer_units = 80; 6089 6090 if (bp->hwrm_spec_code < 0x10902) 6091 return; 6092 6093 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); 6094 mutex_lock(&bp->hwrm_cmd_lock); 6095 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6096 if (!rc) { 6097 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 6098 
coal_cap->nq_params = le32_to_cpu(resp->nq_params); 6099 coal_cap->num_cmpl_dma_aggr_max = 6100 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 6101 coal_cap->num_cmpl_dma_aggr_during_int_max = 6102 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 6103 coal_cap->cmpl_aggr_dma_tmr_max = 6104 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 6105 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 6106 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 6107 coal_cap->int_lat_tmr_min_max = 6108 le16_to_cpu(resp->int_lat_tmr_min_max); 6109 coal_cap->int_lat_tmr_max_max = 6110 le16_to_cpu(resp->int_lat_tmr_max_max); 6111 coal_cap->num_cmpl_aggr_int_max = 6112 le16_to_cpu(resp->num_cmpl_aggr_int_max); 6113 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 6114 } 6115 mutex_unlock(&bp->hwrm_cmd_lock); 6116 } 6117 6118 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 6119 { 6120 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6121 6122 return usec * 1000 / coal_cap->timer_units; 6123 } 6124 6125 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 6126 struct bnxt_coal *hw_coal, 6127 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 6128 { 6129 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6130 u32 cmpl_params = coal_cap->cmpl_params; 6131 u16 val, tmr, max, flags = 0; 6132 6133 max = hw_coal->bufs_per_record * 128; 6134 if (hw_coal->budget) 6135 max = hw_coal->bufs_per_record * hw_coal->budget; 6136 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 6137 6138 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 6139 req->num_cmpl_aggr_int = cpu_to_le16(val); 6140 6141 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 6142 req->num_cmpl_dma_aggr = cpu_to_le16(val); 6143 6144 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 6145 coal_cap->num_cmpl_dma_aggr_during_int_max); 6146 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 6147 6148 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 6149 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 6150 req->int_lat_tmr_max = cpu_to_le16(tmr); 6151 6152 /* min timer set to 1/2 of interrupt timer */ 6153 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 6154 val = tmr / 2; 6155 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 6156 req->int_lat_tmr_min = cpu_to_le16(val); 6157 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6158 } 6159 6160 /* buf timer set to 1/4 of interrupt timer */ 6161 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 6162 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 6163 6164 if (cmpl_params & 6165 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 6166 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 6167 val = clamp_t(u16, tmr, 1, 6168 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 6169 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 6170 req->enables |= 6171 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 6172 } 6173 6174 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 6175 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 6176 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 6177 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 6178 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 6179 req->flags = cpu_to_le16(flags); 6180 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 6181 } 6182 6183 /* Caller holds bp->hwrm_cmd_lock */ 6184 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 6185 struct 
bnxt_coal *hw_coal) 6186 { 6187 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; 6188 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6189 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6190 u32 nq_params = coal_cap->nq_params; 6191 u16 tmr; 6192 6193 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 6194 return 0; 6195 6196 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, 6197 -1, -1); 6198 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 6199 req.flags = 6200 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 6201 6202 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 6203 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 6204 req.int_lat_tmr_min = cpu_to_le16(tmr); 6205 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6206 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6207 } 6208 6209 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 6210 { 6211 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; 6212 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6213 struct bnxt_coal coal; 6214 6215 /* Tick values in micro seconds. 6216 * 1 coal_buf x bufs_per_record = 1 completion record. 6217 */ 6218 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 6219 6220 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 6221 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 6222 6223 if (!bnapi->rx_ring) 6224 return -ENODEV; 6225 6226 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6227 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6228 6229 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); 6230 6231 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 6232 6233 return hwrm_send_message(bp, &req_rx, sizeof(req_rx), 6234 HWRM_CMD_TIMEOUT); 6235 } 6236 6237 int bnxt_hwrm_set_coal(struct bnxt *bp) 6238 { 6239 int i, rc = 0; 6240 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 6241 req_tx = {0}, *req; 6242 6243 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6244 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6245 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 6246 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6247 6248 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); 6249 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); 6250 6251 mutex_lock(&bp->hwrm_cmd_lock); 6252 for (i = 0; i < bp->cp_nr_rings; i++) { 6253 struct bnxt_napi *bnapi = bp->bnapi[i]; 6254 struct bnxt_coal *hw_coal; 6255 u16 ring_id; 6256 6257 req = &req_rx; 6258 if (!bnapi->rx_ring) { 6259 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6260 req = &req_tx; 6261 } else { 6262 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 6263 } 6264 req->ring_id = cpu_to_le16(ring_id); 6265 6266 rc = _hwrm_send_message(bp, req, sizeof(*req), 6267 HWRM_CMD_TIMEOUT); 6268 if (rc) 6269 break; 6270 6271 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6272 continue; 6273 6274 if (bnapi->rx_ring && bnapi->tx_ring) { 6275 req = &req_tx; 6276 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6277 req->ring_id = cpu_to_le16(ring_id); 6278 rc = _hwrm_send_message(bp, req, sizeof(*req), 6279 HWRM_CMD_TIMEOUT); 6280 if (rc) 6281 break; 6282 } 6283 if (bnapi->rx_ring) 6284 hw_coal = &bp->rx_coal; 6285 else 6286 hw_coal = &bp->tx_coal; 6287 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 6288 } 6289 mutex_unlock(&bp->hwrm_cmd_lock); 6290 return rc; 6291 } 6292 6293 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 6294 { 6295 struct hwrm_stat_ctx_clr_stats_input req0 = {0}; 6296 struct hwrm_stat_ctx_free_input 
req = {0}; 6297 int i; 6298 6299 if (!bp->bnapi) 6300 return; 6301 6302 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6303 return; 6304 6305 bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1); 6306 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 6307 6308 mutex_lock(&bp->hwrm_cmd_lock); 6309 for (i = 0; i < bp->cp_nr_rings; i++) { 6310 struct bnxt_napi *bnapi = bp->bnapi[i]; 6311 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6312 6313 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 6314 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 6315 if (BNXT_FW_MAJ(bp) <= 20) { 6316 req0.stat_ctx_id = req.stat_ctx_id; 6317 _hwrm_send_message(bp, &req0, sizeof(req0), 6318 HWRM_CMD_TIMEOUT); 6319 } 6320 _hwrm_send_message(bp, &req, sizeof(req), 6321 HWRM_CMD_TIMEOUT); 6322 6323 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 6324 } 6325 } 6326 mutex_unlock(&bp->hwrm_cmd_lock); 6327 } 6328 6329 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 6330 { 6331 int rc = 0, i; 6332 struct hwrm_stat_ctx_alloc_input req = {0}; 6333 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 6334 6335 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6336 return 0; 6337 6338 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 6339 6340 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 6341 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 6342 6343 mutex_lock(&bp->hwrm_cmd_lock); 6344 for (i = 0; i < bp->cp_nr_rings; i++) { 6345 struct bnxt_napi *bnapi = bp->bnapi[i]; 6346 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6347 6348 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 6349 6350 rc = _hwrm_send_message(bp, &req, sizeof(req), 6351 HWRM_CMD_TIMEOUT); 6352 if (rc) 6353 break; 6354 6355 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 6356 6357 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 6358 } 6359 mutex_unlock(&bp->hwrm_cmd_lock); 6360 return rc; 6361 } 6362 6363 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 6364 { 6365 struct hwrm_func_qcfg_input req = {0}; 6366 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 6367 u32 min_db_offset = 0; 6368 u16 flags; 6369 int rc; 6370 6371 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 6372 req.fid = cpu_to_le16(0xffff); 6373 mutex_lock(&bp->hwrm_cmd_lock); 6374 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6375 if (rc) 6376 goto func_qcfg_exit; 6377 6378 #ifdef CONFIG_BNXT_SRIOV 6379 if (BNXT_VF(bp)) { 6380 struct bnxt_vf_info *vf = &bp->vf; 6381 6382 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 6383 } else { 6384 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 6385 } 6386 #endif 6387 flags = le16_to_cpu(resp->flags); 6388 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 6389 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 6390 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 6391 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 6392 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 6393 } 6394 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 6395 bp->flags |= BNXT_FLAG_MULTI_HOST; 6396 6397 switch (resp->port_partition_type) { 6398 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 6399 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 6400 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 6401 bp->port_partition_type = resp->port_partition_type; 6402 break; 6403 } 6404 if (bp->hwrm_spec_code < 0x10707 || 6405 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 6406 bp->br_mode = BRIDGE_MODE_VEB; 6407 else if (resp->evb_mode == 
FUNC_QCFG_RESP_EVB_MODE_VEPA) 6408 bp->br_mode = BRIDGE_MODE_VEPA; 6409 else 6410 bp->br_mode = BRIDGE_MODE_UNDEF; 6411 6412 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 6413 if (!bp->max_mtu) 6414 bp->max_mtu = BNXT_MAX_MTU; 6415 6416 if (bp->db_size) 6417 goto func_qcfg_exit; 6418 6419 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6420 if (BNXT_PF(bp)) 6421 min_db_offset = DB_PF_OFFSET_P5; 6422 else 6423 min_db_offset = DB_VF_OFFSET_P5; 6424 } 6425 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 6426 1024); 6427 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 6428 bp->db_size <= min_db_offset) 6429 bp->db_size = pci_resource_len(bp->pdev, 2); 6430 6431 func_qcfg_exit: 6432 mutex_unlock(&bp->hwrm_cmd_lock); 6433 return rc; 6434 } 6435 6436 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 6437 { 6438 struct hwrm_func_backing_store_qcaps_input req = {0}; 6439 struct hwrm_func_backing_store_qcaps_output *resp = 6440 bp->hwrm_cmd_resp_addr; 6441 int rc; 6442 6443 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 6444 return 0; 6445 6446 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); 6447 mutex_lock(&bp->hwrm_cmd_lock); 6448 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6449 if (!rc) { 6450 struct bnxt_ctx_pg_info *ctx_pg; 6451 struct bnxt_ctx_mem_info *ctx; 6452 int i, tqm_rings; 6453 6454 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 6455 if (!ctx) { 6456 rc = -ENOMEM; 6457 goto ctx_err; 6458 } 6459 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); 6460 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 6461 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 6462 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size); 6463 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 6464 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries); 6465 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size); 6466 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 6467 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries); 6468 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size); 6469 ctx->vnic_max_vnic_entries = 6470 le16_to_cpu(resp->vnic_max_vnic_entries); 6471 ctx->vnic_max_ring_table_entries = 6472 le16_to_cpu(resp->vnic_max_ring_table_entries); 6473 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size); 6474 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries); 6475 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size); 6476 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size); 6477 ctx->tqm_min_entries_per_ring = 6478 le32_to_cpu(resp->tqm_min_entries_per_ring); 6479 ctx->tqm_max_entries_per_ring = 6480 le32_to_cpu(resp->tqm_max_entries_per_ring); 6481 ctx->tqm_entries_multiple = resp->tqm_entries_multiple; 6482 if (!ctx->tqm_entries_multiple) 6483 ctx->tqm_entries_multiple = 1; 6484 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); 6485 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); 6486 ctx->mrav_num_entries_units = 6487 le16_to_cpu(resp->mrav_num_entries_units); 6488 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); 6489 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); 6490 ctx->ctx_kind_initializer = resp->ctx_kind_initializer; 6491 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 6492 if (!ctx->tqm_fp_rings_count) 6493 ctx->tqm_fp_rings_count = bp->max_q; 6494 6495 tqm_rings = ctx->tqm_fp_rings_count + 1; 6496 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), 
GFP_KERNEL); 6497 if (!ctx_pg) { 6498 kfree(ctx); 6499 rc = -ENOMEM; 6500 goto ctx_err; 6501 } 6502 for (i = 0; i < tqm_rings; i++, ctx_pg++) 6503 ctx->tqm_mem[i] = ctx_pg; 6504 bp->ctx = ctx; 6505 } else { 6506 rc = 0; 6507 } 6508 ctx_err: 6509 mutex_unlock(&bp->hwrm_cmd_lock); 6510 return rc; 6511 } 6512 6513 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 6514 __le64 *pg_dir) 6515 { 6516 u8 pg_size = 0; 6517 6518 if (BNXT_PAGE_SHIFT == 13) 6519 pg_size = 1 << 4; 6520 else if (BNXT_PAGE_SIZE == 16) 6521 pg_size = 2 << 4; 6522 6523 *pg_attr = pg_size; 6524 if (rmem->depth >= 1) { 6525 if (rmem->depth == 2) 6526 *pg_attr |= 2; 6527 else 6528 *pg_attr |= 1; 6529 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 6530 } else { 6531 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 6532 } 6533 } 6534 6535 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 6536 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 6537 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 6538 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 6539 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 6540 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 6541 6542 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 6543 { 6544 struct hwrm_func_backing_store_cfg_input req = {0}; 6545 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6546 struct bnxt_ctx_pg_info *ctx_pg; 6547 __le32 *num_entries; 6548 __le64 *pg_dir; 6549 u32 flags = 0; 6550 u8 *pg_attr; 6551 u32 ena; 6552 int i; 6553 6554 if (!ctx) 6555 return 0; 6556 6557 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); 6558 req.enables = cpu_to_le32(enables); 6559 6560 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 6561 ctx_pg = &ctx->qp_mem; 6562 req.qp_num_entries = cpu_to_le32(ctx_pg->entries); 6563 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); 6564 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); 6565 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); 6566 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6567 &req.qpc_pg_size_qpc_lvl, 6568 &req.qpc_page_dir); 6569 } 6570 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 6571 ctx_pg = &ctx->srq_mem; 6572 req.srq_num_entries = cpu_to_le32(ctx_pg->entries); 6573 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); 6574 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); 6575 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6576 &req.srq_pg_size_srq_lvl, 6577 &req.srq_page_dir); 6578 } 6579 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 6580 ctx_pg = &ctx->cq_mem; 6581 req.cq_num_entries = cpu_to_le32(ctx_pg->entries); 6582 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); 6583 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); 6584 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, 6585 &req.cq_page_dir); 6586 } 6587 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 6588 ctx_pg = &ctx->vnic_mem; 6589 req.vnic_num_vnic_entries = 6590 cpu_to_le16(ctx->vnic_max_vnic_entries); 6591 req.vnic_num_ring_table_entries = 6592 cpu_to_le16(ctx->vnic_max_ring_table_entries); 6593 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); 6594 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6595 &req.vnic_pg_size_vnic_lvl, 6596 &req.vnic_page_dir); 6597 } 6598 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 6599 ctx_pg = &ctx->stat_mem; 6600 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); 6601 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); 6602 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6603 &req.stat_pg_size_stat_lvl, 
6604 &req.stat_page_dir); 6605 } 6606 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 6607 ctx_pg = &ctx->mrav_mem; 6608 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); 6609 if (ctx->mrav_num_entries_units) 6610 flags |= 6611 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 6612 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); 6613 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6614 &req.mrav_pg_size_mrav_lvl, 6615 &req.mrav_page_dir); 6616 } 6617 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 6618 ctx_pg = &ctx->tim_mem; 6619 req.tim_num_entries = cpu_to_le32(ctx_pg->entries); 6620 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); 6621 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6622 &req.tim_pg_size_tim_lvl, 6623 &req.tim_page_dir); 6624 } 6625 for (i = 0, num_entries = &req.tqm_sp_num_entries, 6626 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, 6627 pg_dir = &req.tqm_sp_page_dir, 6628 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; 6629 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 6630 if (!(enables & ena)) 6631 continue; 6632 6633 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); 6634 ctx_pg = ctx->tqm_mem[i]; 6635 *num_entries = cpu_to_le32(ctx_pg->entries); 6636 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 6637 } 6638 req.flags = cpu_to_le32(flags); 6639 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6640 } 6641 6642 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 6643 struct bnxt_ctx_pg_info *ctx_pg) 6644 { 6645 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6646 6647 rmem->page_size = BNXT_PAGE_SIZE; 6648 rmem->pg_arr = ctx_pg->ctx_pg_arr; 6649 rmem->dma_arr = ctx_pg->ctx_dma_arr; 6650 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 6651 if (rmem->depth >= 1) 6652 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 6653 return bnxt_alloc_ring(bp, rmem); 6654 } 6655 6656 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 6657 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 6658 u8 depth, bool use_init_val) 6659 { 6660 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6661 int rc; 6662 6663 if (!mem_size) 6664 return -EINVAL; 6665 6666 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6667 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 6668 ctx_pg->nr_pages = 0; 6669 return -EINVAL; 6670 } 6671 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 6672 int nr_tbls, i; 6673 6674 rmem->depth = 2; 6675 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 6676 GFP_KERNEL); 6677 if (!ctx_pg->ctx_pg_tbl) 6678 return -ENOMEM; 6679 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 6680 rmem->nr_pages = nr_tbls; 6681 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 6682 if (rc) 6683 return rc; 6684 for (i = 0; i < nr_tbls; i++) { 6685 struct bnxt_ctx_pg_info *pg_tbl; 6686 6687 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 6688 if (!pg_tbl) 6689 return -ENOMEM; 6690 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 6691 rmem = &pg_tbl->ring_mem; 6692 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 6693 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 6694 rmem->depth = 1; 6695 rmem->nr_pages = MAX_CTX_PAGES; 6696 if (use_init_val) 6697 rmem->init_val = bp->ctx->ctx_kind_initializer; 6698 if (i == (nr_tbls - 1)) { 6699 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 6700 6701 if (rem) 6702 rmem->nr_pages = rem; 6703 } 6704 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 6705 if (rc) 6706 break; 6707 } 6708 } else { 6709 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6710 if (rmem->nr_pages > 1 || depth) 6711 rmem->depth = 1; 6712 if 
(use_init_val) 6713 rmem->init_val = bp->ctx->ctx_kind_initializer; 6714 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 6715 } 6716 return rc; 6717 } 6718 6719 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 6720 struct bnxt_ctx_pg_info *ctx_pg) 6721 { 6722 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6723 6724 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 6725 ctx_pg->ctx_pg_tbl) { 6726 int i, nr_tbls = rmem->nr_pages; 6727 6728 for (i = 0; i < nr_tbls; i++) { 6729 struct bnxt_ctx_pg_info *pg_tbl; 6730 struct bnxt_ring_mem_info *rmem2; 6731 6732 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 6733 if (!pg_tbl) 6734 continue; 6735 rmem2 = &pg_tbl->ring_mem; 6736 bnxt_free_ring(bp, rmem2); 6737 ctx_pg->ctx_pg_arr[i] = NULL; 6738 kfree(pg_tbl); 6739 ctx_pg->ctx_pg_tbl[i] = NULL; 6740 } 6741 kfree(ctx_pg->ctx_pg_tbl); 6742 ctx_pg->ctx_pg_tbl = NULL; 6743 } 6744 bnxt_free_ring(bp, rmem); 6745 ctx_pg->nr_pages = 0; 6746 } 6747 6748 static void bnxt_free_ctx_mem(struct bnxt *bp) 6749 { 6750 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6751 int i; 6752 6753 if (!ctx) 6754 return; 6755 6756 if (ctx->tqm_mem[0]) { 6757 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 6758 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); 6759 kfree(ctx->tqm_mem[0]); 6760 ctx->tqm_mem[0] = NULL; 6761 } 6762 6763 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); 6764 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); 6765 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); 6766 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); 6767 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); 6768 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); 6769 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); 6770 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 6771 } 6772 6773 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 6774 { 6775 struct bnxt_ctx_pg_info *ctx_pg; 6776 struct bnxt_ctx_mem_info *ctx; 6777 u32 mem_size, ena, entries; 6778 u32 entries_sp, min; 6779 u32 num_mr, num_ah; 6780 u32 extra_srqs = 0; 6781 u32 extra_qps = 0; 6782 u8 pg_lvl = 1; 6783 int i, rc; 6784 6785 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 6786 if (rc) { 6787 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 6788 rc); 6789 return rc; 6790 } 6791 ctx = bp->ctx; 6792 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 6793 return 0; 6794 6795 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 6796 pg_lvl = 2; 6797 extra_qps = 65536; 6798 extra_srqs = 8192; 6799 } 6800 6801 ctx_pg = &ctx->qp_mem; 6802 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + 6803 extra_qps; 6804 mem_size = ctx->qp_entry_size * ctx_pg->entries; 6805 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6806 if (rc) 6807 return rc; 6808 6809 ctx_pg = &ctx->srq_mem; 6810 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; 6811 mem_size = ctx->srq_entry_size * ctx_pg->entries; 6812 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6813 if (rc) 6814 return rc; 6815 6816 ctx_pg = &ctx->cq_mem; 6817 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; 6818 mem_size = ctx->cq_entry_size * ctx_pg->entries; 6819 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6820 if (rc) 6821 return rc; 6822 6823 ctx_pg = &ctx->vnic_mem; 6824 ctx_pg->entries = ctx->vnic_max_vnic_entries + 6825 ctx->vnic_max_ring_table_entries; 6826 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 6827 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 6828 if (rc) 6829 return rc; 6830 6831 ctx_pg = &ctx->stat_mem; 6832 ctx_pg->entries = ctx->stat_max_entries; 6833 mem_size = 
ctx->stat_entry_size * ctx_pg->entries; 6834 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 6835 if (rc) 6836 return rc; 6837 6838 ena = 0; 6839 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 6840 goto skip_rdma; 6841 6842 ctx_pg = &ctx->mrav_mem; 6843 /* 128K extra is needed to accommodate static AH context 6844 * allocation by f/w. 6845 */ 6846 num_mr = 1024 * 256; 6847 num_ah = 1024 * 128; 6848 ctx_pg->entries = num_mr + num_ah; 6849 mem_size = ctx->mrav_entry_size * ctx_pg->entries; 6850 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true); 6851 if (rc) 6852 return rc; 6853 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 6854 if (ctx->mrav_num_entries_units) 6855 ctx_pg->entries = 6856 ((num_mr / ctx->mrav_num_entries_units) << 16) | 6857 (num_ah / ctx->mrav_num_entries_units); 6858 6859 ctx_pg = &ctx->tim_mem; 6860 ctx_pg->entries = ctx->qp_mem.entries; 6861 mem_size = ctx->tim_entry_size * ctx_pg->entries; 6862 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 6863 if (rc) 6864 return rc; 6865 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 6866 6867 skip_rdma: 6868 min = ctx->tqm_min_entries_per_ring; 6869 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries + 6870 2 * (extra_qps + ctx->qp_min_qp1_entries) + min; 6871 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple); 6872 entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries; 6873 entries = roundup(entries, ctx->tqm_entries_multiple); 6874 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring); 6875 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 6876 ctx_pg = ctx->tqm_mem[i]; 6877 ctx_pg->entries = i ? entries : entries_sp; 6878 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 6879 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 6880 if (rc) 6881 return rc; 6882 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 6883 } 6884 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 6885 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 6886 if (rc) { 6887 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 6888 rc); 6889 return rc; 6890 } 6891 ctx->flags |= BNXT_CTX_FLAG_INITED; 6892 return 0; 6893 } 6894 6895 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 6896 { 6897 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6898 struct hwrm_func_resource_qcaps_input req = {0}; 6899 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6900 int rc; 6901 6902 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); 6903 req.fid = cpu_to_le16(0xffff); 6904 6905 mutex_lock(&bp->hwrm_cmd_lock); 6906 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), 6907 HWRM_CMD_TIMEOUT); 6908 if (rc) 6909 goto hwrm_func_resc_qcaps_exit; 6910 6911 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 6912 if (!all) 6913 goto hwrm_func_resc_qcaps_exit; 6914 6915 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 6916 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6917 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 6918 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6919 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 6920 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6921 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 6922 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6923 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 6924 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 6925 
hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 6926 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6927 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 6928 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6929 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 6930 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6931 6932 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6933 u16 max_msix = le16_to_cpu(resp->max_msix); 6934 6935 hw_resc->max_nqs = max_msix; 6936 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 6937 } 6938 6939 if (BNXT_PF(bp)) { 6940 struct bnxt_pf_info *pf = &bp->pf; 6941 6942 pf->vf_resv_strategy = 6943 le16_to_cpu(resp->vf_reservation_strategy); 6944 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 6945 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 6946 } 6947 hwrm_func_resc_qcaps_exit: 6948 mutex_unlock(&bp->hwrm_cmd_lock); 6949 return rc; 6950 } 6951 6952 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 6953 { 6954 int rc = 0; 6955 struct hwrm_func_qcaps_input req = {0}; 6956 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6957 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6958 u32 flags; 6959 6960 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 6961 req.fid = cpu_to_le16(0xffff); 6962 6963 mutex_lock(&bp->hwrm_cmd_lock); 6964 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6965 if (rc) 6966 goto hwrm_func_qcaps_exit; 6967 6968 flags = le32_to_cpu(resp->flags); 6969 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 6970 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 6971 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 6972 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 6973 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 6974 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 6975 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 6976 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 6977 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 6978 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 6979 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 6980 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 6981 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 6982 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 6983 6984 bp->tx_push_thresh = 0; 6985 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 6986 BNXT_FW_MAJ(bp) > 217) 6987 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 6988 6989 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6990 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6991 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6992 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6993 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 6994 if (!hw_resc->max_hw_ring_grps) 6995 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 6996 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6997 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6998 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6999 7000 if (BNXT_PF(bp)) { 7001 struct bnxt_pf_info *pf = &bp->pf; 7002 7003 pf->fw_fid = le16_to_cpu(resp->fid); 7004 pf->port_id = le16_to_cpu(resp->port_id); 7005 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 7006 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 7007 pf->max_vfs = le16_to_cpu(resp->max_vfs); 7008 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 7009 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 7010 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 7011 
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 7012 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 7013 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 7014 bp->flags &= ~BNXT_FLAG_WOL_CAP; 7015 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 7016 bp->flags |= BNXT_FLAG_WOL_CAP; 7017 } else { 7018 #ifdef CONFIG_BNXT_SRIOV 7019 struct bnxt_vf_info *vf = &bp->vf; 7020 7021 vf->fw_fid = le16_to_cpu(resp->fid); 7022 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 7023 #endif 7024 } 7025 7026 hwrm_func_qcaps_exit: 7027 mutex_unlock(&bp->hwrm_cmd_lock); 7028 return rc; 7029 } 7030 7031 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 7032 7033 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 7034 { 7035 int rc; 7036 7037 rc = __bnxt_hwrm_func_qcaps(bp); 7038 if (rc) 7039 return rc; 7040 rc = bnxt_hwrm_queue_qportcfg(bp); 7041 if (rc) { 7042 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 7043 return rc; 7044 } 7045 if (bp->hwrm_spec_code >= 0x10803) { 7046 rc = bnxt_alloc_ctx_mem(bp); 7047 if (rc) 7048 return rc; 7049 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 7050 if (!rc) 7051 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 7052 } 7053 return 0; 7054 } 7055 7056 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 7057 { 7058 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; 7059 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 7060 int rc = 0; 7061 u32 flags; 7062 7063 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 7064 return 0; 7065 7066 resp = bp->hwrm_cmd_resp_addr; 7067 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); 7068 7069 mutex_lock(&bp->hwrm_cmd_lock); 7070 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7071 if (rc) 7072 goto hwrm_cfa_adv_qcaps_exit; 7073 7074 flags = le32_to_cpu(resp->flags); 7075 if (flags & 7076 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 7077 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 7078 7079 hwrm_cfa_adv_qcaps_exit: 7080 mutex_unlock(&bp->hwrm_cmd_lock); 7081 return rc; 7082 } 7083 7084 static int bnxt_map_fw_health_regs(struct bnxt *bp) 7085 { 7086 struct bnxt_fw_health *fw_health = bp->fw_health; 7087 u32 reg_base = 0xffffffff; 7088 int i; 7089 7090 /* Only pre-map the monitoring GRC registers using window 3 */ 7091 for (i = 0; i < 4; i++) { 7092 u32 reg = fw_health->regs[i]; 7093 7094 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 7095 continue; 7096 if (reg_base == 0xffffffff) 7097 reg_base = reg & BNXT_GRC_BASE_MASK; 7098 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 7099 return -ERANGE; 7100 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE + 7101 (reg & BNXT_GRC_OFFSET_MASK); 7102 } 7103 if (reg_base == 0xffffffff) 7104 return 0; 7105 7106 writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 7107 BNXT_FW_HEALTH_WIN_MAP_OFF); 7108 return 0; 7109 } 7110 7111 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 7112 { 7113 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 7114 struct bnxt_fw_health *fw_health = bp->fw_health; 7115 struct hwrm_error_recovery_qcfg_input req = {0}; 7116 int rc, i; 7117 7118 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 7119 return 0; 7120 7121 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); 7122 mutex_lock(&bp->hwrm_cmd_lock); 7123 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7124 if (rc) 7125 goto err_recovery_out; 7126 fw_health->flags = le32_to_cpu(resp->flags); 7127 if ((fw_health->flags & 
ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 7128 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 7129 rc = -EINVAL; 7130 goto err_recovery_out; 7131 } 7132 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 7133 fw_health->master_func_wait_dsecs = 7134 le32_to_cpu(resp->master_func_wait_period); 7135 fw_health->normal_func_wait_dsecs = 7136 le32_to_cpu(resp->normal_func_wait_period); 7137 fw_health->post_reset_wait_dsecs = 7138 le32_to_cpu(resp->master_func_wait_period_after_reset); 7139 fw_health->post_reset_max_wait_dsecs = 7140 le32_to_cpu(resp->max_bailout_time_after_reset); 7141 fw_health->regs[BNXT_FW_HEALTH_REG] = 7142 le32_to_cpu(resp->fw_health_status_reg); 7143 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 7144 le32_to_cpu(resp->fw_heartbeat_reg); 7145 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 7146 le32_to_cpu(resp->fw_reset_cnt_reg); 7147 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 7148 le32_to_cpu(resp->reset_inprogress_reg); 7149 fw_health->fw_reset_inprog_reg_mask = 7150 le32_to_cpu(resp->reset_inprogress_reg_mask); 7151 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 7152 if (fw_health->fw_reset_seq_cnt >= 16) { 7153 rc = -EINVAL; 7154 goto err_recovery_out; 7155 } 7156 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 7157 fw_health->fw_reset_seq_regs[i] = 7158 le32_to_cpu(resp->reset_reg[i]); 7159 fw_health->fw_reset_seq_vals[i] = 7160 le32_to_cpu(resp->reset_reg_val[i]); 7161 fw_health->fw_reset_seq_delay_msec[i] = 7162 resp->delay_after_reset[i]; 7163 } 7164 err_recovery_out: 7165 mutex_unlock(&bp->hwrm_cmd_lock); 7166 if (!rc) 7167 rc = bnxt_map_fw_health_regs(bp); 7168 if (rc) 7169 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 7170 return rc; 7171 } 7172 7173 static int bnxt_hwrm_func_reset(struct bnxt *bp) 7174 { 7175 struct hwrm_func_reset_input req = {0}; 7176 7177 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 7178 req.enables = 0; 7179 7180 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 7181 } 7182 7183 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 7184 { 7185 int rc = 0; 7186 struct hwrm_queue_qportcfg_input req = {0}; 7187 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 7188 u8 i, j, *qptr; 7189 bool no_rdma; 7190 7191 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 7192 7193 mutex_lock(&bp->hwrm_cmd_lock); 7194 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7195 if (rc) 7196 goto qportcfg_exit; 7197 7198 if (!resp->max_configurable_queues) { 7199 rc = -EINVAL; 7200 goto qportcfg_exit; 7201 } 7202 bp->max_tc = resp->max_configurable_queues; 7203 bp->max_lltc = resp->max_configurable_lossless_queues; 7204 if (bp->max_tc > BNXT_MAX_QUEUE) 7205 bp->max_tc = BNXT_MAX_QUEUE; 7206 7207 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 7208 qptr = &resp->queue_id0; 7209 for (i = 0, j = 0; i < bp->max_tc; i++) { 7210 bp->q_info[j].queue_id = *qptr; 7211 bp->q_ids[i] = *qptr++; 7212 bp->q_info[j].queue_profile = *qptr++; 7213 bp->tc_to_qidx[j] = j; 7214 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 7215 (no_rdma && BNXT_PF(bp))) 7216 j++; 7217 } 7218 bp->max_q = bp->max_tc; 7219 bp->max_tc = max_t(u8, j, 1); 7220 7221 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 7222 bp->max_tc = 1; 7223 7224 if (bp->max_lltc > bp->max_tc) 7225 bp->max_lltc = bp->max_tc; 7226 7227 qportcfg_exit: 7228 mutex_unlock(&bp->hwrm_cmd_lock); 7229 return rc; 7230 } 7231 7232 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) 7233 { 7234 struct hwrm_ver_get_input req 
= {0}; 7235 int rc; 7236 7237 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 7238 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 7239 req.hwrm_intf_min = HWRM_VERSION_MINOR; 7240 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 7241 7242 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, 7243 silent); 7244 return rc; 7245 } 7246 7247 static int bnxt_hwrm_ver_get(struct bnxt *bp) 7248 { 7249 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 7250 u16 fw_maj, fw_min, fw_bld, fw_rsv; 7251 u32 dev_caps_cfg, hwrm_ver; 7252 int rc, len; 7253 7254 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 7255 mutex_lock(&bp->hwrm_cmd_lock); 7256 rc = __bnxt_hwrm_ver_get(bp, false); 7257 if (rc) 7258 goto hwrm_ver_get_exit; 7259 7260 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 7261 7262 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 7263 resp->hwrm_intf_min_8b << 8 | 7264 resp->hwrm_intf_upd_8b; 7265 if (resp->hwrm_intf_maj_8b < 1) { 7266 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 7267 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 7268 resp->hwrm_intf_upd_8b); 7269 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 7270 } 7271 7272 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 7273 HWRM_VERSION_UPDATE; 7274 7275 if (bp->hwrm_spec_code > hwrm_ver) 7276 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 7277 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 7278 HWRM_VERSION_UPDATE); 7279 else 7280 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 7281 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 7282 resp->hwrm_intf_upd_8b); 7283 7284 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 7285 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 7286 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 7287 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 7288 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 7289 len = FW_VER_STR_LEN; 7290 } else { 7291 fw_maj = resp->hwrm_fw_maj_8b; 7292 fw_min = resp->hwrm_fw_min_8b; 7293 fw_bld = resp->hwrm_fw_bld_8b; 7294 fw_rsv = resp->hwrm_fw_rsvd_8b; 7295 len = BC_HWRM_STR_LEN; 7296 } 7297 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 7298 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 7299 fw_rsv); 7300 7301 if (strlen(resp->active_pkg_name)) { 7302 int fw_ver_len = strlen(bp->fw_ver_str); 7303 7304 snprintf(bp->fw_ver_str + fw_ver_len, 7305 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 7306 resp->active_pkg_name); 7307 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 7308 } 7309 7310 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 7311 if (!bp->hwrm_cmd_timeout) 7312 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 7313 7314 if (resp->hwrm_intf_maj_8b >= 1) { 7315 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 7316 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 7317 } 7318 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 7319 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 7320 7321 bp->chip_num = le16_to_cpu(resp->chip_num); 7322 bp->chip_rev = resp->chip_rev; 7323 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 7324 !resp->chip_metal) 7325 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 7326 7327 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 7328 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 7329 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 7330 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 7331 7332 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 7333 bp->fw_cap |= 
BNXT_FW_CAP_KONG_MB_CHNL; 7334 7335 if (dev_caps_cfg & 7336 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 7337 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 7338 7339 if (dev_caps_cfg & 7340 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 7341 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 7342 7343 if (dev_caps_cfg & 7344 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 7345 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 7346 7347 hwrm_ver_get_exit: 7348 mutex_unlock(&bp->hwrm_cmd_lock); 7349 return rc; 7350 } 7351 7352 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 7353 { 7354 struct hwrm_fw_set_time_input req = {0}; 7355 struct tm tm; 7356 time64_t now = ktime_get_real_seconds(); 7357 7358 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 7359 bp->hwrm_spec_code < 0x10400) 7360 return -EOPNOTSUPP; 7361 7362 time64_to_tm(now, 0, &tm); 7363 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 7364 req.year = cpu_to_le16(1900 + tm.tm_year); 7365 req.month = 1 + tm.tm_mon; 7366 req.day = tm.tm_mday; 7367 req.hour = tm.tm_hour; 7368 req.minute = tm.tm_min; 7369 req.second = tm.tm_sec; 7370 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7371 } 7372 7373 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 7374 { 7375 struct bnxt_pf_info *pf = &bp->pf; 7376 struct hwrm_port_qstats_input req = {0}; 7377 7378 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 7379 return 0; 7380 7381 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 7382 req.port_id = cpu_to_le16(pf->port_id); 7383 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 7384 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 7385 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7386 } 7387 7388 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) 7389 { 7390 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 7391 struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; 7392 struct hwrm_port_qstats_ext_input req = {0}; 7393 struct bnxt_pf_info *pf = &bp->pf; 7394 u32 tx_stat_size; 7395 int rc; 7396 7397 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 7398 return 0; 7399 7400 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); 7401 req.port_id = cpu_to_le16(pf->port_id); 7402 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 7403 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); 7404 tx_stat_size = bp->hw_tx_port_stats_ext ? 7405 sizeof(*bp->hw_tx_port_stats_ext) : 0; 7406 req.tx_stat_size = cpu_to_le16(tx_stat_size); 7407 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); 7408 mutex_lock(&bp->hwrm_cmd_lock); 7409 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7410 if (!rc) { 7411 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; 7412 bp->fw_tx_stats_ext_size = tx_stat_size ? 
7413 le16_to_cpu(resp->tx_stat_size) / 8 : 0; 7414 } else { 7415 bp->fw_rx_stats_ext_size = 0; 7416 bp->fw_tx_stats_ext_size = 0; 7417 } 7418 if (bp->fw_tx_stats_ext_size <= 7419 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 7420 mutex_unlock(&bp->hwrm_cmd_lock); 7421 bp->pri2cos_valid = 0; 7422 return rc; 7423 } 7424 7425 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 7426 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 7427 7428 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); 7429 if (!rc) { 7430 struct hwrm_queue_pri2cos_qcfg_output *resp2; 7431 u8 *pri2cos; 7432 int i, j; 7433 7434 resp2 = bp->hwrm_cmd_resp_addr; 7435 pri2cos = &resp2->pri0_cos_queue_id; 7436 for (i = 0; i < 8; i++) { 7437 u8 queue_id = pri2cos[i]; 7438 u8 queue_idx; 7439 7440 /* Per port queue IDs start from 0, 10, 20, etc */ 7441 queue_idx = queue_id % 10; 7442 if (queue_idx > BNXT_MAX_QUEUE) { 7443 bp->pri2cos_valid = false; 7444 goto qstats_done; 7445 } 7446 for (j = 0; j < bp->max_q; j++) { 7447 if (bp->q_ids[j] == queue_id) 7448 bp->pri2cos_idx[i] = queue_idx; 7449 } 7450 } 7451 bp->pri2cos_valid = 1; 7452 } 7453 qstats_done: 7454 mutex_unlock(&bp->hwrm_cmd_lock); 7455 return rc; 7456 } 7457 7458 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) 7459 { 7460 struct hwrm_pcie_qstats_input req = {0}; 7461 7462 if (!(bp->flags & BNXT_FLAG_PCIE_STATS)) 7463 return 0; 7464 7465 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); 7466 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats)); 7467 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map); 7468 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7469 } 7470 7471 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 7472 { 7473 if (bp->vxlan_port_cnt) { 7474 bnxt_hwrm_tunnel_dst_port_free( 7475 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7476 } 7477 bp->vxlan_port_cnt = 0; 7478 if (bp->nge_port_cnt) { 7479 bnxt_hwrm_tunnel_dst_port_free( 7480 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7481 } 7482 bp->nge_port_cnt = 0; 7483 } 7484 7485 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 7486 { 7487 int rc, i; 7488 u32 tpa_flags = 0; 7489 7490 if (set_tpa) 7491 tpa_flags = bp->flags & BNXT_FLAG_TPA; 7492 else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 7493 return 0; 7494 for (i = 0; i < bp->nr_vnics; i++) { 7495 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 7496 if (rc) { 7497 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 7498 i, rc); 7499 return rc; 7500 } 7501 } 7502 return 0; 7503 } 7504 7505 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 7506 { 7507 int i; 7508 7509 for (i = 0; i < bp->nr_vnics; i++) 7510 bnxt_hwrm_vnic_set_rss(bp, i, false); 7511 } 7512 7513 static void bnxt_clear_vnic(struct bnxt *bp) 7514 { 7515 if (!bp->vnic_info) 7516 return; 7517 7518 bnxt_hwrm_clear_vnic_filter(bp); 7519 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { 7520 /* clear all RSS setting before free vnic ctx */ 7521 bnxt_hwrm_clear_vnic_rss(bp); 7522 bnxt_hwrm_vnic_ctx_free(bp); 7523 } 7524 /* before free the vnic, undo the vnic tpa settings */ 7525 if (bp->flags & BNXT_FLAG_TPA) 7526 bnxt_set_tpa(bp, false); 7527 bnxt_hwrm_vnic_free(bp); 7528 if (bp->flags & BNXT_FLAG_CHIP_P5) 7529 bnxt_hwrm_vnic_ctx_free(bp); 7530 } 7531 7532 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 7533 bool irq_re_init) 7534 { 7535 bnxt_clear_vnic(bp); 7536 bnxt_hwrm_ring_free(bp, close_path); 7537 
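	/* Ring groups are freed only after the VNICs and rings that use
	 * them have been released above; stat contexts and tunnel ports
	 * are torn down only on a full IRQ re-init.
	 */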
bnxt_hwrm_ring_grp_free(bp); 7538 if (irq_re_init) { 7539 bnxt_hwrm_stat_ctx_free(bp); 7540 bnxt_hwrm_free_tunnel_ports(bp); 7541 } 7542 } 7543 7544 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 7545 { 7546 struct hwrm_func_cfg_input req = {0}; 7547 7548 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 7549 req.fid = cpu_to_le16(0xffff); 7550 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 7551 if (br_mode == BRIDGE_MODE_VEB) 7552 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 7553 else if (br_mode == BRIDGE_MODE_VEPA) 7554 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 7555 else 7556 return -EINVAL; 7557 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7558 } 7559 7560 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 7561 { 7562 struct hwrm_func_cfg_input req = {0}; 7563 7564 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 7565 return 0; 7566 7567 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 7568 req.fid = cpu_to_le16(0xffff); 7569 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 7570 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 7571 if (size == 128) 7572 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 7573 7574 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7575 } 7576 7577 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 7578 { 7579 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 7580 int rc; 7581 7582 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 7583 goto skip_rss_ctx; 7584 7585 /* allocate context for vnic */ 7586 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 7587 if (rc) { 7588 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 7589 vnic_id, rc); 7590 goto vnic_setup_err; 7591 } 7592 bp->rsscos_nr_ctxs++; 7593 7594 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7595 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 7596 if (rc) { 7597 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 7598 vnic_id, rc); 7599 goto vnic_setup_err; 7600 } 7601 bp->rsscos_nr_ctxs++; 7602 } 7603 7604 skip_rss_ctx: 7605 /* configure default vnic, ring grp */ 7606 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 7607 if (rc) { 7608 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 7609 vnic_id, rc); 7610 goto vnic_setup_err; 7611 } 7612 7613 /* Enable RSS hashing on vnic */ 7614 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 7615 if (rc) { 7616 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 7617 vnic_id, rc); 7618 goto vnic_setup_err; 7619 } 7620 7621 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7622 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 7623 if (rc) { 7624 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 7625 vnic_id, rc); 7626 } 7627 } 7628 7629 vnic_setup_err: 7630 return rc; 7631 } 7632 7633 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 7634 { 7635 int rc, i, nr_ctxs; 7636 7637 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); 7638 for (i = 0; i < nr_ctxs; i++) { 7639 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 7640 if (rc) { 7641 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 7642 vnic_id, i, rc); 7643 break; 7644 } 7645 bp->rsscos_nr_ctxs++; 7646 } 7647 if (i < nr_ctxs) 7648 return -ENOMEM; 7649 7650 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 7651 if (rc) { 7652 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 7653 vnic_id, rc); 7654 return rc; 7655 } 7656 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 7657 if (rc) { 7658 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: 
%x\n", 7659 vnic_id, rc); 7660 return rc; 7661 } 7662 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7663 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 7664 if (rc) { 7665 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 7666 vnic_id, rc); 7667 } 7668 } 7669 return rc; 7670 } 7671 7672 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 7673 { 7674 if (bp->flags & BNXT_FLAG_CHIP_P5) 7675 return __bnxt_setup_vnic_p5(bp, vnic_id); 7676 else 7677 return __bnxt_setup_vnic(bp, vnic_id); 7678 } 7679 7680 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 7681 { 7682 #ifdef CONFIG_RFS_ACCEL 7683 int i, rc = 0; 7684 7685 if (bp->flags & BNXT_FLAG_CHIP_P5) 7686 return 0; 7687 7688 for (i = 0; i < bp->rx_nr_rings; i++) { 7689 struct bnxt_vnic_info *vnic; 7690 u16 vnic_id = i + 1; 7691 u16 ring_id = i; 7692 7693 if (vnic_id >= bp->nr_vnics) 7694 break; 7695 7696 vnic = &bp->vnic_info[vnic_id]; 7697 vnic->flags |= BNXT_VNIC_RFS_FLAG; 7698 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 7699 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 7700 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 7701 if (rc) { 7702 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 7703 vnic_id, rc); 7704 break; 7705 } 7706 rc = bnxt_setup_vnic(bp, vnic_id); 7707 if (rc) 7708 break; 7709 } 7710 return rc; 7711 #else 7712 return 0; 7713 #endif 7714 } 7715 7716 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 7717 static bool bnxt_promisc_ok(struct bnxt *bp) 7718 { 7719 #ifdef CONFIG_BNXT_SRIOV 7720 if (BNXT_VF(bp) && !bp->vf.vlan) 7721 return false; 7722 #endif 7723 return true; 7724 } 7725 7726 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 7727 { 7728 unsigned int rc = 0; 7729 7730 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 7731 if (rc) { 7732 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 7733 rc); 7734 return rc; 7735 } 7736 7737 rc = bnxt_hwrm_vnic_cfg(bp, 1); 7738 if (rc) { 7739 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 7740 rc); 7741 return rc; 7742 } 7743 return rc; 7744 } 7745 7746 static int bnxt_cfg_rx_mode(struct bnxt *); 7747 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 7748 7749 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 7750 { 7751 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 7752 int rc = 0; 7753 unsigned int rx_nr_rings = bp->rx_nr_rings; 7754 7755 if (irq_re_init) { 7756 rc = bnxt_hwrm_stat_ctx_alloc(bp); 7757 if (rc) { 7758 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 7759 rc); 7760 goto err_out; 7761 } 7762 } 7763 7764 rc = bnxt_hwrm_ring_alloc(bp); 7765 if (rc) { 7766 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 7767 goto err_out; 7768 } 7769 7770 rc = bnxt_hwrm_ring_grp_alloc(bp); 7771 if (rc) { 7772 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 7773 goto err_out; 7774 } 7775 7776 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7777 rx_nr_rings--; 7778 7779 /* default vnic 0 */ 7780 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 7781 if (rc) { 7782 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 7783 goto err_out; 7784 } 7785 7786 rc = bnxt_setup_vnic(bp, 0); 7787 if (rc) 7788 goto err_out; 7789 7790 if (bp->flags & BNXT_FLAG_RFS) { 7791 rc = bnxt_alloc_rfs_vnics(bp); 7792 if (rc) 7793 goto err_out; 7794 } 7795 7796 if (bp->flags & BNXT_FLAG_TPA) { 7797 rc = bnxt_set_tpa(bp, true); 7798 if (rc) 7799 goto err_out; 7800 } 7801 7802 if (BNXT_VF(bp)) 7803 bnxt_update_vf_mac(bp); 7804 7805 /* Filter for default vnic 0 */ 7806 rc = 
bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 7807 if (rc) { 7808 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 7809 goto err_out; 7810 } 7811 vnic->uc_filter_count = 1; 7812 7813 vnic->rx_mask = 0; 7814 if (bp->dev->flags & IFF_BROADCAST) 7815 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 7816 7817 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 7818 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 7819 7820 if (bp->dev->flags & IFF_ALLMULTI) { 7821 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 7822 vnic->mc_list_count = 0; 7823 } else { 7824 u32 mask = 0; 7825 7826 bnxt_mc_list_updated(bp, &mask); 7827 vnic->rx_mask |= mask; 7828 } 7829 7830 rc = bnxt_cfg_rx_mode(bp); 7831 if (rc) 7832 goto err_out; 7833 7834 rc = bnxt_hwrm_set_coal(bp); 7835 if (rc) 7836 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 7837 rc); 7838 7839 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7840 rc = bnxt_setup_nitroa0_vnic(bp); 7841 if (rc) 7842 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 7843 rc); 7844 } 7845 7846 if (BNXT_VF(bp)) { 7847 bnxt_hwrm_func_qcfg(bp); 7848 netdev_update_features(bp->dev); 7849 } 7850 7851 return 0; 7852 7853 err_out: 7854 bnxt_hwrm_resource_free(bp, 0, true); 7855 7856 return rc; 7857 } 7858 7859 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 7860 { 7861 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 7862 return 0; 7863 } 7864 7865 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 7866 { 7867 bnxt_init_cp_rings(bp); 7868 bnxt_init_rx_rings(bp); 7869 bnxt_init_tx_rings(bp); 7870 bnxt_init_ring_grps(bp, irq_re_init); 7871 bnxt_init_vnics(bp); 7872 7873 return bnxt_init_chip(bp, irq_re_init); 7874 } 7875 7876 static int bnxt_set_real_num_queues(struct bnxt *bp) 7877 { 7878 int rc; 7879 struct net_device *dev = bp->dev; 7880 7881 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 7882 bp->tx_nr_rings_xdp); 7883 if (rc) 7884 return rc; 7885 7886 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 7887 if (rc) 7888 return rc; 7889 7890 #ifdef CONFIG_RFS_ACCEL 7891 if (bp->flags & BNXT_FLAG_RFS) 7892 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 7893 #endif 7894 7895 return rc; 7896 } 7897 7898 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7899 bool shared) 7900 { 7901 int _rx = *rx, _tx = *tx; 7902 7903 if (shared) { 7904 *rx = min_t(int, _rx, max); 7905 *tx = min_t(int, _tx, max); 7906 } else { 7907 if (max < 2) 7908 return -ENOMEM; 7909 7910 while (_rx + _tx > max) { 7911 if (_rx > _tx && _rx > 1) 7912 _rx--; 7913 else if (_tx > 1) 7914 _tx--; 7915 } 7916 *rx = _rx; 7917 *tx = _tx; 7918 } 7919 return 0; 7920 } 7921 7922 static void bnxt_setup_msix(struct bnxt *bp) 7923 { 7924 const int len = sizeof(bp->irq_tbl[0].name); 7925 struct net_device *dev = bp->dev; 7926 int tcs, i; 7927 7928 tcs = netdev_get_num_tc(dev); 7929 if (tcs) { 7930 int i, off, count; 7931 7932 for (i = 0; i < tcs; i++) { 7933 count = bp->tx_nr_rings_per_tc; 7934 off = i * count; 7935 netdev_set_tc_queue(dev, i, count, off); 7936 } 7937 } 7938 7939 for (i = 0; i < bp->cp_nr_rings; i++) { 7940 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 7941 char *attr; 7942 7943 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7944 attr = "TxRx"; 7945 else if (i < bp->rx_nr_rings) 7946 attr = "rx"; 7947 else 7948 attr = "tx"; 7949 7950 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 7951 attr, i); 7952 bp->irq_tbl[map_idx].handler = bnxt_msix; 7953 } 7954 } 7955 7956 
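/* Legacy INTA fallback: a single shared vector services all rings, so any TC queue mapping is cleared and irq_tbl[0] is pointed at bnxt_inta. */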
static void bnxt_setup_inta(struct bnxt *bp) 7957 { 7958 const int len = sizeof(bp->irq_tbl[0].name); 7959 7960 if (netdev_get_num_tc(bp->dev)) 7961 netdev_reset_tc(bp->dev); 7962 7963 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 7964 0); 7965 bp->irq_tbl[0].handler = bnxt_inta; 7966 } 7967 7968 static int bnxt_setup_int_mode(struct bnxt *bp) 7969 { 7970 int rc; 7971 7972 if (bp->flags & BNXT_FLAG_USING_MSIX) 7973 bnxt_setup_msix(bp); 7974 else 7975 bnxt_setup_inta(bp); 7976 7977 rc = bnxt_set_real_num_queues(bp); 7978 return rc; 7979 } 7980 7981 #ifdef CONFIG_RFS_ACCEL 7982 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 7983 { 7984 return bp->hw_resc.max_rsscos_ctxs; 7985 } 7986 7987 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 7988 { 7989 return bp->hw_resc.max_vnics; 7990 } 7991 #endif 7992 7993 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 7994 { 7995 return bp->hw_resc.max_stat_ctxs; 7996 } 7997 7998 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 7999 { 8000 return bp->hw_resc.max_cp_rings; 8001 } 8002 8003 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 8004 { 8005 unsigned int cp = bp->hw_resc.max_cp_rings; 8006 8007 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 8008 cp -= bnxt_get_ulp_msix_num(bp); 8009 8010 return cp; 8011 } 8012 8013 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 8014 { 8015 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8016 8017 if (bp->flags & BNXT_FLAG_CHIP_P5) 8018 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 8019 8020 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 8021 } 8022 8023 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 8024 { 8025 bp->hw_resc.max_irqs = max_irqs; 8026 } 8027 8028 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 8029 { 8030 unsigned int cp; 8031 8032 cp = bnxt_get_max_func_cp_rings_for_en(bp); 8033 if (bp->flags & BNXT_FLAG_CHIP_P5) 8034 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 8035 else 8036 return cp - bp->cp_nr_rings; 8037 } 8038 8039 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 8040 { 8041 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 8042 } 8043 8044 int bnxt_get_avail_msix(struct bnxt *bp, int num) 8045 { 8046 int max_cp = bnxt_get_max_func_cp_rings(bp); 8047 int max_irq = bnxt_get_max_func_irqs(bp); 8048 int total_req = bp->cp_nr_rings + num; 8049 int max_idx, avail_msix; 8050 8051 max_idx = bp->total_irqs; 8052 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 8053 max_idx = min_t(int, bp->total_irqs, max_cp); 8054 avail_msix = max_idx - bp->cp_nr_rings; 8055 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 8056 return avail_msix; 8057 8058 if (max_irq < total_req) { 8059 num = max_irq - bp->cp_nr_rings; 8060 if (num <= 0) 8061 return 0; 8062 } 8063 return num; 8064 } 8065 8066 static int bnxt_get_num_msix(struct bnxt *bp) 8067 { 8068 if (!BNXT_NEW_RM(bp)) 8069 return bnxt_get_max_func_irqs(bp); 8070 8071 return bnxt_nq_rings_in_use(bp); 8072 } 8073 8074 static int bnxt_init_msix(struct bnxt *bp) 8075 { 8076 int i, total_vecs, max, rc = 0, min = 1, ulp_msix; 8077 struct msix_entry *msix_ent; 8078 8079 total_vecs = bnxt_get_num_msix(bp); 8080 max = bnxt_get_max_func_irqs(bp); 8081 if (total_vecs > max) 8082 total_vecs = max; 8083 8084 if (!total_vecs) 8085 return 0; 8086 8087 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 8088 if (!msix_ent) 8089 return -ENOMEM; 8090 8091 for (i = 0; i < 
total_vecs; i++) { 8092 msix_ent[i].entry = i; 8093 msix_ent[i].vector = 0; 8094 } 8095 8096 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 8097 min = 2; 8098 8099 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 8100 ulp_msix = bnxt_get_ulp_msix_num(bp); 8101 if (total_vecs < 0 || total_vecs < ulp_msix) { 8102 rc = -ENODEV; 8103 goto msix_setup_exit; 8104 } 8105 8106 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 8107 if (bp->irq_tbl) { 8108 for (i = 0; i < total_vecs; i++) 8109 bp->irq_tbl[i].vector = msix_ent[i].vector; 8110 8111 bp->total_irqs = total_vecs; 8112 /* Trim rings based upon num of vectors allocated */ 8113 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 8114 total_vecs - ulp_msix, min == 1); 8115 if (rc) 8116 goto msix_setup_exit; 8117 8118 bp->cp_nr_rings = (min == 1) ? 8119 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 8120 bp->tx_nr_rings + bp->rx_nr_rings; 8121 8122 } else { 8123 rc = -ENOMEM; 8124 goto msix_setup_exit; 8125 } 8126 bp->flags |= BNXT_FLAG_USING_MSIX; 8127 kfree(msix_ent); 8128 return 0; 8129 8130 msix_setup_exit: 8131 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 8132 kfree(bp->irq_tbl); 8133 bp->irq_tbl = NULL; 8134 pci_disable_msix(bp->pdev); 8135 kfree(msix_ent); 8136 return rc; 8137 } 8138 8139 static int bnxt_init_inta(struct bnxt *bp) 8140 { 8141 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 8142 if (!bp->irq_tbl) 8143 return -ENOMEM; 8144 8145 bp->total_irqs = 1; 8146 bp->rx_nr_rings = 1; 8147 bp->tx_nr_rings = 1; 8148 bp->cp_nr_rings = 1; 8149 bp->flags |= BNXT_FLAG_SHARED_RINGS; 8150 bp->irq_tbl[0].vector = bp->pdev->irq; 8151 return 0; 8152 } 8153 8154 static int bnxt_init_int_mode(struct bnxt *bp) 8155 { 8156 int rc = 0; 8157 8158 if (bp->flags & BNXT_FLAG_MSIX_CAP) 8159 rc = bnxt_init_msix(bp); 8160 8161 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 8162 /* fallback to INTA */ 8163 rc = bnxt_init_inta(bp); 8164 } 8165 return rc; 8166 } 8167 8168 static void bnxt_clear_int_mode(struct bnxt *bp) 8169 { 8170 if (bp->flags & BNXT_FLAG_USING_MSIX) 8171 pci_disable_msix(bp->pdev); 8172 8173 kfree(bp->irq_tbl); 8174 bp->irq_tbl = NULL; 8175 bp->flags &= ~BNXT_FLAG_USING_MSIX; 8176 } 8177 8178 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 8179 { 8180 int tcs = netdev_get_num_tc(bp->dev); 8181 bool irq_cleared = false; 8182 int rc; 8183 8184 if (!bnxt_need_reserve_rings(bp)) 8185 return 0; 8186 8187 if (irq_re_init && BNXT_NEW_RM(bp) && 8188 bnxt_get_num_msix(bp) != bp->total_irqs) { 8189 bnxt_ulp_irq_stop(bp); 8190 bnxt_clear_int_mode(bp); 8191 irq_cleared = true; 8192 } 8193 rc = __bnxt_reserve_rings(bp); 8194 if (irq_cleared) { 8195 if (!rc) 8196 rc = bnxt_init_int_mode(bp); 8197 bnxt_ulp_irq_restart(bp, rc); 8198 } 8199 if (rc) { 8200 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 8201 return rc; 8202 } 8203 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { 8204 netdev_err(bp->dev, "tx ring reservation failure\n"); 8205 netdev_reset_tc(bp->dev); 8206 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 8207 return -ENOMEM; 8208 } 8209 return 0; 8210 } 8211 8212 static void bnxt_free_irq(struct bnxt *bp) 8213 { 8214 struct bnxt_irq *irq; 8215 int i; 8216 8217 #ifdef CONFIG_RFS_ACCEL 8218 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 8219 bp->dev->rx_cpu_rmap = NULL; 8220 #endif 8221 if (!bp->irq_tbl || !bp->bnapi) 8222 return; 8223 8224 for (i = 0; i < bp->cp_nr_rings; i++) { 8225 int map_idx = bnxt_cp_num_to_irq_num(bp, 
i); 8226 8227 irq = &bp->irq_tbl[map_idx]; 8228 if (irq->requested) { 8229 if (irq->have_cpumask) { 8230 irq_set_affinity_hint(irq->vector, NULL); 8231 free_cpumask_var(irq->cpu_mask); 8232 irq->have_cpumask = 0; 8233 } 8234 free_irq(irq->vector, bp->bnapi[i]); 8235 } 8236 8237 irq->requested = 0; 8238 } 8239 } 8240 8241 static int bnxt_request_irq(struct bnxt *bp) 8242 { 8243 int i, j, rc = 0; 8244 unsigned long flags = 0; 8245 #ifdef CONFIG_RFS_ACCEL 8246 struct cpu_rmap *rmap; 8247 #endif 8248 8249 rc = bnxt_setup_int_mode(bp); 8250 if (rc) { 8251 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 8252 rc); 8253 return rc; 8254 } 8255 #ifdef CONFIG_RFS_ACCEL 8256 rmap = bp->dev->rx_cpu_rmap; 8257 #endif 8258 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 8259 flags = IRQF_SHARED; 8260 8261 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 8262 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8263 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 8264 8265 #ifdef CONFIG_RFS_ACCEL 8266 if (rmap && bp->bnapi[i]->rx_ring) { 8267 rc = irq_cpu_rmap_add(rmap, irq->vector); 8268 if (rc) 8269 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 8270 j); 8271 j++; 8272 } 8273 #endif 8274 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 8275 bp->bnapi[i]); 8276 if (rc) 8277 break; 8278 8279 irq->requested = 1; 8280 8281 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 8282 int numa_node = dev_to_node(&bp->pdev->dev); 8283 8284 irq->have_cpumask = 1; 8285 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 8286 irq->cpu_mask); 8287 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 8288 if (rc) { 8289 netdev_warn(bp->dev, 8290 "Set affinity failed, IRQ = %d\n", 8291 irq->vector); 8292 break; 8293 } 8294 } 8295 } 8296 return rc; 8297 } 8298 8299 static void bnxt_del_napi(struct bnxt *bp) 8300 { 8301 int i; 8302 8303 if (!bp->bnapi) 8304 return; 8305 8306 for (i = 0; i < bp->cp_nr_rings; i++) { 8307 struct bnxt_napi *bnapi = bp->bnapi[i]; 8308 8309 napi_hash_del(&bnapi->napi); 8310 netif_napi_del(&bnapi->napi); 8311 } 8312 /* We called napi_hash_del() before netif_napi_del(), we need 8313 * to respect an RCU grace period before freeing napi structures. 
8314 */ 8315 synchronize_net(); 8316 } 8317 8318 static void bnxt_init_napi(struct bnxt *bp) 8319 { 8320 int i; 8321 unsigned int cp_nr_rings = bp->cp_nr_rings; 8322 struct bnxt_napi *bnapi; 8323 8324 if (bp->flags & BNXT_FLAG_USING_MSIX) { 8325 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 8326 8327 if (bp->flags & BNXT_FLAG_CHIP_P5) 8328 poll_fn = bnxt_poll_p5; 8329 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8330 cp_nr_rings--; 8331 for (i = 0; i < cp_nr_rings; i++) { 8332 bnapi = bp->bnapi[i]; 8333 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); 8334 } 8335 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8336 bnapi = bp->bnapi[cp_nr_rings]; 8337 netif_napi_add(bp->dev, &bnapi->napi, 8338 bnxt_poll_nitroa0, 64); 8339 } 8340 } else { 8341 bnapi = bp->bnapi[0]; 8342 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 8343 } 8344 } 8345 8346 static void bnxt_disable_napi(struct bnxt *bp) 8347 { 8348 int i; 8349 8350 if (!bp->bnapi) 8351 return; 8352 8353 for (i = 0; i < bp->cp_nr_rings; i++) { 8354 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 8355 8356 if (bp->bnapi[i]->rx_ring) 8357 cancel_work_sync(&cpr->dim.work); 8358 8359 napi_disable(&bp->bnapi[i]->napi); 8360 } 8361 } 8362 8363 static void bnxt_enable_napi(struct bnxt *bp) 8364 { 8365 int i; 8366 8367 for (i = 0; i < bp->cp_nr_rings; i++) { 8368 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 8369 bp->bnapi[i]->in_reset = false; 8370 8371 if (bp->bnapi[i]->rx_ring) { 8372 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 8373 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 8374 } 8375 napi_enable(&bp->bnapi[i]->napi); 8376 } 8377 } 8378 8379 void bnxt_tx_disable(struct bnxt *bp) 8380 { 8381 int i; 8382 struct bnxt_tx_ring_info *txr; 8383 8384 if (bp->tx_ring) { 8385 for (i = 0; i < bp->tx_nr_rings; i++) { 8386 txr = &bp->tx_ring[i]; 8387 txr->dev_state = BNXT_DEV_STATE_CLOSING; 8388 } 8389 } 8390 /* Stop all TX queues */ 8391 netif_tx_disable(bp->dev); 8392 netif_carrier_off(bp->dev); 8393 } 8394 8395 void bnxt_tx_enable(struct bnxt *bp) 8396 { 8397 int i; 8398 struct bnxt_tx_ring_info *txr; 8399 8400 for (i = 0; i < bp->tx_nr_rings; i++) { 8401 txr = &bp->tx_ring[i]; 8402 txr->dev_state = 0; 8403 } 8404 netif_tx_wake_all_queues(bp->dev); 8405 if (bp->link_info.link_up) 8406 netif_carrier_on(bp->dev); 8407 } 8408 8409 static void bnxt_report_link(struct bnxt *bp) 8410 { 8411 if (bp->link_info.link_up) { 8412 const char *duplex; 8413 const char *flow_ctrl; 8414 u32 speed; 8415 u16 fec; 8416 8417 netif_carrier_on(bp->dev); 8418 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 8419 duplex = "full"; 8420 else 8421 duplex = "half"; 8422 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 8423 flow_ctrl = "ON - receive & transmit"; 8424 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 8425 flow_ctrl = "ON - transmit"; 8426 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 8427 flow_ctrl = "ON - receive"; 8428 else 8429 flow_ctrl = "none"; 8430 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 8431 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", 8432 speed, duplex, flow_ctrl); 8433 if (bp->flags & BNXT_FLAG_EEE_CAP) 8434 netdev_info(bp->dev, "EEE is %s\n", 8435 bp->eee.eee_active ? "active" : 8436 "not active"); 8437 fec = bp->link_info.fec_cfg; 8438 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 8439 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", 8440 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 8441 (fec & BNXT_FEC_ENC_BASE_R) ? 
"BaseR" : 8442 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None"); 8443 } else { 8444 netif_carrier_off(bp->dev); 8445 netdev_err(bp->dev, "NIC Link is Down\n"); 8446 } 8447 } 8448 8449 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 8450 { 8451 int rc = 0; 8452 struct hwrm_port_phy_qcaps_input req = {0}; 8453 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 8454 struct bnxt_link_info *link_info = &bp->link_info; 8455 8456 bp->flags &= ~BNXT_FLAG_EEE_CAP; 8457 if (bp->test_info) 8458 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK | 8459 BNXT_TEST_FL_AN_PHY_LPBK); 8460 if (bp->hwrm_spec_code < 0x10201) 8461 return 0; 8462 8463 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 8464 8465 mutex_lock(&bp->hwrm_cmd_lock); 8466 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8467 if (rc) 8468 goto hwrm_phy_qcaps_exit; 8469 8470 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 8471 struct ethtool_eee *eee = &bp->eee; 8472 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 8473 8474 bp->flags |= BNXT_FLAG_EEE_CAP; 8475 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 8476 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 8477 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 8478 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 8479 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 8480 } 8481 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) { 8482 if (bp->test_info) 8483 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; 8484 } 8485 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) { 8486 if (bp->test_info) 8487 bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK; 8488 } 8489 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) { 8490 if (BNXT_PF(bp)) 8491 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; 8492 } 8493 if (resp->supported_speeds_auto_mode) 8494 link_info->support_auto_speeds = 8495 le16_to_cpu(resp->supported_speeds_auto_mode); 8496 8497 bp->port_count = resp->port_cnt; 8498 8499 hwrm_phy_qcaps_exit: 8500 mutex_unlock(&bp->hwrm_cmd_lock); 8501 return rc; 8502 } 8503 8504 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 8505 { 8506 int rc = 0; 8507 struct bnxt_link_info *link_info = &bp->link_info; 8508 struct hwrm_port_phy_qcfg_input req = {0}; 8509 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 8510 u8 link_up = link_info->link_up; 8511 u16 diff; 8512 8513 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 8514 8515 mutex_lock(&bp->hwrm_cmd_lock); 8516 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8517 if (rc) { 8518 mutex_unlock(&bp->hwrm_cmd_lock); 8519 return rc; 8520 } 8521 8522 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 8523 link_info->phy_link_status = resp->link; 8524 link_info->duplex = resp->duplex_cfg; 8525 if (bp->hwrm_spec_code >= 0x10800) 8526 link_info->duplex = resp->duplex_state; 8527 link_info->pause = resp->pause; 8528 link_info->auto_mode = resp->auto_mode; 8529 link_info->auto_pause_setting = resp->auto_pause; 8530 link_info->lp_pause = resp->link_partner_adv_pause; 8531 link_info->force_pause_setting = resp->force_pause; 8532 link_info->duplex_setting = resp->duplex_cfg; 8533 if (link_info->phy_link_status == BNXT_LINK_LINK) 8534 link_info->link_speed = le16_to_cpu(resp->link_speed); 8535 else 8536 link_info->link_speed = 0; 8537 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 8538 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 
8539 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 8540 link_info->lp_auto_link_speeds = 8541 le16_to_cpu(resp->link_partner_adv_speeds); 8542 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 8543 link_info->phy_ver[0] = resp->phy_maj; 8544 link_info->phy_ver[1] = resp->phy_min; 8545 link_info->phy_ver[2] = resp->phy_bld; 8546 link_info->media_type = resp->media_type; 8547 link_info->phy_type = resp->phy_type; 8548 link_info->transceiver = resp->xcvr_pkg_type; 8549 link_info->phy_addr = resp->eee_config_phy_addr & 8550 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 8551 link_info->module_status = resp->module_status; 8552 8553 if (bp->flags & BNXT_FLAG_EEE_CAP) { 8554 struct ethtool_eee *eee = &bp->eee; 8555 u16 fw_speeds; 8556 8557 eee->eee_active = 0; 8558 if (resp->eee_config_phy_addr & 8559 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 8560 eee->eee_active = 1; 8561 fw_speeds = le16_to_cpu( 8562 resp->link_partner_adv_eee_link_speed_mask); 8563 eee->lp_advertised = 8564 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 8565 } 8566 8567 /* Pull initial EEE config */ 8568 if (!chng_link_state) { 8569 if (resp->eee_config_phy_addr & 8570 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 8571 eee->eee_enabled = 1; 8572 8573 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 8574 eee->advertised = 8575 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 8576 8577 if (resp->eee_config_phy_addr & 8578 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 8579 __le32 tmr; 8580 8581 eee->tx_lpi_enabled = 1; 8582 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 8583 eee->tx_lpi_timer = le32_to_cpu(tmr) & 8584 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 8585 } 8586 } 8587 } 8588 8589 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 8590 if (bp->hwrm_spec_code >= 0x10504) 8591 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 8592 8593 /* TODO: need to add more logic to report VF link */ 8594 if (chng_link_state) { 8595 if (link_info->phy_link_status == BNXT_LINK_LINK) 8596 link_info->link_up = 1; 8597 else 8598 link_info->link_up = 0; 8599 if (link_up != link_info->link_up) 8600 bnxt_report_link(bp); 8601 } else { 8602 /* always link down if not required to update link state */ 8603 link_info->link_up = 0; 8604 } 8605 mutex_unlock(&bp->hwrm_cmd_lock); 8606 8607 if (!BNXT_PHY_CFG_ABLE(bp)) 8608 return 0; 8609 8610 diff = link_info->support_auto_speeds ^ link_info->advertising; 8611 if ((link_info->support_auto_speeds | diff) != 8612 link_info->support_auto_speeds) { 8613 /* An advertised speed is no longer supported, so we need to 8614 * update the advertisement settings. Caller holds RTNL 8615 * so we can modify link settings.
8616 */ 8617 link_info->advertising = link_info->support_auto_speeds; 8618 if (link_info->autoneg & BNXT_AUTONEG_SPEED) 8619 bnxt_hwrm_set_link_setting(bp, true, false); 8620 } 8621 return 0; 8622 } 8623 8624 static void bnxt_get_port_module_status(struct bnxt *bp) 8625 { 8626 struct bnxt_link_info *link_info = &bp->link_info; 8627 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 8628 u8 module_status; 8629 8630 if (bnxt_update_link(bp, true)) 8631 return; 8632 8633 module_status = link_info->module_status; 8634 switch (module_status) { 8635 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 8636 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 8637 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 8638 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 8639 bp->pf.port_id); 8640 if (bp->hwrm_spec_code >= 0x10201) { 8641 netdev_warn(bp->dev, "Module part number %s\n", 8642 resp->phy_vendor_partnumber); 8643 } 8644 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 8645 netdev_warn(bp->dev, "TX is disabled\n"); 8646 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 8647 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 8648 } 8649 } 8650 8651 static void 8652 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 8653 { 8654 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 8655 if (bp->hwrm_spec_code >= 0x10201) 8656 req->auto_pause = 8657 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 8658 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 8659 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 8660 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 8661 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 8662 req->enables |= 8663 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 8664 } else { 8665 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 8666 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 8667 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 8668 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 8669 req->enables |= 8670 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 8671 if (bp->hwrm_spec_code >= 0x10201) { 8672 req->auto_pause = req->force_pause; 8673 req->enables |= cpu_to_le32( 8674 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 8675 } 8676 } 8677 } 8678 8679 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 8680 struct hwrm_port_phy_cfg_input *req) 8681 { 8682 u8 autoneg = bp->link_info.autoneg; 8683 u16 fw_link_speed = bp->link_info.req_link_speed; 8684 u16 advertising = bp->link_info.advertising; 8685 8686 if (autoneg & BNXT_AUTONEG_SPEED) { 8687 req->auto_mode |= 8688 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 8689 8690 req->enables |= cpu_to_le32( 8691 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 8692 req->auto_link_speed_mask = cpu_to_le16(advertising); 8693 8694 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 8695 req->flags |= 8696 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 8697 } else { 8698 req->force_link_speed = cpu_to_le16(fw_link_speed); 8699 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 8700 } 8701 8702 /* tell chimp that the setting takes effect immediately */ 8703 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 8704 } 8705 8706 int bnxt_hwrm_set_pause(struct bnxt *bp) 8707 { 8708 struct hwrm_port_phy_cfg_input req = {0}; 8709 int rc; 8710 8711 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8712 bnxt_hwrm_set_pause_common(bp, &req); 8713 8714 if ((bp->link_info.autoneg & 
BNXT_AUTONEG_FLOW_CTRL) || 8715 bp->link_info.force_link_chng) 8716 bnxt_hwrm_set_link_common(bp, &req); 8717 8718 mutex_lock(&bp->hwrm_cmd_lock); 8719 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8720 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 8721 /* since changing of pause setting doesn't trigger any link 8722 * change event, the driver needs to update the current pause 8723 * result upon successfully return of the phy_cfg command 8724 */ 8725 bp->link_info.pause = 8726 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 8727 bp->link_info.auto_pause_setting = 0; 8728 if (!bp->link_info.force_link_chng) 8729 bnxt_report_link(bp); 8730 } 8731 bp->link_info.force_link_chng = false; 8732 mutex_unlock(&bp->hwrm_cmd_lock); 8733 return rc; 8734 } 8735 8736 static void bnxt_hwrm_set_eee(struct bnxt *bp, 8737 struct hwrm_port_phy_cfg_input *req) 8738 { 8739 struct ethtool_eee *eee = &bp->eee; 8740 8741 if (eee->eee_enabled) { 8742 u16 eee_speeds; 8743 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 8744 8745 if (eee->tx_lpi_enabled) 8746 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 8747 else 8748 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 8749 8750 req->flags |= cpu_to_le32(flags); 8751 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 8752 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 8753 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 8754 } else { 8755 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 8756 } 8757 } 8758 8759 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 8760 { 8761 struct hwrm_port_phy_cfg_input req = {0}; 8762 8763 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8764 if (set_pause) 8765 bnxt_hwrm_set_pause_common(bp, &req); 8766 8767 bnxt_hwrm_set_link_common(bp, &req); 8768 8769 if (set_eee) 8770 bnxt_hwrm_set_eee(bp, &req); 8771 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8772 } 8773 8774 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 8775 { 8776 struct hwrm_port_phy_cfg_input req = {0}; 8777 8778 if (!BNXT_SINGLE_PF(bp)) 8779 return 0; 8780 8781 if (pci_num_vf(bp->pdev)) 8782 return 0; 8783 8784 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8785 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 8786 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8787 } 8788 8789 static int bnxt_fw_init_one(struct bnxt *bp); 8790 8791 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 8792 { 8793 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; 8794 struct hwrm_func_drv_if_change_input req = {0}; 8795 bool resc_reinit = false, fw_reset = false; 8796 u32 flags = 0; 8797 int rc; 8798 8799 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 8800 return 0; 8801 8802 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); 8803 if (up) 8804 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 8805 mutex_lock(&bp->hwrm_cmd_lock); 8806 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8807 if (!rc) 8808 flags = le32_to_cpu(resp->flags); 8809 mutex_unlock(&bp->hwrm_cmd_lock); 8810 if (rc) 8811 return rc; 8812 8813 if (!up) 8814 return 0; 8815 8816 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 8817 resc_reinit = true; 8818 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) 8819 fw_reset = true; 8820 8821 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 8822 netdev_err(bp->dev, "RESET_DONE not 
set during FW reset.\n"); 8823 return -ENODEV; 8824 } 8825 if (resc_reinit || fw_reset) { 8826 if (fw_reset) { 8827 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 8828 bnxt_ulp_stop(bp); 8829 bnxt_free_ctx_mem(bp); 8830 kfree(bp->ctx); 8831 bp->ctx = NULL; 8832 bnxt_dcb_free(bp); 8833 rc = bnxt_fw_init_one(bp); 8834 if (rc) { 8835 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 8836 return rc; 8837 } 8838 bnxt_clear_int_mode(bp); 8839 rc = bnxt_init_int_mode(bp); 8840 if (rc) { 8841 netdev_err(bp->dev, "init int mode failed\n"); 8842 return rc; 8843 } 8844 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 8845 } 8846 if (BNXT_NEW_RM(bp)) { 8847 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8848 8849 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 8850 hw_resc->resv_cp_rings = 0; 8851 hw_resc->resv_stat_ctxs = 0; 8852 hw_resc->resv_irqs = 0; 8853 hw_resc->resv_tx_rings = 0; 8854 hw_resc->resv_rx_rings = 0; 8855 hw_resc->resv_hw_ring_grps = 0; 8856 hw_resc->resv_vnics = 0; 8857 if (!fw_reset) { 8858 bp->tx_nr_rings = 0; 8859 bp->rx_nr_rings = 0; 8860 } 8861 } 8862 } 8863 return 0; 8864 } 8865 8866 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 8867 { 8868 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 8869 struct hwrm_port_led_qcaps_input req = {0}; 8870 struct bnxt_pf_info *pf = &bp->pf; 8871 int rc; 8872 8873 bp->num_leds = 0; 8874 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 8875 return 0; 8876 8877 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 8878 req.port_id = cpu_to_le16(pf->port_id); 8879 mutex_lock(&bp->hwrm_cmd_lock); 8880 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8881 if (rc) { 8882 mutex_unlock(&bp->hwrm_cmd_lock); 8883 return rc; 8884 } 8885 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 8886 int i; 8887 8888 bp->num_leds = resp->num_leds; 8889 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 8890 bp->num_leds); 8891 for (i = 0; i < bp->num_leds; i++) { 8892 struct bnxt_led_info *led = &bp->leds[i]; 8893 __le16 caps = led->led_state_caps; 8894 8895 if (!led->led_group_id || 8896 !BNXT_LED_ALT_BLINK_CAP(caps)) { 8897 bp->num_leds = 0; 8898 break; 8899 } 8900 } 8901 } 8902 mutex_unlock(&bp->hwrm_cmd_lock); 8903 return 0; 8904 } 8905 8906 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 8907 { 8908 struct hwrm_wol_filter_alloc_input req = {0}; 8909 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 8910 int rc; 8911 8912 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 8913 req.port_id = cpu_to_le16(bp->pf.port_id); 8914 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 8915 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 8916 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 8917 mutex_lock(&bp->hwrm_cmd_lock); 8918 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8919 if (!rc) 8920 bp->wol_filter_id = resp->wol_filter_id; 8921 mutex_unlock(&bp->hwrm_cmd_lock); 8922 return rc; 8923 } 8924 8925 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 8926 { 8927 struct hwrm_wol_filter_free_input req = {0}; 8928 8929 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 8930 req.port_id = cpu_to_le16(bp->pf.port_id); 8931 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 8932 req.wol_filter_id = bp->wol_filter_id; 8933 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8934 } 8935 8936 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 8937 { 8938 struct hwrm_wol_filter_qcfg_input req = 
{0}; 8939 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 8940 u16 next_handle = 0; 8941 int rc; 8942 8943 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 8944 req.port_id = cpu_to_le16(bp->pf.port_id); 8945 req.handle = cpu_to_le16(handle); 8946 mutex_lock(&bp->hwrm_cmd_lock); 8947 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8948 if (!rc) { 8949 next_handle = le16_to_cpu(resp->next_handle); 8950 if (next_handle != 0) { 8951 if (resp->wol_type == 8952 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 8953 bp->wol = 1; 8954 bp->wol_filter_id = resp->wol_filter_id; 8955 } 8956 } 8957 } 8958 mutex_unlock(&bp->hwrm_cmd_lock); 8959 return next_handle; 8960 } 8961 8962 static void bnxt_get_wol_settings(struct bnxt *bp) 8963 { 8964 u16 handle = 0; 8965 8966 bp->wol = 0; 8967 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 8968 return; 8969 8970 do { 8971 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 8972 } while (handle && handle != 0xffff); 8973 } 8974 8975 #ifdef CONFIG_BNXT_HWMON 8976 static ssize_t bnxt_show_temp(struct device *dev, 8977 struct device_attribute *devattr, char *buf) 8978 { 8979 struct hwrm_temp_monitor_query_input req = {0}; 8980 struct hwrm_temp_monitor_query_output *resp; 8981 struct bnxt *bp = dev_get_drvdata(dev); 8982 u32 temp = 0; 8983 8984 resp = bp->hwrm_cmd_resp_addr; 8985 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); 8986 mutex_lock(&bp->hwrm_cmd_lock); 8987 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) 8988 temp = resp->temp * 1000; /* display millidegree */ 8989 mutex_unlock(&bp->hwrm_cmd_lock); 8990 8991 return sprintf(buf, "%u\n", temp); 8992 } 8993 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); 8994 8995 static struct attribute *bnxt_attrs[] = { 8996 &sensor_dev_attr_temp1_input.dev_attr.attr, 8997 NULL 8998 }; 8999 ATTRIBUTE_GROUPS(bnxt); 9000 9001 static void bnxt_hwmon_close(struct bnxt *bp) 9002 { 9003 if (bp->hwmon_dev) { 9004 hwmon_device_unregister(bp->hwmon_dev); 9005 bp->hwmon_dev = NULL; 9006 } 9007 } 9008 9009 static void bnxt_hwmon_open(struct bnxt *bp) 9010 { 9011 struct pci_dev *pdev = bp->pdev; 9012 9013 if (bp->hwmon_dev) 9014 return; 9015 9016 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, 9017 DRV_MODULE_NAME, bp, 9018 bnxt_groups); 9019 if (IS_ERR(bp->hwmon_dev)) { 9020 bp->hwmon_dev = NULL; 9021 dev_warn(&pdev->dev, "Cannot register hwmon device\n"); 9022 } 9023 } 9024 #else 9025 static void bnxt_hwmon_close(struct bnxt *bp) 9026 { 9027 } 9028 9029 static void bnxt_hwmon_open(struct bnxt *bp) 9030 { 9031 } 9032 #endif 9033 9034 static bool bnxt_eee_config_ok(struct bnxt *bp) 9035 { 9036 struct ethtool_eee *eee = &bp->eee; 9037 struct bnxt_link_info *link_info = &bp->link_info; 9038 9039 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 9040 return true; 9041 9042 if (eee->eee_enabled) { 9043 u32 advertising = 9044 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 9045 9046 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9047 eee->eee_enabled = 0; 9048 return false; 9049 } 9050 if (eee->advertised & ~advertising) { 9051 eee->advertised = advertising & eee->supported; 9052 return false; 9053 } 9054 } 9055 return true; 9056 } 9057 9058 static int bnxt_update_phy_setting(struct bnxt *bp) 9059 { 9060 int rc; 9061 bool update_link = false; 9062 bool update_pause = false; 9063 bool update_eee = false; 9064 struct bnxt_link_info *link_info = &bp->link_info; 9065 9066 rc = bnxt_update_link(bp, true); 9067 if (rc) { 9068 
netdev_err(bp->dev, "failed to update link (rc: %x)\n", 9069 rc); 9070 return rc; 9071 } 9072 if (!BNXT_SINGLE_PF(bp)) 9073 return 0; 9074 9075 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9076 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 9077 link_info->req_flow_ctrl) 9078 update_pause = true; 9079 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9080 link_info->force_pause_setting != link_info->req_flow_ctrl) 9081 update_pause = true; 9082 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9083 if (BNXT_AUTO_MODE(link_info->auto_mode)) 9084 update_link = true; 9085 if (link_info->req_link_speed != link_info->force_link_speed) 9086 update_link = true; 9087 if (link_info->req_duplex != link_info->duplex_setting) 9088 update_link = true; 9089 } else { 9090 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 9091 update_link = true; 9092 if (link_info->advertising != link_info->auto_link_speeds) 9093 update_link = true; 9094 } 9095 9096 /* The last close may have shutdown the link, so need to call 9097 * PHY_CFG to bring it back up. 9098 */ 9099 if (!bp->link_info.link_up) 9100 update_link = true; 9101 9102 if (!bnxt_eee_config_ok(bp)) 9103 update_eee = true; 9104 9105 if (update_link) 9106 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 9107 else if (update_pause) 9108 rc = bnxt_hwrm_set_pause(bp); 9109 if (rc) { 9110 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 9111 rc); 9112 return rc; 9113 } 9114 9115 return rc; 9116 } 9117 9118 /* Common routine to pre-map certain register block to different GRC window. 9119 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 9120 * in PF and 3 windows in VF that can be customized to map in different 9121 * register blocks. 9122 */ 9123 static void bnxt_preset_reg_win(struct bnxt *bp) 9124 { 9125 if (BNXT_PF(bp)) { 9126 /* CAG registers map to GRC window #4 */ 9127 writel(BNXT_CAG_REG_BASE, 9128 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 9129 } 9130 } 9131 9132 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 9133 9134 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9135 { 9136 int rc = 0; 9137 9138 bnxt_preset_reg_win(bp); 9139 netif_carrier_off(bp->dev); 9140 if (irq_re_init) { 9141 /* Reserve rings now if none were reserved at driver probe. 
*/ 9142 rc = bnxt_init_dflt_ring_mode(bp); 9143 if (rc) { 9144 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 9145 return rc; 9146 } 9147 } 9148 rc = bnxt_reserve_rings(bp, irq_re_init); 9149 if (rc) 9150 return rc; 9151 if ((bp->flags & BNXT_FLAG_RFS) && 9152 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 9153 /* disable RFS if falling back to INTA */ 9154 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 9155 bp->flags &= ~BNXT_FLAG_RFS; 9156 } 9157 9158 rc = bnxt_alloc_mem(bp, irq_re_init); 9159 if (rc) { 9160 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9161 goto open_err_free_mem; 9162 } 9163 9164 if (irq_re_init) { 9165 bnxt_init_napi(bp); 9166 rc = bnxt_request_irq(bp); 9167 if (rc) { 9168 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 9169 goto open_err_irq; 9170 } 9171 } 9172 9173 bnxt_enable_napi(bp); 9174 bnxt_debug_dev_init(bp); 9175 9176 rc = bnxt_init_nic(bp, irq_re_init); 9177 if (rc) { 9178 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9179 goto open_err; 9180 } 9181 9182 if (link_re_init) { 9183 mutex_lock(&bp->link_lock); 9184 rc = bnxt_update_phy_setting(bp); 9185 mutex_unlock(&bp->link_lock); 9186 if (rc) { 9187 netdev_warn(bp->dev, "failed to update phy settings\n"); 9188 if (BNXT_SINGLE_PF(bp)) { 9189 bp->link_info.phy_retry = true; 9190 bp->link_info.phy_retry_expires = 9191 jiffies + 5 * HZ; 9192 } 9193 } 9194 } 9195 9196 if (irq_re_init) 9197 udp_tunnel_get_rx_info(bp->dev); 9198 9199 set_bit(BNXT_STATE_OPEN, &bp->state); 9200 bnxt_enable_int(bp); 9201 /* Enable TX queues */ 9202 bnxt_tx_enable(bp); 9203 mod_timer(&bp->timer, jiffies + bp->current_interval); 9204 /* Poll link status and check for SFP+ module status */ 9205 bnxt_get_port_module_status(bp); 9206 9207 /* VF-reps may need to be re-opened after the PF is re-opened */ 9208 if (BNXT_PF(bp)) 9209 bnxt_vf_reps_open(bp); 9210 return 0; 9211 9212 open_err: 9213 bnxt_debug_dev_exit(bp); 9214 bnxt_disable_napi(bp); 9215 9216 open_err_irq: 9217 bnxt_del_napi(bp); 9218 9219 open_err_free_mem: 9220 bnxt_free_skbs(bp); 9221 bnxt_free_irq(bp); 9222 bnxt_free_mem(bp, true); 9223 return rc; 9224 } 9225 9226 /* rtnl_lock held */ 9227 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9228 { 9229 int rc = 0; 9230 9231 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 9232 if (rc) { 9233 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 9234 dev_close(bp->dev); 9235 } 9236 return rc; 9237 } 9238 9239 /* rtnl_lock held, open the NIC half way by allocating all resources, but 9240 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 9241 * self tests. 9242 */ 9243 int bnxt_half_open_nic(struct bnxt *bp) 9244 { 9245 int rc = 0; 9246 9247 rc = bnxt_alloc_mem(bp, false); 9248 if (rc) { 9249 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9250 goto half_open_err; 9251 } 9252 rc = bnxt_init_nic(bp, false); 9253 if (rc) { 9254 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9255 goto half_open_err; 9256 } 9257 return 0; 9258 9259 half_open_err: 9260 bnxt_free_skbs(bp); 9261 bnxt_free_mem(bp, false); 9262 dev_close(bp->dev); 9263 return rc; 9264 } 9265 9266 /* rtnl_lock held, this call can only be made after a previous successful 9267 * call to bnxt_half_open_nic(). 
9268 */ 9269 void bnxt_half_close_nic(struct bnxt *bp) 9270 { 9271 bnxt_hwrm_resource_free(bp, false, false); 9272 bnxt_free_skbs(bp); 9273 bnxt_free_mem(bp, false); 9274 } 9275 9276 static void bnxt_reenable_sriov(struct bnxt *bp) 9277 { 9278 if (BNXT_PF(bp)) { 9279 struct bnxt_pf_info *pf = &bp->pf; 9280 int n = pf->active_vfs; 9281 9282 if (n) 9283 bnxt_cfg_hw_sriov(bp, &n, true); 9284 } 9285 } 9286 9287 static int bnxt_open(struct net_device *dev) 9288 { 9289 struct bnxt *bp = netdev_priv(dev); 9290 int rc; 9291 9292 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 9293 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n"); 9294 return -ENODEV; 9295 } 9296 9297 rc = bnxt_hwrm_if_change(bp, true); 9298 if (rc) 9299 return rc; 9300 rc = __bnxt_open_nic(bp, true, true); 9301 if (rc) { 9302 bnxt_hwrm_if_change(bp, false); 9303 } else { 9304 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 9305 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 9306 bnxt_ulp_start(bp, 0); 9307 bnxt_reenable_sriov(bp); 9308 } 9309 } 9310 bnxt_hwmon_open(bp); 9311 } 9312 9313 return rc; 9314 } 9315 9316 static bool bnxt_drv_busy(struct bnxt *bp) 9317 { 9318 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 9319 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 9320 } 9321 9322 static void bnxt_get_ring_stats(struct bnxt *bp, 9323 struct rtnl_link_stats64 *stats); 9324 9325 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 9326 bool link_re_init) 9327 { 9328 /* Close the VF-reps before closing PF */ 9329 if (BNXT_PF(bp)) 9330 bnxt_vf_reps_close(bp); 9331 9332 /* Change device state to avoid TX queue wake-ups */ 9333 bnxt_tx_disable(bp); 9334 9335 clear_bit(BNXT_STATE_OPEN, &bp->state); 9336 smp_mb__after_atomic(); 9337 while (bnxt_drv_busy(bp)) 9338 msleep(20); 9339 9340 /* Flush rings and disable interrupts */ 9341 bnxt_shutdown_nic(bp, irq_re_init); 9342 9343 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 9344 9345 bnxt_debug_dev_exit(bp); 9346 bnxt_disable_napi(bp); 9347 del_timer_sync(&bp->timer); 9348 bnxt_free_skbs(bp); 9349 9350 /* Save ring stats before shutdown */ 9351 if (bp->bnapi && irq_re_init) 9352 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 9353 if (irq_re_init) { 9354 bnxt_free_irq(bp); 9355 bnxt_del_napi(bp); 9356 } 9357 bnxt_free_mem(bp, irq_re_init); 9358 } 9359 9360 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9361 { 9362 int rc = 0; 9363 9364 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 9365 /* If we get here, it means firmware reset is in progress 9366 * while we are trying to close. We can safely proceed with 9367 * the close because we are holding rtnl_lock(). Some firmware 9368 * messages may fail as we proceed to close. We set the 9369 * ABORT_ERR flag here so that the FW reset thread will later 9370 * abort when it gets the rtnl_lock() and sees the flag.
9371 */ 9372 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 9373 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 9374 } 9375 9376 #ifdef CONFIG_BNXT_SRIOV 9377 if (bp->sriov_cfg) { 9378 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 9379 !bp->sriov_cfg, 9380 BNXT_SRIOV_CFG_WAIT_TMO); 9381 if (rc) 9382 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 9383 } 9384 #endif 9385 __bnxt_close_nic(bp, irq_re_init, link_re_init); 9386 return rc; 9387 } 9388 9389 static int bnxt_close(struct net_device *dev) 9390 { 9391 struct bnxt *bp = netdev_priv(dev); 9392 9393 bnxt_hwmon_close(bp); 9394 bnxt_close_nic(bp, true, true); 9395 bnxt_hwrm_shutdown_link(bp); 9396 bnxt_hwrm_if_change(bp, false); 9397 return 0; 9398 } 9399 9400 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 9401 u16 *val) 9402 { 9403 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; 9404 struct hwrm_port_phy_mdio_read_input req = {0}; 9405 int rc; 9406 9407 if (bp->hwrm_spec_code < 0x10a00) 9408 return -EOPNOTSUPP; 9409 9410 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); 9411 req.port_id = cpu_to_le16(bp->pf.port_id); 9412 req.phy_addr = phy_addr; 9413 req.reg_addr = cpu_to_le16(reg & 0x1f); 9414 if (mdio_phy_id_is_c45(phy_addr)) { 9415 req.cl45_mdio = 1; 9416 req.phy_addr = mdio_phy_id_prtad(phy_addr); 9417 req.dev_addr = mdio_phy_id_devad(phy_addr); 9418 req.reg_addr = cpu_to_le16(reg); 9419 } 9420 9421 mutex_lock(&bp->hwrm_cmd_lock); 9422 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9423 if (!rc) 9424 *val = le16_to_cpu(resp->reg_data); 9425 mutex_unlock(&bp->hwrm_cmd_lock); 9426 return rc; 9427 } 9428 9429 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 9430 u16 val) 9431 { 9432 struct hwrm_port_phy_mdio_write_input req = {0}; 9433 9434 if (bp->hwrm_spec_code < 0x10a00) 9435 return -EOPNOTSUPP; 9436 9437 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); 9438 req.port_id = cpu_to_le16(bp->pf.port_id); 9439 req.phy_addr = phy_addr; 9440 req.reg_addr = cpu_to_le16(reg & 0x1f); 9441 if (mdio_phy_id_is_c45(phy_addr)) { 9442 req.cl45_mdio = 1; 9443 req.phy_addr = mdio_phy_id_prtad(phy_addr); 9444 req.dev_addr = mdio_phy_id_devad(phy_addr); 9445 req.reg_addr = cpu_to_le16(reg); 9446 } 9447 req.reg_data = cpu_to_le16(val); 9448 9449 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9450 } 9451 9452 /* rtnl_lock held */ 9453 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 9454 { 9455 struct mii_ioctl_data *mdio = if_mii(ifr); 9456 struct bnxt *bp = netdev_priv(dev); 9457 int rc; 9458 9459 switch (cmd) { 9460 case SIOCGMIIPHY: 9461 mdio->phy_id = bp->link_info.phy_addr; 9462 9463 /* fallthru */ 9464 case SIOCGMIIREG: { 9465 u16 mii_regval = 0; 9466 9467 if (!netif_running(dev)) 9468 return -EAGAIN; 9469 9470 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 9471 &mii_regval); 9472 mdio->val_out = mii_regval; 9473 return rc; 9474 } 9475 9476 case SIOCSMIIREG: 9477 if (!netif_running(dev)) 9478 return -EAGAIN; 9479 9480 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 9481 mdio->val_in); 9482 9483 default: 9484 /* do nothing */ 9485 break; 9486 } 9487 return -EOPNOTSUPP; 9488 } 9489 9490 static void bnxt_get_ring_stats(struct bnxt *bp, 9491 struct rtnl_link_stats64 *stats) 9492 { 9493 int i; 9494 9495 9496 for (i = 0; i < bp->cp_nr_rings; i++) { 9497 struct 
bnxt_napi *bnapi = bp->bnapi[i]; 9498 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 9499 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 9500 9501 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 9502 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 9503 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 9504 9505 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 9506 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 9507 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 9508 9509 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 9510 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 9511 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 9512 9513 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 9514 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 9515 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 9516 9517 stats->rx_missed_errors += 9518 le64_to_cpu(hw_stats->rx_discard_pkts); 9519 9520 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 9521 9522 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 9523 } 9524 } 9525 9526 static void bnxt_add_prev_stats(struct bnxt *bp, 9527 struct rtnl_link_stats64 *stats) 9528 { 9529 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 9530 9531 stats->rx_packets += prev_stats->rx_packets; 9532 stats->tx_packets += prev_stats->tx_packets; 9533 stats->rx_bytes += prev_stats->rx_bytes; 9534 stats->tx_bytes += prev_stats->tx_bytes; 9535 stats->rx_missed_errors += prev_stats->rx_missed_errors; 9536 stats->multicast += prev_stats->multicast; 9537 stats->tx_dropped += prev_stats->tx_dropped; 9538 } 9539 9540 static void 9541 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 9542 { 9543 struct bnxt *bp = netdev_priv(dev); 9544 9545 set_bit(BNXT_STATE_READ_STATS, &bp->state); 9546 /* Make sure bnxt_close_nic() sees that we are reading stats before 9547 * we check the BNXT_STATE_OPEN flag. 
9548 */ 9549 smp_mb__after_atomic(); 9550 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 9551 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 9552 *stats = bp->net_stats_prev; 9553 return; 9554 } 9555 9556 bnxt_get_ring_stats(bp, stats); 9557 bnxt_add_prev_stats(bp, stats); 9558 9559 if (bp->flags & BNXT_FLAG_PORT_STATS) { 9560 struct rx_port_stats *rx = bp->hw_rx_port_stats; 9561 struct tx_port_stats *tx = bp->hw_tx_port_stats; 9562 9563 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 9564 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 9565 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 9566 le64_to_cpu(rx->rx_ovrsz_frames) + 9567 le64_to_cpu(rx->rx_runt_frames); 9568 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 9569 le64_to_cpu(rx->rx_jbr_frames); 9570 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 9571 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 9572 stats->tx_errors = le64_to_cpu(tx->tx_err); 9573 } 9574 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 9575 } 9576 9577 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 9578 { 9579 struct net_device *dev = bp->dev; 9580 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9581 struct netdev_hw_addr *ha; 9582 u8 *haddr; 9583 int mc_count = 0; 9584 bool update = false; 9585 int off = 0; 9586 9587 netdev_for_each_mc_addr(ha, dev) { 9588 if (mc_count >= BNXT_MAX_MC_ADDRS) { 9589 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9590 vnic->mc_list_count = 0; 9591 return false; 9592 } 9593 haddr = ha->addr; 9594 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 9595 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 9596 update = true; 9597 } 9598 off += ETH_ALEN; 9599 mc_count++; 9600 } 9601 if (mc_count) 9602 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 9603 9604 if (mc_count != vnic->mc_list_count) { 9605 vnic->mc_list_count = mc_count; 9606 update = true; 9607 } 9608 return update; 9609 } 9610 9611 static bool bnxt_uc_list_updated(struct bnxt *bp) 9612 { 9613 struct net_device *dev = bp->dev; 9614 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9615 struct netdev_hw_addr *ha; 9616 int off = 0; 9617 9618 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 9619 return true; 9620 9621 netdev_for_each_uc_addr(ha, dev) { 9622 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 9623 return true; 9624 9625 off += ETH_ALEN; 9626 } 9627 return false; 9628 } 9629 9630 static void bnxt_set_rx_mode(struct net_device *dev) 9631 { 9632 struct bnxt *bp = netdev_priv(dev); 9633 struct bnxt_vnic_info *vnic; 9634 bool mc_update = false; 9635 bool uc_update; 9636 u32 mask; 9637 9638 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 9639 return; 9640 9641 vnic = &bp->vnic_info[0]; 9642 mask = vnic->rx_mask; 9643 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 9644 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 9645 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 9646 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 9647 9648 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 9649 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9650 9651 uc_update = bnxt_uc_list_updated(bp); 9652 9653 if (dev->flags & IFF_BROADCAST) 9654 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 9655 if (dev->flags & IFF_ALLMULTI) { 9656 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9657 vnic->mc_list_count = 0; 9658 } else { 9659 mc_update = bnxt_mc_list_updated(bp, &mask); 9660 } 9661 9662 if (mask != vnic->rx_mask || uc_update || mc_update) { 9663 vnic->rx_mask = mask; 9664 9665 set_bit(BNXT_RX_MASK_SP_EVENT, 
&bp->sp_event); 9666 bnxt_queue_sp_work(bp); 9667 } 9668 } 9669 9670 static int bnxt_cfg_rx_mode(struct bnxt *bp) 9671 { 9672 struct net_device *dev = bp->dev; 9673 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9674 struct netdev_hw_addr *ha; 9675 int i, off = 0, rc; 9676 bool uc_update; 9677 9678 netif_addr_lock_bh(dev); 9679 uc_update = bnxt_uc_list_updated(bp); 9680 netif_addr_unlock_bh(dev); 9681 9682 if (!uc_update) 9683 goto skip_uc; 9684 9685 mutex_lock(&bp->hwrm_cmd_lock); 9686 for (i = 1; i < vnic->uc_filter_count; i++) { 9687 struct hwrm_cfa_l2_filter_free_input req = {0}; 9688 9689 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 9690 -1); 9691 9692 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 9693 9694 rc = _hwrm_send_message(bp, &req, sizeof(req), 9695 HWRM_CMD_TIMEOUT); 9696 } 9697 mutex_unlock(&bp->hwrm_cmd_lock); 9698 9699 vnic->uc_filter_count = 1; 9700 9701 netif_addr_lock_bh(dev); 9702 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 9703 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9704 } else { 9705 netdev_for_each_uc_addr(ha, dev) { 9706 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 9707 off += ETH_ALEN; 9708 vnic->uc_filter_count++; 9709 } 9710 } 9711 netif_addr_unlock_bh(dev); 9712 9713 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 9714 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 9715 if (rc) { 9716 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 9717 rc); 9718 vnic->uc_filter_count = i; 9719 return rc; 9720 } 9721 } 9722 9723 skip_uc: 9724 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 9725 if (rc && vnic->mc_list_count) { 9726 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 9727 rc); 9728 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9729 vnic->mc_list_count = 0; 9730 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 9731 } 9732 if (rc) 9733 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 9734 rc); 9735 9736 return rc; 9737 } 9738 9739 static bool bnxt_can_reserve_rings(struct bnxt *bp) 9740 { 9741 #ifdef CONFIG_BNXT_SRIOV 9742 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 9743 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9744 9745 /* No minimum rings were provisioned by the PF. Don't 9746 * reserve rings by default when device is down. 
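* If minimum TX rings were provisioned, or TX rings are already reserved, the VF can always reserve rings.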
9747 */ 9748 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 9749 return true; 9750 9751 if (!netif_running(bp->dev)) 9752 return false; 9753 } 9754 #endif 9755 return true; 9756 } 9757 9758 /* If the chip and firmware supports RFS */ 9759 static bool bnxt_rfs_supported(struct bnxt *bp) 9760 { 9761 if (bp->flags & BNXT_FLAG_CHIP_P5) { 9762 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 9763 return true; 9764 return false; 9765 } 9766 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 9767 return true; 9768 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 9769 return true; 9770 return false; 9771 } 9772 9773 /* If runtime conditions support RFS */ 9774 static bool bnxt_rfs_capable(struct bnxt *bp) 9775 { 9776 #ifdef CONFIG_RFS_ACCEL 9777 int vnics, max_vnics, max_rss_ctxs; 9778 9779 if (bp->flags & BNXT_FLAG_CHIP_P5) 9780 return bnxt_rfs_supported(bp); 9781 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) 9782 return false; 9783 9784 vnics = 1 + bp->rx_nr_rings; 9785 max_vnics = bnxt_get_max_func_vnics(bp); 9786 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 9787 9788 /* RSS contexts not a limiting factor */ 9789 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 9790 max_rss_ctxs = max_vnics; 9791 if (vnics > max_vnics || vnics > max_rss_ctxs) { 9792 if (bp->rx_nr_rings > 1) 9793 netdev_warn(bp->dev, 9794 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 9795 min(max_rss_ctxs - 1, max_vnics - 1)); 9796 return false; 9797 } 9798 9799 if (!BNXT_NEW_RM(bp)) 9800 return true; 9801 9802 if (vnics == bp->hw_resc.resv_vnics) 9803 return true; 9804 9805 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); 9806 if (vnics <= bp->hw_resc.resv_vnics) 9807 return true; 9808 9809 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 9810 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); 9811 return false; 9812 #else 9813 return false; 9814 #endif 9815 } 9816 9817 static netdev_features_t bnxt_fix_features(struct net_device *dev, 9818 netdev_features_t features) 9819 { 9820 struct bnxt *bp = netdev_priv(dev); 9821 netdev_features_t vlan_features; 9822 9823 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 9824 features &= ~NETIF_F_NTUPLE; 9825 9826 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 9827 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 9828 9829 if (!(features & NETIF_F_GRO)) 9830 features &= ~NETIF_F_GRO_HW; 9831 9832 if (features & NETIF_F_GRO_HW) 9833 features &= ~NETIF_F_LRO; 9834 9835 /* Both CTAG and STAG VLAN accelaration on the RX side have to be 9836 * turned on or off together. 
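* If only one of them is requested, resolve the conflict based on the current state: clear both when CTAG RX is currently enabled, otherwise enable both.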
9837 */ 9838 vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | 9839 NETIF_F_HW_VLAN_STAG_RX); 9840 if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | 9841 NETIF_F_HW_VLAN_STAG_RX)) { 9842 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 9843 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 9844 NETIF_F_HW_VLAN_STAG_RX); 9845 else if (vlan_features) 9846 features |= NETIF_F_HW_VLAN_CTAG_RX | 9847 NETIF_F_HW_VLAN_STAG_RX; 9848 } 9849 #ifdef CONFIG_BNXT_SRIOV 9850 if (BNXT_VF(bp)) { 9851 if (bp->vf.vlan) { 9852 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 9853 NETIF_F_HW_VLAN_STAG_RX); 9854 } 9855 } 9856 #endif 9857 return features; 9858 } 9859 9860 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 9861 { 9862 struct bnxt *bp = netdev_priv(dev); 9863 u32 flags = bp->flags; 9864 u32 changes; 9865 int rc = 0; 9866 bool re_init = false; 9867 bool update_tpa = false; 9868 9869 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 9870 if (features & NETIF_F_GRO_HW) 9871 flags |= BNXT_FLAG_GRO; 9872 else if (features & NETIF_F_LRO) 9873 flags |= BNXT_FLAG_LRO; 9874 9875 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 9876 flags &= ~BNXT_FLAG_TPA; 9877 9878 if (features & NETIF_F_HW_VLAN_CTAG_RX) 9879 flags |= BNXT_FLAG_STRIP_VLAN; 9880 9881 if (features & NETIF_F_NTUPLE) 9882 flags |= BNXT_FLAG_RFS; 9883 9884 changes = flags ^ bp->flags; 9885 if (changes & BNXT_FLAG_TPA) { 9886 update_tpa = true; 9887 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 9888 (flags & BNXT_FLAG_TPA) == 0 || 9889 (bp->flags & BNXT_FLAG_CHIP_P5)) 9890 re_init = true; 9891 } 9892 9893 if (changes & ~BNXT_FLAG_TPA) 9894 re_init = true; 9895 9896 if (flags != bp->flags) { 9897 u32 old_flags = bp->flags; 9898 9899 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 9900 bp->flags = flags; 9901 if (update_tpa) 9902 bnxt_set_ring_params(bp); 9903 return rc; 9904 } 9905 9906 if (re_init) { 9907 bnxt_close_nic(bp, false, false); 9908 bp->flags = flags; 9909 if (update_tpa) 9910 bnxt_set_ring_params(bp); 9911 9912 return bnxt_open_nic(bp, false, false); 9913 } 9914 if (update_tpa) { 9915 bp->flags = flags; 9916 rc = bnxt_set_tpa(bp, 9917 (flags & BNXT_FLAG_TPA) ? 
9918 true : false); 9919 if (rc) 9920 bp->flags = old_flags; 9921 } 9922 } 9923 return rc; 9924 } 9925 9926 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 9927 u32 ring_id, u32 *prod, u32 *cons) 9928 { 9929 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; 9930 struct hwrm_dbg_ring_info_get_input req = {0}; 9931 int rc; 9932 9933 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); 9934 req.ring_type = ring_type; 9935 req.fw_ring_id = cpu_to_le32(ring_id); 9936 mutex_lock(&bp->hwrm_cmd_lock); 9937 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9938 if (!rc) { 9939 *prod = le32_to_cpu(resp->producer_index); 9940 *cons = le32_to_cpu(resp->consumer_index); 9941 } 9942 mutex_unlock(&bp->hwrm_cmd_lock); 9943 return rc; 9944 } 9945 9946 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 9947 { 9948 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 9949 int i = bnapi->index; 9950 9951 if (!txr) 9952 return; 9953 9954 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 9955 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 9956 txr->tx_cons); 9957 } 9958 9959 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 9960 { 9961 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 9962 int i = bnapi->index; 9963 9964 if (!rxr) 9965 return; 9966 9967 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 9968 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 9969 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 9970 rxr->rx_sw_agg_prod); 9971 } 9972 9973 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 9974 { 9975 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 9976 int i = bnapi->index; 9977 9978 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 9979 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 9980 } 9981 9982 static void bnxt_dbg_dump_states(struct bnxt *bp) 9983 { 9984 int i; 9985 struct bnxt_napi *bnapi; 9986 9987 for (i = 0; i < bp->cp_nr_rings; i++) { 9988 bnapi = bp->bnapi[i]; 9989 if (netif_msg_drv(bp)) { 9990 bnxt_dump_tx_sw_state(bnapi); 9991 bnxt_dump_rx_sw_state(bnapi); 9992 bnxt_dump_cp_sw_state(bnapi); 9993 } 9994 } 9995 } 9996 9997 static void bnxt_reset_task(struct bnxt *bp, bool silent) 9998 { 9999 if (!silent) 10000 bnxt_dbg_dump_states(bp); 10001 if (netif_running(bp->dev)) { 10002 int rc; 10003 10004 if (silent) { 10005 bnxt_close_nic(bp, false, false); 10006 bnxt_open_nic(bp, false, false); 10007 } else { 10008 bnxt_ulp_stop(bp); 10009 bnxt_close_nic(bp, true, false); 10010 rc = bnxt_open_nic(bp, true, false); 10011 bnxt_ulp_start(bp, rc); 10012 } 10013 } 10014 } 10015 10016 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 10017 { 10018 struct bnxt *bp = netdev_priv(dev); 10019 10020 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 10021 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 10022 bnxt_queue_sp_work(bp); 10023 } 10024 10025 static void bnxt_fw_health_check(struct bnxt *bp) 10026 { 10027 struct bnxt_fw_health *fw_health = bp->fw_health; 10028 u32 val; 10029 10030 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10031 return; 10032 10033 if (fw_health->tmr_counter) { 10034 fw_health->tmr_counter--; 10035 return; 10036 } 10037 10038 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 10039 if (val == fw_health->last_fw_heartbeat) 10040 goto fw_reset; 10041 10042 fw_health->last_fw_heartbeat = val; 10043 
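/* Heartbeat is advancing; also make sure the firmware reset counter has not changed since the last check. */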
10044 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 10045 if (val != fw_health->last_fw_reset_cnt) 10046 goto fw_reset; 10047 10048 fw_health->tmr_counter = fw_health->tmr_multiplier; 10049 return; 10050 10051 fw_reset: 10052 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event); 10053 bnxt_queue_sp_work(bp); 10054 } 10055 10056 static void bnxt_timer(struct timer_list *t) 10057 { 10058 struct bnxt *bp = from_timer(bp, t, timer); 10059 struct net_device *dev = bp->dev; 10060 10061 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 10062 return; 10063 10064 if (atomic_read(&bp->intr_sem) != 0) 10065 goto bnxt_restart_timer; 10066 10067 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 10068 bnxt_fw_health_check(bp); 10069 10070 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 10071 bp->stats_coal_ticks) { 10072 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 10073 bnxt_queue_sp_work(bp); 10074 } 10075 10076 if (bnxt_tc_flower_enabled(bp)) { 10077 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); 10078 bnxt_queue_sp_work(bp); 10079 } 10080 10081 #ifdef CONFIG_RFS_ACCEL 10082 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { 10083 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 10084 bnxt_queue_sp_work(bp); 10085 } 10086 #endif /*CONFIG_RFS_ACCEL*/ 10087 10088 if (bp->link_info.phy_retry) { 10089 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 10090 bp->link_info.phy_retry = false; 10091 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 10092 } else { 10093 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); 10094 bnxt_queue_sp_work(bp); 10095 } 10096 } 10097 10098 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && 10099 netif_carrier_ok(dev)) { 10100 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); 10101 bnxt_queue_sp_work(bp); 10102 } 10103 bnxt_restart_timer: 10104 mod_timer(&bp->timer, jiffies + bp->current_interval); 10105 } 10106 10107 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 10108 { 10109 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 10110 * set. If the device is being closed, bnxt_close() may be holding 10111 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 10112 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 10113 */ 10114 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10115 rtnl_lock(); 10116 } 10117 10118 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 10119 { 10120 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10121 rtnl_unlock(); 10122 } 10123 10124 /* Only called from bnxt_sp_task() */ 10125 static void bnxt_reset(struct bnxt *bp, bool silent) 10126 { 10127 bnxt_rtnl_lock_sp(bp); 10128 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 10129 bnxt_reset_task(bp, silent); 10130 bnxt_rtnl_unlock_sp(bp); 10131 } 10132 10133 static void bnxt_fw_reset_close(struct bnxt *bp) 10134 { 10135 bnxt_ulp_stop(bp); 10136 /* When firmware is fatal state, disable PCI device to prevent 10137 * any potential bad DMAs before freeing kernel memory. 
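* In the non-fatal case the device is disabled further below, after the NIC has been closed; the pci_is_enabled() check avoids disabling it twice.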
10138 */ 10139 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 10140 pci_disable_device(bp->pdev); 10141 __bnxt_close_nic(bp, true, false); 10142 bnxt_clear_int_mode(bp); 10143 bnxt_hwrm_func_drv_unrgtr(bp); 10144 if (pci_is_enabled(bp->pdev)) 10145 pci_disable_device(bp->pdev); 10146 bnxt_free_ctx_mem(bp); 10147 kfree(bp->ctx); 10148 bp->ctx = NULL; 10149 } 10150 10151 static bool is_bnxt_fw_ok(struct bnxt *bp) 10152 { 10153 struct bnxt_fw_health *fw_health = bp->fw_health; 10154 bool no_heartbeat = false, has_reset = false; 10155 u32 val; 10156 10157 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 10158 if (val == fw_health->last_fw_heartbeat) 10159 no_heartbeat = true; 10160 10161 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 10162 if (val != fw_health->last_fw_reset_cnt) 10163 has_reset = true; 10164 10165 if (!no_heartbeat && has_reset) 10166 return true; 10167 10168 return false; 10169 } 10170 10171 /* rtnl_lock is acquired before calling this function */ 10172 static void bnxt_force_fw_reset(struct bnxt *bp) 10173 { 10174 struct bnxt_fw_health *fw_health = bp->fw_health; 10175 u32 wait_dsecs; 10176 10177 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 10178 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10179 return; 10180 10181 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10182 bnxt_fw_reset_close(bp); 10183 wait_dsecs = fw_health->master_func_wait_dsecs; 10184 if (fw_health->master) { 10185 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 10186 wait_dsecs = 0; 10187 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 10188 } else { 10189 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 10190 wait_dsecs = fw_health->normal_func_wait_dsecs; 10191 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10192 } 10193 10194 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 10195 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 10196 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 10197 } 10198 10199 void bnxt_fw_exception(struct bnxt *bp) 10200 { 10201 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 10202 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 10203 bnxt_rtnl_lock_sp(bp); 10204 bnxt_force_fw_reset(bp); 10205 bnxt_rtnl_unlock_sp(bp); 10206 } 10207 10208 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 10209 * < 0 on error. 
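* Returns 0 on a VF, or when no VFs are registered and no configuration is pending.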
10210 */ 10211 static int bnxt_get_registered_vfs(struct bnxt *bp) 10212 { 10213 #ifdef CONFIG_BNXT_SRIOV 10214 int rc; 10215 10216 if (!BNXT_PF(bp)) 10217 return 0; 10218 10219 rc = bnxt_hwrm_func_qcfg(bp); 10220 if (rc) { 10221 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 10222 return rc; 10223 } 10224 if (bp->pf.registered_vfs) 10225 return bp->pf.registered_vfs; 10226 if (bp->sriov_cfg) 10227 return 1; 10228 #endif 10229 return 0; 10230 } 10231 10232 void bnxt_fw_reset(struct bnxt *bp) 10233 { 10234 bnxt_rtnl_lock_sp(bp); 10235 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 10236 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10237 int n = 0, tmo; 10238 10239 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10240 if (bp->pf.active_vfs && 10241 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 10242 n = bnxt_get_registered_vfs(bp); 10243 if (n < 0) { 10244 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 10245 n); 10246 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10247 dev_close(bp->dev); 10248 goto fw_reset_exit; 10249 } else if (n > 0) { 10250 u16 vf_tmo_dsecs = n * 10; 10251 10252 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 10253 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 10254 bp->fw_reset_state = 10255 BNXT_FW_RESET_STATE_POLL_VF; 10256 bnxt_queue_fw_reset_work(bp, HZ / 10); 10257 goto fw_reset_exit; 10258 } 10259 bnxt_fw_reset_close(bp); 10260 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10261 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 10262 tmo = HZ / 10; 10263 } else { 10264 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10265 tmo = bp->fw_reset_min_dsecs * HZ / 10; 10266 } 10267 bnxt_queue_fw_reset_work(bp, tmo); 10268 } 10269 fw_reset_exit: 10270 bnxt_rtnl_unlock_sp(bp); 10271 } 10272 10273 static void bnxt_chk_missed_irq(struct bnxt *bp) 10274 { 10275 int i; 10276 10277 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 10278 return; 10279 10280 for (i = 0; i < bp->cp_nr_rings; i++) { 10281 struct bnxt_napi *bnapi = bp->bnapi[i]; 10282 struct bnxt_cp_ring_info *cpr; 10283 u32 fw_ring_id; 10284 int j; 10285 10286 if (!bnapi) 10287 continue; 10288 10289 cpr = &bnapi->cp_ring; 10290 for (j = 0; j < 2; j++) { 10291 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 10292 u32 val[2]; 10293 10294 if (!cpr2 || cpr2->has_more_work || 10295 !bnxt_has_work(bp, cpr2)) 10296 continue; 10297 10298 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 10299 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 10300 continue; 10301 } 10302 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 10303 bnxt_dbg_hwrm_ring_info_get(bp, 10304 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 10305 fw_ring_id, &val[0], &val[1]); 10306 cpr->sw_stats.cmn.missed_irqs++; 10307 } 10308 } 10309 } 10310 10311 static void bnxt_cfg_ntp_filters(struct bnxt *); 10312 10313 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 10314 { 10315 struct bnxt_link_info *link_info = &bp->link_info; 10316 10317 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 10318 link_info->autoneg = BNXT_AUTONEG_SPEED; 10319 if (bp->hwrm_spec_code >= 0x10201) { 10320 if (link_info->auto_pause_setting & 10321 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 10322 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10323 } else { 10324 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10325 } 10326 link_info->advertising = link_info->auto_link_speeds; 10327 } else { 10328 link_info->req_link_speed = link_info->force_link_speed; 10329 link_info->req_duplex = link_info->duplex_setting; 10330 } 10331 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 
10332 link_info->req_flow_ctrl = 10333 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 10334 else 10335 link_info->req_flow_ctrl = link_info->force_pause_setting; 10336 } 10337 10338 static void bnxt_sp_task(struct work_struct *work) 10339 { 10340 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 10341 10342 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10343 smp_mb__after_atomic(); 10344 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 10345 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10346 return; 10347 } 10348 10349 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 10350 bnxt_cfg_rx_mode(bp); 10351 10352 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 10353 bnxt_cfg_ntp_filters(bp); 10354 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 10355 bnxt_hwrm_exec_fwd_req(bp); 10356 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 10357 bnxt_hwrm_tunnel_dst_port_alloc( 10358 bp, bp->vxlan_port, 10359 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10360 } 10361 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 10362 bnxt_hwrm_tunnel_dst_port_free( 10363 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10364 } 10365 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { 10366 bnxt_hwrm_tunnel_dst_port_alloc( 10367 bp, bp->nge_port, 10368 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10369 } 10370 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { 10371 bnxt_hwrm_tunnel_dst_port_free( 10372 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10373 } 10374 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 10375 bnxt_hwrm_port_qstats(bp); 10376 bnxt_hwrm_port_qstats_ext(bp); 10377 bnxt_hwrm_pcie_qstats(bp); 10378 } 10379 10380 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 10381 int rc; 10382 10383 mutex_lock(&bp->link_lock); 10384 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 10385 &bp->sp_event)) 10386 bnxt_hwrm_phy_qcaps(bp); 10387 10388 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 10389 &bp->sp_event)) 10390 bnxt_init_ethtool_link_settings(bp); 10391 10392 rc = bnxt_update_link(bp, true); 10393 mutex_unlock(&bp->link_lock); 10394 if (rc) 10395 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 10396 rc); 10397 } 10398 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 10399 int rc; 10400 10401 mutex_lock(&bp->link_lock); 10402 rc = bnxt_update_phy_setting(bp); 10403 mutex_unlock(&bp->link_lock); 10404 if (rc) { 10405 netdev_warn(bp->dev, "update phy settings retry failed\n"); 10406 } else { 10407 bp->link_info.phy_retry = false; 10408 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 10409 } 10410 } 10411 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 10412 mutex_lock(&bp->link_lock); 10413 bnxt_get_port_module_status(bp); 10414 mutex_unlock(&bp->link_lock); 10415 } 10416 10417 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 10418 bnxt_tc_flow_stats_work(bp); 10419 10420 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 10421 bnxt_chk_missed_irq(bp); 10422 10423 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 10424 * must be the last functions to be called before exiting. 
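* They temporarily drop BNXT_STATE_IN_SP_TASK while acquiring rtnl_lock (see bnxt_rtnl_lock_sp()), so the device state may change before they run.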
10425 */ 10426 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 10427 bnxt_reset(bp, false); 10428 10429 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 10430 bnxt_reset(bp, true); 10431 10432 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) 10433 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); 10434 10435 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 10436 if (!is_bnxt_fw_ok(bp)) 10437 bnxt_devlink_health_report(bp, 10438 BNXT_FW_EXCEPTION_SP_EVENT); 10439 } 10440 10441 smp_mb__before_atomic(); 10442 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10443 } 10444 10445 /* Under rtnl_lock */ 10446 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 10447 int tx_xdp) 10448 { 10449 int max_rx, max_tx, tx_sets = 1; 10450 int tx_rings_needed, stats; 10451 int rx_rings = rx; 10452 int cp, vnics, rc; 10453 10454 if (tcs) 10455 tx_sets = tcs; 10456 10457 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 10458 if (rc) 10459 return rc; 10460 10461 if (max_rx < rx) 10462 return -ENOMEM; 10463 10464 tx_rings_needed = tx * tx_sets + tx_xdp; 10465 if (max_tx < tx_rings_needed) 10466 return -ENOMEM; 10467 10468 vnics = 1; 10469 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 10470 vnics += rx_rings; 10471 10472 if (bp->flags & BNXT_FLAG_AGG_RINGS) 10473 rx_rings <<= 1; 10474 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; 10475 stats = cp; 10476 if (BNXT_NEW_RM(bp)) { 10477 cp += bnxt_get_ulp_msix_num(bp); 10478 stats += bnxt_get_ulp_stat_ctxs(bp); 10479 } 10480 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 10481 stats, vnics); 10482 } 10483 10484 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 10485 { 10486 if (bp->bar2) { 10487 pci_iounmap(pdev, bp->bar2); 10488 bp->bar2 = NULL; 10489 } 10490 10491 if (bp->bar1) { 10492 pci_iounmap(pdev, bp->bar1); 10493 bp->bar1 = NULL; 10494 } 10495 10496 if (bp->bar0) { 10497 pci_iounmap(pdev, bp->bar0); 10498 bp->bar0 = NULL; 10499 } 10500 } 10501 10502 static void bnxt_cleanup_pci(struct bnxt *bp) 10503 { 10504 bnxt_unmap_bars(bp, bp->pdev); 10505 pci_release_regions(bp->pdev); 10506 if (pci_is_enabled(bp->pdev)) 10507 pci_disable_device(bp->pdev); 10508 } 10509 10510 static void bnxt_init_dflt_coal(struct bnxt *bp) 10511 { 10512 struct bnxt_coal *coal; 10513 10514 /* Tick values in micro seconds. 10515 * 1 coal_buf x bufs_per_record = 1 completion record. 
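* For example, with the rx settings below (bufs_per_record = 2), coal_bufs = 30 corresponds to 15 completion records.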
10516 */ 10517 coal = &bp->rx_coal; 10518 coal->coal_ticks = 10; 10519 coal->coal_bufs = 30; 10520 coal->coal_ticks_irq = 1; 10521 coal->coal_bufs_irq = 2; 10522 coal->idle_thresh = 50; 10523 coal->bufs_per_record = 2; 10524 coal->budget = 64; /* NAPI budget */ 10525 10526 coal = &bp->tx_coal; 10527 coal->coal_ticks = 28; 10528 coal->coal_bufs = 30; 10529 coal->coal_ticks_irq = 2; 10530 coal->coal_bufs_irq = 2; 10531 coal->bufs_per_record = 1; 10532 10533 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 10534 } 10535 10536 static void bnxt_alloc_fw_health(struct bnxt *bp) 10537 { 10538 if (bp->fw_health) 10539 return; 10540 10541 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 10542 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 10543 return; 10544 10545 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 10546 if (!bp->fw_health) { 10547 netdev_warn(bp->dev, "Failed to allocate fw_health\n"); 10548 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 10549 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 10550 } 10551 } 10552 10553 static int bnxt_fw_init_one_p1(struct bnxt *bp) 10554 { 10555 int rc; 10556 10557 bp->fw_cap = 0; 10558 rc = bnxt_hwrm_ver_get(bp); 10559 if (rc) 10560 return rc; 10561 10562 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { 10563 rc = bnxt_alloc_kong_hwrm_resources(bp); 10564 if (rc) 10565 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; 10566 } 10567 10568 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 10569 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { 10570 rc = bnxt_alloc_hwrm_short_cmd_req(bp); 10571 if (rc) 10572 return rc; 10573 } 10574 rc = bnxt_hwrm_func_reset(bp); 10575 if (rc) 10576 return -ENODEV; 10577 10578 bnxt_hwrm_fw_set_time(bp); 10579 return 0; 10580 } 10581 10582 static int bnxt_fw_init_one_p2(struct bnxt *bp) 10583 { 10584 int rc; 10585 10586 /* Get the MAX capabilities for this function */ 10587 rc = bnxt_hwrm_func_qcaps(bp); 10588 if (rc) { 10589 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 10590 rc); 10591 return -ENODEV; 10592 } 10593 10594 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 10595 if (rc) 10596 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 10597 rc); 10598 10599 bnxt_alloc_fw_health(bp); 10600 rc = bnxt_hwrm_error_recovery_qcfg(bp); 10601 if (rc) 10602 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 10603 rc); 10604 10605 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 10606 if (rc) 10607 return -ENODEV; 10608 10609 bnxt_hwrm_func_qcfg(bp); 10610 bnxt_hwrm_vnic_qcaps(bp); 10611 bnxt_hwrm_port_led_qcaps(bp); 10612 bnxt_ethtool_init(bp); 10613 bnxt_dcb_init(bp); 10614 return 0; 10615 } 10616 10617 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 10618 { 10619 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; 10620 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 10621 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 10622 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 10623 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 10624 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 10625 bp->flags |= BNXT_FLAG_UDP_RSS_CAP; 10626 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 10627 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 10628 } 10629 } 10630 10631 static void bnxt_set_dflt_rfs(struct bnxt *bp) 10632 { 10633 struct net_device *dev = bp->dev; 10634 10635 dev->hw_features &= ~NETIF_F_NTUPLE; 10636 dev->features &= ~NETIF_F_NTUPLE; 10637 bp->flags &= ~BNXT_FLAG_RFS; 10638 if (bnxt_rfs_supported(bp)) { 10639 dev->hw_features |= NETIF_F_NTUPLE; 10640 if (bnxt_rfs_capable(bp)) { 10641 bp->flags |= BNXT_FLAG_RFS; 10642 
dev->features |= NETIF_F_NTUPLE; 10643 } 10644 } 10645 } 10646 10647 static void bnxt_fw_init_one_p3(struct bnxt *bp) 10648 { 10649 struct pci_dev *pdev = bp->pdev; 10650 10651 bnxt_set_dflt_rss_hash_type(bp); 10652 bnxt_set_dflt_rfs(bp); 10653 10654 bnxt_get_wol_settings(bp); 10655 if (bp->flags & BNXT_FLAG_WOL_CAP) 10656 device_set_wakeup_enable(&pdev->dev, bp->wol); 10657 else 10658 device_set_wakeup_capable(&pdev->dev, false); 10659 10660 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 10661 bnxt_hwrm_coal_params_qcaps(bp); 10662 } 10663 10664 static int bnxt_fw_init_one(struct bnxt *bp) 10665 { 10666 int rc; 10667 10668 rc = bnxt_fw_init_one_p1(bp); 10669 if (rc) { 10670 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 10671 return rc; 10672 } 10673 rc = bnxt_fw_init_one_p2(bp); 10674 if (rc) { 10675 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 10676 return rc; 10677 } 10678 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 10679 if (rc) 10680 return rc; 10681 10682 /* In case fw capabilities have changed, destroy the unneeded 10683 * reporters and create newly capable ones. 10684 */ 10685 bnxt_dl_fw_reporters_destroy(bp, false); 10686 bnxt_dl_fw_reporters_create(bp); 10687 bnxt_fw_init_one_p3(bp); 10688 return 0; 10689 } 10690 10691 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 10692 { 10693 struct bnxt_fw_health *fw_health = bp->fw_health; 10694 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 10695 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 10696 u32 reg_type, reg_off, delay_msecs; 10697 10698 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 10699 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 10700 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 10701 switch (reg_type) { 10702 case BNXT_FW_HEALTH_REG_TYPE_CFG: 10703 pci_write_config_dword(bp->pdev, reg_off, val); 10704 break; 10705 case BNXT_FW_HEALTH_REG_TYPE_GRC: 10706 writel(reg_off & BNXT_GRC_BASE_MASK, 10707 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 10708 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 10709 /* fall through */ 10710 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 10711 writel(val, bp->bar0 + reg_off); 10712 break; 10713 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 10714 writel(val, bp->bar1 + reg_off); 10715 break; 10716 } 10717 if (delay_msecs) { 10718 pci_read_config_dword(bp->pdev, 0, &val); 10719 msleep(delay_msecs); 10720 } 10721 } 10722 10723 static void bnxt_reset_all(struct bnxt *bp) 10724 { 10725 struct bnxt_fw_health *fw_health = bp->fw_health; 10726 int i, rc; 10727 10728 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10729 #ifdef CONFIG_TEE_BNXT_FW 10730 rc = tee_bnxt_fw_load(); 10731 if (rc) 10732 netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc); 10733 bp->fw_reset_timestamp = jiffies; 10734 #endif 10735 return; 10736 } 10737 10738 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 10739 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 10740 bnxt_fw_reset_writel(bp, i); 10741 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 10742 struct hwrm_fw_reset_input req = {0}; 10743 10744 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); 10745 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 10746 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 10747 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 10748 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 10749 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10750 if (rc) 10751 netdev_warn(bp->dev, "Unable to reset FW 
rc=%d\n", rc); 10752 } 10753 bp->fw_reset_timestamp = jiffies; 10754 } 10755 10756 static void bnxt_fw_reset_task(struct work_struct *work) 10757 { 10758 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 10759 int rc; 10760 10761 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10762 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 10763 return; 10764 } 10765 10766 switch (bp->fw_reset_state) { 10767 case BNXT_FW_RESET_STATE_POLL_VF: { 10768 int n = bnxt_get_registered_vfs(bp); 10769 int tmo; 10770 10771 if (n < 0) { 10772 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 10773 n, jiffies_to_msecs(jiffies - 10774 bp->fw_reset_timestamp)); 10775 goto fw_reset_abort; 10776 } else if (n > 0) { 10777 if (time_after(jiffies, bp->fw_reset_timestamp + 10778 (bp->fw_reset_max_dsecs * HZ / 10))) { 10779 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10780 bp->fw_reset_state = 0; 10781 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 10782 n); 10783 return; 10784 } 10785 bnxt_queue_fw_reset_work(bp, HZ / 10); 10786 return; 10787 } 10788 bp->fw_reset_timestamp = jiffies; 10789 rtnl_lock(); 10790 bnxt_fw_reset_close(bp); 10791 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10792 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 10793 tmo = HZ / 10; 10794 } else { 10795 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10796 tmo = bp->fw_reset_min_dsecs * HZ / 10; 10797 } 10798 rtnl_unlock(); 10799 bnxt_queue_fw_reset_work(bp, tmo); 10800 return; 10801 } 10802 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 10803 u32 val; 10804 10805 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 10806 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 10807 !time_after(jiffies, bp->fw_reset_timestamp + 10808 (bp->fw_reset_max_dsecs * HZ / 10))) { 10809 bnxt_queue_fw_reset_work(bp, HZ / 5); 10810 return; 10811 } 10812 10813 if (!bp->fw_health->master) { 10814 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 10815 10816 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10817 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 10818 return; 10819 } 10820 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 10821 } 10822 /* fall through */ 10823 case BNXT_FW_RESET_STATE_RESET_FW: 10824 bnxt_reset_all(bp); 10825 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10826 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 10827 return; 10828 case BNXT_FW_RESET_STATE_ENABLE_DEV: 10829 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 10830 u32 val; 10831 10832 val = bnxt_fw_health_readl(bp, 10833 BNXT_FW_RESET_INPROG_REG); 10834 if (val) 10835 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", 10836 val); 10837 } 10838 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 10839 if (pci_enable_device(bp->pdev)) { 10840 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 10841 goto fw_reset_abort; 10842 } 10843 pci_set_master(bp->pdev); 10844 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 10845 /* fall through */ 10846 case BNXT_FW_RESET_STATE_POLL_FW: 10847 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 10848 rc = __bnxt_hwrm_ver_get(bp, true); 10849 if (rc) { 10850 if (time_after(jiffies, bp->fw_reset_timestamp + 10851 (bp->fw_reset_max_dsecs * HZ / 10))) { 10852 netdev_err(bp->dev, "Firmware reset aborted\n"); 10853 goto fw_reset_abort; 10854 } 10855 bnxt_queue_fw_reset_work(bp, HZ / 5); 10856 return; 10857 } 10858 
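/* Firmware responded to the VER_GET probe; restore the default command timeout and proceed to re-open the device. */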
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 10859 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 10860 /* fall through */ 10861 case BNXT_FW_RESET_STATE_OPENING: 10862 while (!rtnl_trylock()) { 10863 bnxt_queue_fw_reset_work(bp, HZ / 10); 10864 return; 10865 } 10866 rc = bnxt_open(bp->dev); 10867 if (rc) { 10868 netdev_err(bp->dev, "bnxt_open_nic() failed\n"); 10869 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10870 dev_close(bp->dev); 10871 } 10872 10873 bp->fw_reset_state = 0; 10874 /* Make sure fw_reset_state is 0 before clearing the flag */ 10875 smp_mb__before_atomic(); 10876 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10877 bnxt_ulp_start(bp, rc); 10878 if (!rc) 10879 bnxt_reenable_sriov(bp); 10880 bnxt_dl_health_recovery_done(bp); 10881 bnxt_dl_health_status_update(bp, true); 10882 rtnl_unlock(); 10883 break; 10884 } 10885 return; 10886 10887 fw_reset_abort: 10888 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10889 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 10890 bnxt_dl_health_status_update(bp, false); 10891 bp->fw_reset_state = 0; 10892 rtnl_lock(); 10893 dev_close(bp->dev); 10894 rtnl_unlock(); 10895 } 10896 10897 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 10898 { 10899 int rc; 10900 struct bnxt *bp = netdev_priv(dev); 10901 10902 SET_NETDEV_DEV(dev, &pdev->dev); 10903 10904 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 10905 rc = pci_enable_device(pdev); 10906 if (rc) { 10907 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 10908 goto init_err; 10909 } 10910 10911 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 10912 dev_err(&pdev->dev, 10913 "Cannot find PCI device base address, aborting\n"); 10914 rc = -ENODEV; 10915 goto init_err_disable; 10916 } 10917 10918 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 10919 if (rc) { 10920 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 10921 goto init_err_disable; 10922 } 10923 10924 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 10925 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 10926 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 10927 goto init_err_disable; 10928 } 10929 10930 pci_set_master(pdev); 10931 10932 bp->dev = dev; 10933 bp->pdev = pdev; 10934 10935 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 10936 * determines the BAR size. 
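* Only BAR 0 (device registers, bp->bar0) and BAR 4 (bp->bar2) are mapped here.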
10937 */ 10938 bp->bar0 = pci_ioremap_bar(pdev, 0); 10939 if (!bp->bar0) { 10940 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 10941 rc = -ENOMEM; 10942 goto init_err_release; 10943 } 10944 10945 bp->bar2 = pci_ioremap_bar(pdev, 4); 10946 if (!bp->bar2) { 10947 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 10948 rc = -ENOMEM; 10949 goto init_err_release; 10950 } 10951 10952 pci_enable_pcie_error_reporting(pdev); 10953 10954 INIT_WORK(&bp->sp_task, bnxt_sp_task); 10955 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 10956 10957 spin_lock_init(&bp->ntp_fltr_lock); 10958 #if BITS_PER_LONG == 32 10959 spin_lock_init(&bp->db_lock); 10960 #endif 10961 10962 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 10963 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 10964 10965 bnxt_init_dflt_coal(bp); 10966 10967 timer_setup(&bp->timer, bnxt_timer, 0); 10968 bp->current_interval = BNXT_TIMER_INTERVAL; 10969 10970 clear_bit(BNXT_STATE_OPEN, &bp->state); 10971 return 0; 10972 10973 init_err_release: 10974 bnxt_unmap_bars(bp, pdev); 10975 pci_release_regions(pdev); 10976 10977 init_err_disable: 10978 pci_disable_device(pdev); 10979 10980 init_err: 10981 return rc; 10982 } 10983 10984 /* rtnl_lock held */ 10985 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 10986 { 10987 struct sockaddr *addr = p; 10988 struct bnxt *bp = netdev_priv(dev); 10989 int rc = 0; 10990 10991 if (!is_valid_ether_addr(addr->sa_data)) 10992 return -EADDRNOTAVAIL; 10993 10994 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 10995 return 0; 10996 10997 rc = bnxt_approve_mac(bp, addr->sa_data, true); 10998 if (rc) 10999 return rc; 11000 11001 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 11002 if (netif_running(dev)) { 11003 bnxt_close_nic(bp, false, false); 11004 rc = bnxt_open_nic(bp, false, false); 11005 } 11006 11007 return rc; 11008 } 11009 11010 /* rtnl_lock held */ 11011 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 11012 { 11013 struct bnxt *bp = netdev_priv(dev); 11014 11015 if (netif_running(dev)) 11016 bnxt_close_nic(bp, true, false); 11017 11018 dev->mtu = new_mtu; 11019 bnxt_set_ring_params(bp); 11020 11021 if (netif_running(dev)) 11022 return bnxt_open_nic(bp, true, false); 11023 11024 return 0; 11025 } 11026 11027 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 11028 { 11029 struct bnxt *bp = netdev_priv(dev); 11030 bool sh = false; 11031 int rc; 11032 11033 if (tc > bp->max_tc) { 11034 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 11035 tc, bp->max_tc); 11036 return -EINVAL; 11037 } 11038 11039 if (netdev_get_num_tc(dev) == tc) 11040 return 0; 11041 11042 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 11043 sh = true; 11044 11045 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 11046 sh, tc, bp->tx_nr_rings_xdp); 11047 if (rc) 11048 return rc; 11049 11050 /* Needs to close the device and do hw resource re-allocations */ 11051 if (netif_running(bp->dev)) 11052 bnxt_close_nic(bp, true, false); 11053 11054 if (tc) { 11055 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 11056 netdev_set_num_tc(dev, tc); 11057 } else { 11058 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 11059 netdev_reset_tc(dev); 11060 } 11061 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 11062 bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 11063 bp->tx_nr_rings + bp->rx_nr_rings; 11064 11065 if (netif_running(bp->dev)) 11066 return bnxt_open_nic(bp, true, false); 11067 11068 return 0; 11069 } 11070 11071 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 11072 void *cb_priv) 11073 { 11074 struct bnxt *bp = cb_priv; 11075 11076 if (!bnxt_tc_flower_enabled(bp) || 11077 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 11078 return -EOPNOTSUPP; 11079 11080 switch (type) { 11081 case TC_SETUP_CLSFLOWER: 11082 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 11083 default: 11084 return -EOPNOTSUPP; 11085 } 11086 } 11087 11088 LIST_HEAD(bnxt_block_cb_list); 11089 11090 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 11091 void *type_data) 11092 { 11093 struct bnxt *bp = netdev_priv(dev); 11094 11095 switch (type) { 11096 case TC_SETUP_BLOCK: 11097 return flow_block_cb_setup_simple(type_data, 11098 &bnxt_block_cb_list, 11099 bnxt_setup_tc_block_cb, 11100 bp, bp, true); 11101 case TC_SETUP_QDISC_MQPRIO: { 11102 struct tc_mqprio_qopt *mqprio = type_data; 11103 11104 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 11105 11106 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 11107 } 11108 default: 11109 return -EOPNOTSUPP; 11110 } 11111 } 11112 11113 #ifdef CONFIG_RFS_ACCEL 11114 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 11115 struct bnxt_ntuple_filter *f2) 11116 { 11117 struct flow_keys *keys1 = &f1->fkeys; 11118 struct flow_keys *keys2 = &f2->fkeys; 11119 11120 if (keys1->basic.n_proto != keys2->basic.n_proto || 11121 keys1->basic.ip_proto != keys2->basic.ip_proto) 11122 return false; 11123 11124 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 11125 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 11126 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) 11127 return false; 11128 } else { 11129 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, 11130 sizeof(keys1->addrs.v6addrs.src)) || 11131 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, 11132 sizeof(keys1->addrs.v6addrs.dst))) 11133 return false; 11134 } 11135 11136 if (keys1->ports.ports == keys2->ports.ports && 11137 keys1->control.flags == keys2->control.flags && 11138 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 11139 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 11140 return true; 11141 11142 return false; 11143 } 11144 11145 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 11146 u16 rxq_index, u32 flow_id) 11147 { 11148 struct bnxt *bp = netdev_priv(dev); 11149 struct bnxt_ntuple_filter *fltr, *new_fltr; 11150 struct flow_keys *fkeys; 11151 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 11152 int rc = 0, idx, bit_id, l2_idx = 0; 11153 struct hlist_head *head; 11154 u32 flags; 11155 11156 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 11157 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11158 int off = 0, j; 11159 11160 netif_addr_lock_bh(dev); 11161 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 11162 if (ether_addr_equal(eth->h_dest, 11163 vnic->uc_list + off)) { 11164 l2_idx = j + 1; 11165 break; 11166 } 11167 } 11168 netif_addr_unlock_bh(dev); 11169 if (!l2_idx) 11170 return -EINVAL; 11171 } 11172 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 11173 if (!new_fltr) 11174 return -ENOMEM; 11175 11176 fkeys = &new_fltr->fkeys; 11177 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 11178 rc = -EPROTONOSUPPORT; 11179 goto err_free; 
11180 } 11181 11182 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 11183 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 11184 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 11185 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 11186 rc = -EPROTONOSUPPORT; 11187 goto err_free; 11188 } 11189 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 11190 bp->hwrm_spec_code < 0x10601) { 11191 rc = -EPROTONOSUPPORT; 11192 goto err_free; 11193 } 11194 flags = fkeys->control.flags; 11195 if (((flags & FLOW_DIS_ENCAPSULATION) && 11196 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 11197 rc = -EPROTONOSUPPORT; 11198 goto err_free; 11199 } 11200 11201 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 11202 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 11203 11204 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 11205 head = &bp->ntp_fltr_hash_tbl[idx]; 11206 rcu_read_lock(); 11207 hlist_for_each_entry_rcu(fltr, head, hash) { 11208 if (bnxt_fltr_match(fltr, new_fltr)) { 11209 rcu_read_unlock(); 11210 rc = 0; 11211 goto err_free; 11212 } 11213 } 11214 rcu_read_unlock(); 11215 11216 spin_lock_bh(&bp->ntp_fltr_lock); 11217 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 11218 BNXT_NTP_FLTR_MAX_FLTR, 0); 11219 if (bit_id < 0) { 11220 spin_unlock_bh(&bp->ntp_fltr_lock); 11221 rc = -ENOMEM; 11222 goto err_free; 11223 } 11224 11225 new_fltr->sw_id = (u16)bit_id; 11226 new_fltr->flow_id = flow_id; 11227 new_fltr->l2_fltr_idx = l2_idx; 11228 new_fltr->rxq = rxq_index; 11229 hlist_add_head_rcu(&new_fltr->hash, head); 11230 bp->ntp_fltr_count++; 11231 spin_unlock_bh(&bp->ntp_fltr_lock); 11232 11233 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 11234 bnxt_queue_sp_work(bp); 11235 11236 return new_fltr->sw_id; 11237 11238 err_free: 11239 kfree(new_fltr); 11240 return rc; 11241 } 11242 11243 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11244 { 11245 int i; 11246 11247 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 11248 struct hlist_head *head; 11249 struct hlist_node *tmp; 11250 struct bnxt_ntuple_filter *fltr; 11251 int rc; 11252 11253 head = &bp->ntp_fltr_hash_tbl[i]; 11254 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 11255 bool del = false; 11256 11257 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 11258 if (rps_may_expire_flow(bp->dev, fltr->rxq, 11259 fltr->flow_id, 11260 fltr->sw_id)) { 11261 bnxt_hwrm_cfa_ntuple_filter_free(bp, 11262 fltr); 11263 del = true; 11264 } 11265 } else { 11266 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 11267 fltr); 11268 if (rc) 11269 del = true; 11270 else 11271 set_bit(BNXT_FLTR_VALID, &fltr->state); 11272 } 11273 11274 if (del) { 11275 spin_lock_bh(&bp->ntp_fltr_lock); 11276 hlist_del_rcu(&fltr->hash); 11277 bp->ntp_fltr_count--; 11278 spin_unlock_bh(&bp->ntp_fltr_lock); 11279 synchronize_rcu(); 11280 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 11281 kfree(fltr); 11282 } 11283 } 11284 } 11285 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 11286 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 11287 } 11288 11289 #else 11290 11291 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11292 { 11293 } 11294 11295 #endif /* CONFIG_RFS_ACCEL */ 11296 11297 static void bnxt_udp_tunnel_add(struct net_device *dev, 11298 struct udp_tunnel_info *ti) 11299 { 11300 struct bnxt *bp = netdev_priv(dev); 11301 11302 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 11303 return; 11304 11305 if (!netif_running(dev)) 11306 return; 11307 11308 switch (ti->type) { 11309 case UDP_TUNNEL_TYPE_VXLAN: 11310 if 
(bp->vxlan_port_cnt && bp->vxlan_port != ti->port) 11311 return; 11312 11313 bp->vxlan_port_cnt++; 11314 if (bp->vxlan_port_cnt == 1) { 11315 bp->vxlan_port = ti->port; 11316 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 11317 bnxt_queue_sp_work(bp); 11318 } 11319 break; 11320 case UDP_TUNNEL_TYPE_GENEVE: 11321 if (bp->nge_port_cnt && bp->nge_port != ti->port) 11322 return; 11323 11324 bp->nge_port_cnt++; 11325 if (bp->nge_port_cnt == 1) { 11326 bp->nge_port = ti->port; 11327 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); 11328 } 11329 break; 11330 default: 11331 return; 11332 } 11333 11334 bnxt_queue_sp_work(bp); 11335 } 11336 11337 static void bnxt_udp_tunnel_del(struct net_device *dev, 11338 struct udp_tunnel_info *ti) 11339 { 11340 struct bnxt *bp = netdev_priv(dev); 11341 11342 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 11343 return; 11344 11345 if (!netif_running(dev)) 11346 return; 11347 11348 switch (ti->type) { 11349 case UDP_TUNNEL_TYPE_VXLAN: 11350 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) 11351 return; 11352 bp->vxlan_port_cnt--; 11353 11354 if (bp->vxlan_port_cnt != 0) 11355 return; 11356 11357 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 11358 break; 11359 case UDP_TUNNEL_TYPE_GENEVE: 11360 if (!bp->nge_port_cnt || bp->nge_port != ti->port) 11361 return; 11362 bp->nge_port_cnt--; 11363 11364 if (bp->nge_port_cnt != 0) 11365 return; 11366 11367 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); 11368 break; 11369 default: 11370 return; 11371 } 11372 11373 bnxt_queue_sp_work(bp); 11374 } 11375 11376 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 11377 struct net_device *dev, u32 filter_mask, 11378 int nlflags) 11379 { 11380 struct bnxt *bp = netdev_priv(dev); 11381 11382 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 11383 nlflags, filter_mask, NULL); 11384 } 11385 11386 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 11387 u16 flags, struct netlink_ext_ack *extack) 11388 { 11389 struct bnxt *bp = netdev_priv(dev); 11390 struct nlattr *attr, *br_spec; 11391 int rem, rc = 0; 11392 11393 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 11394 return -EOPNOTSUPP; 11395 11396 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 11397 if (!br_spec) 11398 return -EINVAL; 11399 11400 nla_for_each_nested(attr, br_spec, rem) { 11401 u16 mode; 11402 11403 if (nla_type(attr) != IFLA_BRIDGE_MODE) 11404 continue; 11405 11406 if (nla_len(attr) < sizeof(mode)) 11407 return -EINVAL; 11408 11409 mode = nla_get_u16(attr); 11410 if (mode == bp->br_mode) 11411 break; 11412 11413 rc = bnxt_hwrm_set_br_mode(bp, mode); 11414 if (!rc) 11415 bp->br_mode = mode; 11416 break; 11417 } 11418 return rc; 11419 } 11420 11421 int bnxt_get_port_parent_id(struct net_device *dev, 11422 struct netdev_phys_item_id *ppid) 11423 { 11424 struct bnxt *bp = netdev_priv(dev); 11425 11426 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 11427 return -EOPNOTSUPP; 11428 11429 /* The PF and it's VF-reps only support the switchdev framework */ 11430 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 11431 return -EOPNOTSUPP; 11432 11433 ppid->id_len = sizeof(bp->dsn); 11434 memcpy(ppid->id, bp->dsn, ppid->id_len); 11435 11436 return 0; 11437 } 11438 11439 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) 11440 { 11441 struct bnxt *bp = netdev_priv(dev); 11442 11443 return &bp->dl_port; 11444 } 11445 11446 static const struct 
net_device_ops bnxt_netdev_ops = { 11447 .ndo_open = bnxt_open, 11448 .ndo_start_xmit = bnxt_start_xmit, 11449 .ndo_stop = bnxt_close, 11450 .ndo_get_stats64 = bnxt_get_stats64, 11451 .ndo_set_rx_mode = bnxt_set_rx_mode, 11452 .ndo_do_ioctl = bnxt_ioctl, 11453 .ndo_validate_addr = eth_validate_addr, 11454 .ndo_set_mac_address = bnxt_change_mac_addr, 11455 .ndo_change_mtu = bnxt_change_mtu, 11456 .ndo_fix_features = bnxt_fix_features, 11457 .ndo_set_features = bnxt_set_features, 11458 .ndo_tx_timeout = bnxt_tx_timeout, 11459 #ifdef CONFIG_BNXT_SRIOV 11460 .ndo_get_vf_config = bnxt_get_vf_config, 11461 .ndo_set_vf_mac = bnxt_set_vf_mac, 11462 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 11463 .ndo_set_vf_rate = bnxt_set_vf_bw, 11464 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 11465 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 11466 .ndo_set_vf_trust = bnxt_set_vf_trust, 11467 #endif 11468 .ndo_setup_tc = bnxt_setup_tc, 11469 #ifdef CONFIG_RFS_ACCEL 11470 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 11471 #endif 11472 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 11473 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 11474 .ndo_bpf = bnxt_xdp, 11475 .ndo_xdp_xmit = bnxt_xdp_xmit, 11476 .ndo_bridge_getlink = bnxt_bridge_getlink, 11477 .ndo_bridge_setlink = bnxt_bridge_setlink, 11478 .ndo_get_devlink_port = bnxt_get_devlink_port, 11479 }; 11480 11481 static void bnxt_remove_one(struct pci_dev *pdev) 11482 { 11483 struct net_device *dev = pci_get_drvdata(pdev); 11484 struct bnxt *bp = netdev_priv(dev); 11485 11486 if (BNXT_PF(bp)) 11487 bnxt_sriov_disable(bp); 11488 11489 bnxt_dl_fw_reporters_destroy(bp, true); 11490 if (BNXT_PF(bp)) 11491 devlink_port_type_clear(&bp->dl_port); 11492 pci_disable_pcie_error_reporting(pdev); 11493 unregister_netdev(dev); 11494 bnxt_dl_unregister(bp); 11495 bnxt_shutdown_tc(bp); 11496 bnxt_cancel_sp_work(bp); 11497 bp->sp_event = 0; 11498 11499 bnxt_clear_int_mode(bp); 11500 bnxt_hwrm_func_drv_unrgtr(bp); 11501 bnxt_free_hwrm_resources(bp); 11502 bnxt_free_hwrm_short_cmd_req(bp); 11503 bnxt_ethtool_free(bp); 11504 bnxt_dcb_free(bp); 11505 kfree(bp->edev); 11506 bp->edev = NULL; 11507 kfree(bp->fw_health); 11508 bp->fw_health = NULL; 11509 bnxt_cleanup_pci(bp); 11510 bnxt_free_ctx_mem(bp); 11511 kfree(bp->ctx); 11512 bp->ctx = NULL; 11513 bnxt_free_port_stats(bp); 11514 free_netdev(dev); 11515 } 11516 11517 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 11518 { 11519 int rc = 0; 11520 struct bnxt_link_info *link_info = &bp->link_info; 11521 11522 rc = bnxt_hwrm_phy_qcaps(bp); 11523 if (rc) { 11524 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 11525 rc); 11526 return rc; 11527 } 11528 if (!fw_dflt) 11529 return 0; 11530 11531 rc = bnxt_update_link(bp, false); 11532 if (rc) { 11533 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 11534 rc); 11535 return rc; 11536 } 11537 11538 /* Older firmware does not have supported_auto_speeds, so assume 11539 * that all supported speeds can be autonegotiated. 
static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0, max_irq;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
			bnxt_get_ulp_msix_num(bp),
			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		*max_cp = min_t(int, *max_cp, max_irq);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
		/* On P5 chips, max_cp output param should be available NQs */
		*max_cp = max_irq;
	}
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In initial default shared ring setting, each shared ring must have a
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
}

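/* Worked example (illustrative only): on an 8-CPU host, a dual-port
 * adapter has dflt_rings capped below at num_online_cpus() / port_count = 4
 * per port, before firmware resource reservation may trim it further.
 */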
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	if (rc) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
		bp->flags |= BNXT_FLAG_RFS;
		bp->dev->features |= NETIF_F_NTUPLE;
	}
init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

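/* MAC selection: the PF uses the firmware-provided MAC; a VF uses the
 * admin-assigned MAC if one is valid, otherwise a random address that the
 * PF is then asked to approve via bnxt_approve_mac().
 */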
static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

#define BNXT_VPD_LEN	512
static void bnxt_vpd_read_info(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i, len, pos, ro_size;
	ssize_t vpd_size;
	u8 *vpd_data;

	vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
	if (!vpd_data)
		return;

	vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
	if (vpd_size <= 0) {
		netdev_err(bp->dev, "Unable to read VPD\n");
		goto exit;
	}

	i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		netdev_err(bp->dev, "VPD READ-Only not found\n");
		goto exit;
	}

	ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	if (i + ro_size > vpd_size)
		goto exit;

	pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
					PCI_VPD_RO_KEYWORD_PARTNO);
	if (pos < 0)
		goto read_sn;

	len = pci_vpd_info_field_size(&vpd_data[pos]);
	pos += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len + pos > vpd_size)
		goto read_sn;

	strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));

read_sn:
	pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
					PCI_VPD_RO_KEYWORD_SERIALNO);
	if (pos < 0)
		goto exit;

	len = pci_vpd_info_field_size(&vpd_data[pos]);
	pos += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len + pos > vpd_size)
		goto exit;

	strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
exit:
	kfree(vpd_data);
}

static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	u64 qword;

	qword = pci_get_dsn(pdev);
	if (!qword) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	put_unaligned_le64(qword, dsn);

	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}

static int bnxt_map_db_bar(struct bnxt *bp)
{
	if (!bp->db_size)
		return -ENODEV;
	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
	if (!bp->bar1)
		return -ENOMEM;
	return 0;
}

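/* Probe sequence summary: allocate the netdev, initialize the board and
 * HWRM channel, query firmware capabilities, map the doorbell BAR,
 * advertise offload features, reserve default rings, and finally register
 * the netdev and devlink port.
 */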
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	/* Clear any pending DMA transactions from crash kernel
	 * while loading driver in capture kernel.
	 */
	if (is_kdump_kernel()) {
		pci_clear_master(pdev);
		pcie_flr(pdev);
	}

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	if (BNXT_PF(bp))
		bnxt_vpd_read_info(bp);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_CHIP_P5(bp))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_map_db_bar(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
			rc);
		goto init_err_pci_clean;
	}

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	bp->ulp_probe = bnxt_ulp_probe;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	bnxt_dl_register(bp);

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	if (BNXT_PF(bp))
		devlink_port_type_eth_set(&bp->dl_port, bp->dev);
	bnxt_dl_fw_reporters_create(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
	pcie_print_link_status(pdev);

	return 0;

init_err_cleanup:
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_free_hwrm_resources(bp);
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}

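/* On the system power-off path, bnxt_shutdown() arms wake-on-LAN (bp->wol)
 * before dropping the device into D3hot.
 */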
static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);
	bnxt_clear_int_mode(bp);
	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

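/* AER/EEH recovery flow (see bnxt_err_handler below): error_detected()
 * detaches the netdev and requests a reset, slot_reset() re-initializes
 * the function with firmware, and resume() re-attaches the netdev.
 */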
/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err) {
			err = bnxt_hwrm_func_qcaps(bp);
			if (!err && netif_running(netdev))
				err = bnxt_open(netdev);
		}
		bnxt_ulp_start(bp, err);
		if (!err) {
			bnxt_reenable_sriov(bp);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED) {
		if (netif_running(netdev))
			dev_close(netdev);
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected = bnxt_io_error_detected,
	.slot_reset = bnxt_io_slot_reset,
	.resume = bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnxt_pci_tbl,
	.probe = bnxt_init_one,
	.remove = bnxt_remove_one,
	.shutdown = bnxt_shutdown,
	.driver.pm = BNXT_PM_OPS,
	.err_handler = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);