1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2019 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 13 #include <linux/stringify.h> 14 #include <linux/kernel.h> 15 #include <linux/timer.h> 16 #include <linux/errno.h> 17 #include <linux/ioport.h> 18 #include <linux/slab.h> 19 #include <linux/vmalloc.h> 20 #include <linux/interrupt.h> 21 #include <linux/pci.h> 22 #include <linux/netdevice.h> 23 #include <linux/etherdevice.h> 24 #include <linux/skbuff.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/bitops.h> 27 #include <linux/io.h> 28 #include <linux/irq.h> 29 #include <linux/delay.h> 30 #include <asm/byteorder.h> 31 #include <asm/page.h> 32 #include <linux/time.h> 33 #include <linux/mii.h> 34 #include <linux/mdio.h> 35 #include <linux/if.h> 36 #include <linux/if_vlan.h> 37 #include <linux/if_bridge.h> 38 #include <linux/rtc.h> 39 #include <linux/bpf.h> 40 #include <net/ip.h> 41 #include <net/tcp.h> 42 #include <net/udp.h> 43 #include <net/checksum.h> 44 #include <net/ip6_checksum.h> 45 #include <net/udp_tunnel.h> 46 #include <linux/workqueue.h> 47 #include <linux/prefetch.h> 48 #include <linux/cache.h> 49 #include <linux/log2.h> 50 #include <linux/aer.h> 51 #include <linux/bitmap.h> 52 #include <linux/cpu_rmap.h> 53 #include <linux/cpumask.h> 54 #include <net/pkt_cls.h> 55 #include <linux/hwmon.h> 56 #include <linux/hwmon-sysfs.h> 57 #include <net/page_pool.h> 58 59 #include "bnxt_hsi.h" 60 #include "bnxt.h" 61 #include "bnxt_ulp.h" 62 #include "bnxt_sriov.h" 63 #include "bnxt_ethtool.h" 64 #include "bnxt_dcb.h" 65 #include "bnxt_xdp.h" 66 #include "bnxt_vfr.h" 67 #include "bnxt_tc.h" 68 #include "bnxt_devlink.h" 69 #include "bnxt_debugfs.h" 70 71 #define BNXT_TX_TIMEOUT (5 * HZ) 72 73 static const char version[] = 74 "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n"; 75 76 MODULE_LICENSE("GPL"); 77 MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); 78 MODULE_VERSION(DRV_MODULE_VERSION); 79 80 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) 81 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD 82 #define BNXT_RX_COPY_THRESH 256 83 84 #define BNXT_TX_PUSH_THRESH 164 85 86 enum board_idx { 87 BCM57301, 88 BCM57302, 89 BCM57304, 90 BCM57417_NPAR, 91 BCM58700, 92 BCM57311, 93 BCM57312, 94 BCM57402, 95 BCM57404, 96 BCM57406, 97 BCM57402_NPAR, 98 BCM57407, 99 BCM57412, 100 BCM57414, 101 BCM57416, 102 BCM57417, 103 BCM57412_NPAR, 104 BCM57314, 105 BCM57417_SFP, 106 BCM57416_SFP, 107 BCM57404_NPAR, 108 BCM57406_NPAR, 109 BCM57407_SFP, 110 BCM57407_NPAR, 111 BCM57414_NPAR, 112 BCM57416_NPAR, 113 BCM57452, 114 BCM57454, 115 BCM5745x_NPAR, 116 BCM57508, 117 BCM57504, 118 BCM57502, 119 BCM57508_NPAR, 120 BCM57504_NPAR, 121 BCM57502_NPAR, 122 BCM58802, 123 BCM58804, 124 BCM58808, 125 NETXTREME_E_VF, 126 NETXTREME_C_VF, 127 NETXTREME_S_VF, 128 NETXTREME_E_P5_VF, 129 }; 130 131 /* indexed by enum above */ 132 static const struct { 133 char *name; 134 } board_info[] = { 135 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" }, 136 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" }, 137 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 138 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, 139 [BCM58700] = { "Broadcom BCM58700 Nitro 
1Gb/2.5Gb/10Gb Ethernet" }, 140 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" }, 141 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" }, 142 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, 143 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, 144 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, 145 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, 146 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, 147 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, 148 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, 149 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 150 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 151 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 152 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 153 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 154 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 155 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 156 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 157 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 158 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 159 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 160 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 161 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 162 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 163 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 164 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 165 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 166 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 167 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 168 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 169 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 170 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 171 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 172 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 173 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 174 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 175 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 176 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 177 }; 178 179 static const struct pci_device_id bnxt_pci_tbl[] = { 180 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 181 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 182 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 183 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 184 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 185 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 186 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, 187 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 188 { PCI_VDEVICE(BROADCOM, 0x16cd), 
.driver_data = BCM58700 }, 189 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 190 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 191 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 192 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 193 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 194 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 195 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 196 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 197 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 198 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 199 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 200 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 201 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 202 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 203 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 204 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 205 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 206 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 207 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 208 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 209 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 210 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 211 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 212 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 213 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 214 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 215 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 216 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 217 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 218 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR }, 219 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 220 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR }, 221 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR }, 222 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 223 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR }, 224 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 225 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 226 #ifdef CONFIG_BNXT_SRIOV 227 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 228 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 229 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 230 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 231 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 232 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 233 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 234 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 235 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 236 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 237 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 238 #endif 239 { 0 } 240 }; 241 242 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 243 244 static const u16 bnxt_vf_req_snif[] = { 245 HWRM_FUNC_CFG, 246 HWRM_FUNC_VF_CFG, 247 HWRM_PORT_PHY_QCFG, 248 HWRM_CFA_L2_FILTER_ALLOC, 249 }; 250 251 static const u16 
bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}
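/* Main transmit routine.  When the TX ring is empty and the packet fits
 * within bp->tx_push_thresh, the BDs and packet data are written directly
 * through the push doorbell (the "push" fast path below); otherwise the
 * packet is DMA-mapped and described by a long TX BD, an extension BD and
 * one BD per fragment.
 */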
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 802.1Q and 802.1ad VLAN offloads;
		 * the QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed.
*/ 479 tx_buf->skb = NULL; 480 return NETDEV_TX_OK; 481 } 482 length = BNXT_MIN_PKT_SIZE; 483 } 484 485 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 486 487 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { 488 dev_kfree_skb_any(skb); 489 tx_buf->skb = NULL; 490 return NETDEV_TX_OK; 491 } 492 493 dma_unmap_addr_set(tx_buf, mapping, mapping); 494 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 495 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 496 497 txbd->tx_bd_haddr = cpu_to_le64(mapping); 498 499 prod = NEXT_TX(prod); 500 txbd1 = (struct tx_bd_ext *) 501 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 502 503 txbd1->tx_bd_hsize_lflags = 0; 504 if (skb_is_gso(skb)) { 505 u32 hdr_len; 506 507 if (skb->encapsulation) 508 hdr_len = skb_inner_network_offset(skb) + 509 skb_inner_network_header_len(skb) + 510 inner_tcp_hdrlen(skb); 511 else 512 hdr_len = skb_transport_offset(skb) + 513 tcp_hdrlen(skb); 514 515 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | 516 TX_BD_FLAGS_T_IPID | 517 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 518 length = skb_shinfo(skb)->gso_size; 519 txbd1->tx_bd_mss = cpu_to_le32(length); 520 length += hdr_len; 521 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 522 txbd1->tx_bd_hsize_lflags = 523 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 524 txbd1->tx_bd_mss = 0; 525 } 526 527 length >>= 9; 528 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 529 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 530 skb->len); 531 i = 0; 532 goto tx_dma_error; 533 } 534 flags |= bnxt_lhint_arr[length]; 535 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 536 537 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 538 txbd1->tx_bd_cfa_action = 539 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 540 for (i = 0; i < last_frag; i++) { 541 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 542 543 prod = NEXT_TX(prod); 544 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 545 546 len = skb_frag_size(frag); 547 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 548 DMA_TO_DEVICE); 549 550 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 551 goto tx_dma_error; 552 553 tx_buf = &txr->tx_buf_ring[prod]; 554 dma_unmap_addr_set(tx_buf, mapping, mapping); 555 556 txbd->tx_bd_haddr = cpu_to_le64(mapping); 557 558 flags = len << TX_BD_LEN_SHIFT; 559 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 560 } 561 562 flags &= ~TX_BD_LEN; 563 txbd->tx_bd_len_flags_type = 564 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 565 TX_BD_FLAGS_PACKET_END); 566 567 netdev_tx_sent_queue(txq, skb->len); 568 569 /* Sync BD data before updating doorbell */ 570 wmb(); 571 572 prod = NEXT_TX(prod); 573 txr->tx_prod = prod; 574 575 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) 576 bnxt_db_write(bp, &txr->tx_db, prod); 577 578 tx_done: 579 580 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 581 if (netdev_xmit_more() && !tx_buf->is_push) 582 bnxt_db_write(bp, &txr->tx_db, prod); 583 584 netif_tx_stop_queue(txq); 585 586 /* netif_tx_stop_queue() must be done before checking 587 * tx index in bnxt_tx_avail() below, because in 588 * bnxt_tx_int(), we update tx index before checking for 589 * netif_tx_queue_stopped(). 
590 */ 591 smp_mb(); 592 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) 593 netif_tx_wake_queue(txq); 594 } 595 return NETDEV_TX_OK; 596 597 tx_dma_error: 598 last_frag = i; 599 600 /* start back at beginning and unmap skb */ 601 prod = txr->tx_prod; 602 tx_buf = &txr->tx_buf_ring[prod]; 603 tx_buf->skb = NULL; 604 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 605 skb_headlen(skb), PCI_DMA_TODEVICE); 606 prod = NEXT_TX(prod); 607 608 /* unmap remaining mapped pages */ 609 for (i = 0; i < last_frag; i++) { 610 prod = NEXT_TX(prod); 611 tx_buf = &txr->tx_buf_ring[prod]; 612 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 613 skb_frag_size(&skb_shinfo(skb)->frags[i]), 614 PCI_DMA_TODEVICE); 615 } 616 617 dev_kfree_skb_any(skb); 618 return NETDEV_TX_OK; 619 } 620 621 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) 622 { 623 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 624 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 625 u16 cons = txr->tx_cons; 626 struct pci_dev *pdev = bp->pdev; 627 int i; 628 unsigned int tx_bytes = 0; 629 630 for (i = 0; i < nr_pkts; i++) { 631 struct bnxt_sw_tx_bd *tx_buf; 632 struct sk_buff *skb; 633 int j, last; 634 635 tx_buf = &txr->tx_buf_ring[cons]; 636 cons = NEXT_TX(cons); 637 skb = tx_buf->skb; 638 tx_buf->skb = NULL; 639 640 if (tx_buf->is_push) { 641 tx_buf->is_push = 0; 642 goto next_tx_int; 643 } 644 645 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 646 skb_headlen(skb), PCI_DMA_TODEVICE); 647 last = tx_buf->nr_frags; 648 649 for (j = 0; j < last; j++) { 650 cons = NEXT_TX(cons); 651 tx_buf = &txr->tx_buf_ring[cons]; 652 dma_unmap_page( 653 &pdev->dev, 654 dma_unmap_addr(tx_buf, mapping), 655 skb_frag_size(&skb_shinfo(skb)->frags[j]), 656 PCI_DMA_TODEVICE); 657 } 658 659 next_tx_int: 660 cons = NEXT_TX(cons); 661 662 tx_bytes += skb->len; 663 dev_kfree_skb_any(skb); 664 } 665 666 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); 667 txr->tx_cons = cons; 668 669 /* Need to make the tx_cons update visible to bnxt_start_xmit() 670 * before checking for netif_tx_queue_stopped(). Without the 671 * memory barrier, there is a small possibility that bnxt_start_xmit() 672 * will miss it and cause the queue to be stopped forever. 
673 */ 674 smp_mb(); 675 676 if (unlikely(netif_tx_queue_stopped(txq)) && 677 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { 678 __netif_tx_lock(txq, smp_processor_id()); 679 if (netif_tx_queue_stopped(txq) && 680 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 681 txr->dev_state != BNXT_DEV_STATE_CLOSING) 682 netif_tx_wake_queue(txq); 683 __netif_tx_unlock(txq); 684 } 685 } 686 687 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 688 struct bnxt_rx_ring_info *rxr, 689 gfp_t gfp) 690 { 691 struct device *dev = &bp->pdev->dev; 692 struct page *page; 693 694 page = page_pool_dev_alloc_pages(rxr->page_pool); 695 if (!page) 696 return NULL; 697 698 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, 699 DMA_ATTR_WEAK_ORDERING); 700 if (dma_mapping_error(dev, *mapping)) { 701 page_pool_recycle_direct(rxr->page_pool, page); 702 return NULL; 703 } 704 *mapping += bp->rx_dma_offset; 705 return page; 706 } 707 708 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, 709 gfp_t gfp) 710 { 711 u8 *data; 712 struct pci_dev *pdev = bp->pdev; 713 714 data = kmalloc(bp->rx_buf_size, gfp); 715 if (!data) 716 return NULL; 717 718 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 719 bp->rx_buf_use_size, bp->rx_dir, 720 DMA_ATTR_WEAK_ORDERING); 721 722 if (dma_mapping_error(&pdev->dev, *mapping)) { 723 kfree(data); 724 data = NULL; 725 } 726 return data; 727 } 728 729 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 730 u16 prod, gfp_t gfp) 731 { 732 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 733 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; 734 dma_addr_t mapping; 735 736 if (BNXT_RX_PAGE_MODE(bp)) { 737 struct page *page = 738 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); 739 740 if (!page) 741 return -ENOMEM; 742 743 rx_buf->data = page; 744 rx_buf->data_ptr = page_address(page) + bp->rx_offset; 745 } else { 746 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); 747 748 if (!data) 749 return -ENOMEM; 750 751 rx_buf->data = data; 752 rx_buf->data_ptr = data + bp->rx_offset; 753 } 754 rx_buf->mapping = mapping; 755 756 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 757 return 0; 758 } 759 760 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 761 { 762 u16 prod = rxr->rx_prod; 763 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 764 struct rx_bd *cons_bd, *prod_bd; 765 766 prod_rx_buf = &rxr->rx_buf_ring[prod]; 767 cons_rx_buf = &rxr->rx_buf_ring[cons]; 768 769 prod_rx_buf->data = data; 770 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 771 772 prod_rx_buf->mapping = cons_rx_buf->mapping; 773 774 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 775 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 776 777 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 778 } 779 780 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 781 { 782 u16 next, max = rxr->rx_agg_bmap_size; 783 784 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 785 if (next >= max) 786 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 787 return next; 788 } 789 790 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 791 struct bnxt_rx_ring_info *rxr, 792 u16 prod, gfp_t gfp) 793 { 794 struct rx_bd *rxbd = 795 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 796 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 797 struct pci_dev *pdev = bp->pdev; 798 struct page *page; 799 dma_addr_t mapping; 800 u16 sw_prod = rxr->rx_sw_agg_prod; 801 
unsigned int offset = 0; 802 803 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 804 page = rxr->rx_page; 805 if (!page) { 806 page = alloc_page(gfp); 807 if (!page) 808 return -ENOMEM; 809 rxr->rx_page = page; 810 rxr->rx_page_offset = 0; 811 } 812 offset = rxr->rx_page_offset; 813 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; 814 if (rxr->rx_page_offset == PAGE_SIZE) 815 rxr->rx_page = NULL; 816 else 817 get_page(page); 818 } else { 819 page = alloc_page(gfp); 820 if (!page) 821 return -ENOMEM; 822 } 823 824 mapping = dma_map_page_attrs(&pdev->dev, page, offset, 825 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, 826 DMA_ATTR_WEAK_ORDERING); 827 if (dma_mapping_error(&pdev->dev, mapping)) { 828 __free_page(page); 829 return -EIO; 830 } 831 832 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 833 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 834 835 __set_bit(sw_prod, rxr->rx_agg_bmap); 836 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 837 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 838 839 rx_agg_buf->page = page; 840 rx_agg_buf->offset = offset; 841 rx_agg_buf->mapping = mapping; 842 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 843 rxbd->rx_bd_opaque = sw_prod; 844 return 0; 845 } 846 847 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 848 struct bnxt_cp_ring_info *cpr, 849 u16 cp_cons, u16 curr) 850 { 851 struct rx_agg_cmp *agg; 852 853 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 854 agg = (struct rx_agg_cmp *) 855 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 856 return agg; 857 } 858 859 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 860 struct bnxt_rx_ring_info *rxr, 861 u16 agg_id, u16 curr) 862 { 863 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 864 865 return &tpa_info->agg_arr[curr]; 866 } 867 868 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 869 u16 start, u32 agg_bufs, bool tpa) 870 { 871 struct bnxt_napi *bnapi = cpr->bnapi; 872 struct bnxt *bp = bnapi->bp; 873 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 874 u16 prod = rxr->rx_agg_prod; 875 u16 sw_prod = rxr->rx_sw_agg_prod; 876 bool p5_tpa = false; 877 u32 i; 878 879 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 880 p5_tpa = true; 881 882 for (i = 0; i < agg_bufs; i++) { 883 u16 cons; 884 struct rx_agg_cmp *agg; 885 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 886 struct rx_bd *prod_bd; 887 struct page *page; 888 889 if (p5_tpa) 890 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 891 else 892 agg = bnxt_get_agg(bp, cpr, idx, start + i); 893 cons = agg->rx_agg_cmp_opaque; 894 __clear_bit(cons, rxr->rx_agg_bmap); 895 896 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 897 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 898 899 __set_bit(sw_prod, rxr->rx_agg_bmap); 900 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 901 cons_rx_buf = &rxr->rx_agg_ring[cons]; 902 903 /* It is possible for sw_prod to be equal to cons, so 904 * set cons_rx_buf->page to NULL first. 
905 */ 906 page = cons_rx_buf->page; 907 cons_rx_buf->page = NULL; 908 prod_rx_buf->page = page; 909 prod_rx_buf->offset = cons_rx_buf->offset; 910 911 prod_rx_buf->mapping = cons_rx_buf->mapping; 912 913 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 914 915 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 916 prod_bd->rx_bd_opaque = sw_prod; 917 918 prod = NEXT_RX_AGG(prod); 919 sw_prod = NEXT_RX_AGG(sw_prod); 920 } 921 rxr->rx_agg_prod = prod; 922 rxr->rx_sw_agg_prod = sw_prod; 923 } 924 925 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 926 struct bnxt_rx_ring_info *rxr, 927 u16 cons, void *data, u8 *data_ptr, 928 dma_addr_t dma_addr, 929 unsigned int offset_and_len) 930 { 931 unsigned int payload = offset_and_len >> 16; 932 unsigned int len = offset_and_len & 0xffff; 933 skb_frag_t *frag; 934 struct page *page = data; 935 u16 prod = rxr->rx_prod; 936 struct sk_buff *skb; 937 int off, err; 938 939 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 940 if (unlikely(err)) { 941 bnxt_reuse_rx_data(rxr, cons, data); 942 return NULL; 943 } 944 dma_addr -= bp->rx_dma_offset; 945 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, 946 DMA_ATTR_WEAK_ORDERING); 947 948 if (unlikely(!payload)) 949 payload = eth_get_headlen(bp->dev, data_ptr, len); 950 951 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 952 if (!skb) { 953 __free_page(page); 954 return NULL; 955 } 956 957 off = (void *)data_ptr - page_address(page); 958 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); 959 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 960 payload + NET_IP_ALIGN); 961 962 frag = &skb_shinfo(skb)->frags[0]; 963 skb_frag_size_sub(frag, payload); 964 skb_frag_off_add(frag, payload); 965 skb->data_len -= payload; 966 skb->tail += payload; 967 968 return skb; 969 } 970 971 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 972 struct bnxt_rx_ring_info *rxr, u16 cons, 973 void *data, u8 *data_ptr, 974 dma_addr_t dma_addr, 975 unsigned int offset_and_len) 976 { 977 u16 prod = rxr->rx_prod; 978 struct sk_buff *skb; 979 int err; 980 981 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 982 if (unlikely(err)) { 983 bnxt_reuse_rx_data(rxr, cons, data); 984 return NULL; 985 } 986 987 skb = build_skb(data, 0); 988 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 989 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 990 if (!skb) { 991 kfree(data); 992 return NULL; 993 } 994 995 skb_reserve(skb, bp->rx_offset); 996 skb_put(skb, offset_and_len & 0xffff); 997 return skb; 998 } 999 1000 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, 1001 struct bnxt_cp_ring_info *cpr, 1002 struct sk_buff *skb, u16 idx, 1003 u32 agg_bufs, bool tpa) 1004 { 1005 struct bnxt_napi *bnapi = cpr->bnapi; 1006 struct pci_dev *pdev = bp->pdev; 1007 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1008 u16 prod = rxr->rx_agg_prod; 1009 bool p5_tpa = false; 1010 u32 i; 1011 1012 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) 1013 p5_tpa = true; 1014 1015 for (i = 0; i < agg_bufs; i++) { 1016 u16 cons, frag_len; 1017 struct rx_agg_cmp *agg; 1018 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1019 struct page *page; 1020 dma_addr_t mapping; 1021 1022 if (p5_tpa) 1023 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1024 else 1025 agg = bnxt_get_agg(bp, cpr, idx, i); 1026 cons = agg->rx_agg_cmp_opaque; 1027 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1028 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1029 1030 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1031 skb_fill_page_desc(skb, i, 
cons_rx_buf->page, 1032 cons_rx_buf->offset, frag_len); 1033 __clear_bit(cons, rxr->rx_agg_bmap); 1034 1035 /* It is possible for bnxt_alloc_rx_page() to allocate 1036 * a sw_prod index that equals the cons index, so we 1037 * need to clear the cons entry now. 1038 */ 1039 mapping = cons_rx_buf->mapping; 1040 page = cons_rx_buf->page; 1041 cons_rx_buf->page = NULL; 1042 1043 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1044 struct skb_shared_info *shinfo; 1045 unsigned int nr_frags; 1046 1047 shinfo = skb_shinfo(skb); 1048 nr_frags = --shinfo->nr_frags; 1049 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); 1050 1051 dev_kfree_skb(skb); 1052 1053 cons_rx_buf->page = page; 1054 1055 /* Update prod since possibly some pages have been 1056 * allocated already. 1057 */ 1058 rxr->rx_agg_prod = prod; 1059 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1060 return NULL; 1061 } 1062 1063 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1064 PCI_DMA_FROMDEVICE, 1065 DMA_ATTR_WEAK_ORDERING); 1066 1067 skb->data_len += frag_len; 1068 skb->len += frag_len; 1069 skb->truesize += PAGE_SIZE; 1070 1071 prod = NEXT_RX_AGG(prod); 1072 } 1073 rxr->rx_agg_prod = prod; 1074 return skb; 1075 } 1076 1077 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1078 u8 agg_bufs, u32 *raw_cons) 1079 { 1080 u16 last; 1081 struct rx_agg_cmp *agg; 1082 1083 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1084 last = RING_CMP(*raw_cons); 1085 agg = (struct rx_agg_cmp *) 1086 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1087 return RX_AGG_CMP_VALID(agg, *raw_cons); 1088 } 1089 1090 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1091 unsigned int len, 1092 dma_addr_t mapping) 1093 { 1094 struct bnxt *bp = bnapi->bp; 1095 struct pci_dev *pdev = bp->pdev; 1096 struct sk_buff *skb; 1097 1098 skb = napi_alloc_skb(&bnapi->napi, len); 1099 if (!skb) 1100 return NULL; 1101 1102 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 1103 bp->rx_dir); 1104 1105 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1106 len + NET_IP_ALIGN); 1107 1108 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1109 bp->rx_dir); 1110 1111 skb_put(skb, len); 1112 return skb; 1113 } 1114 1115 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1116 u32 *raw_cons, void *cmp) 1117 { 1118 struct rx_cmp *rxcmp = cmp; 1119 u32 tmp_raw_cons = *raw_cons; 1120 u8 cmp_type, agg_bufs = 0; 1121 1122 cmp_type = RX_CMP_TYPE(rxcmp); 1123 1124 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1125 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1126 RX_CMP_AGG_BUFS) >> 1127 RX_CMP_AGG_BUFS_SHIFT; 1128 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1129 struct rx_tpa_end_cmp *tpa_end = cmp; 1130 1131 if (bp->flags & BNXT_FLAG_CHIP_P5) 1132 return 0; 1133 1134 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1135 } 1136 1137 if (agg_bufs) { 1138 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1139 return -EBUSY; 1140 } 1141 *raw_cons = tmp_raw_cons; 1142 return 0; 1143 } 1144 1145 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) 1146 { 1147 if (BNXT_PF(bp)) 1148 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); 1149 else 1150 schedule_delayed_work(&bp->fw_reset_task, delay); 1151 } 1152 1153 static void bnxt_queue_sp_work(struct bnxt *bp) 1154 { 1155 if (BNXT_PF(bp)) 1156 queue_work(bnxt_pf_wq, &bp->sp_task); 1157 else 1158 schedule_work(&bp->sp_task); 1159 } 1160 1161 static void 
bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}
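/* TPA (hardware packet aggregation) handling: a TPA_START completion
 * moves the current RX buffer into rxr->rx_tpa[agg_id] and refills the
 * ring slot with the previously parked buffer; the aggregation buffers
 * that follow are collected and the completed packet is built in
 * bnxt_tpa_end().
 */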
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif
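/* The bnxt_gro_func_*() handlers below adjust the network and transport
 * header offsets of an aggregated TPA packet (and flag tunnel
 * encapsulation) so that tcp_gro_complete() can finish the job; the
 * handler is selected per chip family through bp->gro_func.
 */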
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract 4 from all offsets */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}
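/* GRO fixup for the BCM5750x family.  Unlike bnxt_gro_func_5731x() above,
 * this variant only sets the inner header offsets from hdr_info and marks
 * tunneled packets; it does not recompute the TCP pseudo-header checksum.
 */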
static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ?
dev : bp->dev; 1500 } 1501 1502 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1503 struct bnxt_cp_ring_info *cpr, 1504 u32 *raw_cons, 1505 struct rx_tpa_end_cmp *tpa_end, 1506 struct rx_tpa_end_cmp_ext *tpa_end1, 1507 u8 *event) 1508 { 1509 struct bnxt_napi *bnapi = cpr->bnapi; 1510 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1511 u8 *data_ptr, agg_bufs; 1512 unsigned int len; 1513 struct bnxt_tpa_info *tpa_info; 1514 dma_addr_t mapping; 1515 struct sk_buff *skb; 1516 u16 idx = 0, agg_id; 1517 void *data; 1518 bool gro; 1519 1520 if (unlikely(bnapi->in_reset)) { 1521 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1522 1523 if (rc < 0) 1524 return ERR_PTR(-EBUSY); 1525 return NULL; 1526 } 1527 1528 if (bp->flags & BNXT_FLAG_CHIP_P5) { 1529 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1530 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1531 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1532 tpa_info = &rxr->rx_tpa[agg_id]; 1533 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1534 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1535 agg_bufs, tpa_info->agg_count); 1536 agg_bufs = tpa_info->agg_count; 1537 } 1538 tpa_info->agg_count = 0; 1539 *event |= BNXT_AGG_EVENT; 1540 bnxt_free_agg_idx(rxr, agg_id); 1541 idx = agg_id; 1542 gro = !!(bp->flags & BNXT_FLAG_GRO); 1543 } else { 1544 agg_id = TPA_END_AGG_ID(tpa_end); 1545 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1546 tpa_info = &rxr->rx_tpa[agg_id]; 1547 idx = RING_CMP(*raw_cons); 1548 if (agg_bufs) { 1549 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1550 return ERR_PTR(-EBUSY); 1551 1552 *event |= BNXT_AGG_EVENT; 1553 idx = NEXT_CMP(idx); 1554 } 1555 gro = !!TPA_END_GRO(tpa_end); 1556 } 1557 data = tpa_info->data; 1558 data_ptr = tpa_info->data_ptr; 1559 prefetch(data_ptr); 1560 len = tpa_info->len; 1561 mapping = tpa_info->mapping; 1562 1563 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1564 bnxt_abort_tpa(cpr, idx, agg_bufs); 1565 if (agg_bufs > MAX_SKB_FRAGS) 1566 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1567 agg_bufs, (int)MAX_SKB_FRAGS); 1568 return NULL; 1569 } 1570 1571 if (len <= bp->rx_copy_thresh) { 1572 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1573 if (!skb) { 1574 bnxt_abort_tpa(cpr, idx, agg_bufs); 1575 return NULL; 1576 } 1577 } else { 1578 u8 *new_data; 1579 dma_addr_t new_mapping; 1580 1581 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); 1582 if (!new_data) { 1583 bnxt_abort_tpa(cpr, idx, agg_bufs); 1584 return NULL; 1585 } 1586 1587 tpa_info->data = new_data; 1588 tpa_info->data_ptr = new_data + bp->rx_offset; 1589 tpa_info->mapping = new_mapping; 1590 1591 skb = build_skb(data, 0); 1592 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1593 bp->rx_buf_use_size, bp->rx_dir, 1594 DMA_ATTR_WEAK_ORDERING); 1595 1596 if (!skb) { 1597 kfree(data); 1598 bnxt_abort_tpa(cpr, idx, agg_bufs); 1599 return NULL; 1600 } 1601 skb_reserve(skb, bp->rx_offset); 1602 skb_put(skb, len); 1603 } 1604 1605 if (agg_bufs) { 1606 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); 1607 if (!skb) { 1608 /* Page reuse already handled by bnxt_rx_pages(). 
*/ 1609 return NULL; 1610 } 1611 } 1612 1613 skb->protocol = 1614 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); 1615 1616 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1617 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1618 1619 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1620 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1621 u16 vlan_proto = tpa_info->metadata >> 1622 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1623 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1624 1625 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1626 } 1627 1628 skb_checksum_none_assert(skb); 1629 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1630 skb->ip_summed = CHECKSUM_UNNECESSARY; 1631 skb->csum_level = 1632 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1633 } 1634 1635 if (gro) 1636 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1637 1638 return skb; 1639 } 1640 1641 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1642 struct rx_agg_cmp *rx_agg) 1643 { 1644 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1645 struct bnxt_tpa_info *tpa_info; 1646 1647 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1648 tpa_info = &rxr->rx_tpa[agg_id]; 1649 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1650 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1651 } 1652 1653 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1654 struct sk_buff *skb) 1655 { 1656 if (skb->dev != bp->dev) { 1657 /* this packet belongs to a vf-rep */ 1658 bnxt_vf_rep_rx(bp, skb); 1659 return; 1660 } 1661 skb_record_rx_queue(skb, bnapi->index); 1662 napi_gro_receive(&bnapi->napi, skb); 1663 } 1664 1665 /* returns the following: 1666 * 1 - 1 packet successfully received 1667 * 0 - successful TPA_START, packet not completed yet 1668 * -EBUSY - completion ring does not have all the agg buffers yet 1669 * -ENOMEM - packet aborted due to out of memory 1670 * -EIO - packet aborted due to hw error indicated in BD 1671 */ 1672 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1673 u32 *raw_cons, u8 *event) 1674 { 1675 struct bnxt_napi *bnapi = cpr->bnapi; 1676 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1677 struct net_device *dev = bp->dev; 1678 struct rx_cmp *rxcmp; 1679 struct rx_cmp_ext *rxcmp1; 1680 u32 tmp_raw_cons = *raw_cons; 1681 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1682 struct bnxt_sw_rx_bd *rx_buf; 1683 unsigned int len; 1684 u8 *data_ptr, agg_bufs, cmp_type; 1685 dma_addr_t dma_addr; 1686 struct sk_buff *skb; 1687 void *data; 1688 int rc = 0; 1689 u32 misc; 1690 1691 rxcmp = (struct rx_cmp *) 1692 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1693 1694 cmp_type = RX_CMP_TYPE(rxcmp); 1695 1696 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 1697 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 1698 goto next_rx_no_prod_no_len; 1699 } 1700 1701 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1702 cp_cons = RING_CMP(tmp_raw_cons); 1703 rxcmp1 = (struct rx_cmp_ext *) 1704 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1705 1706 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1707 return -EBUSY; 1708 1709 prod = rxr->rx_prod; 1710 1711 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { 1712 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, 1713 (struct rx_tpa_start_cmp_ext *)rxcmp1); 1714 1715 *event |= BNXT_RX_EVENT; 1716 goto next_rx_no_prod_no_len; 1717 1718 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1719 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 
1720 (struct rx_tpa_end_cmp *)rxcmp, 1721 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1722 1723 if (IS_ERR(skb)) 1724 return -EBUSY; 1725 1726 rc = -ENOMEM; 1727 if (likely(skb)) { 1728 bnxt_deliver_skb(bp, bnapi, skb); 1729 rc = 1; 1730 } 1731 *event |= BNXT_RX_EVENT; 1732 goto next_rx_no_prod_no_len; 1733 } 1734 1735 cons = rxcmp->rx_cmp_opaque; 1736 if (unlikely(cons != rxr->rx_next_cons)) { 1737 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); 1738 1739 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 1740 cons, rxr->rx_next_cons); 1741 bnxt_sched_reset(bp, rxr); 1742 return rc1; 1743 } 1744 rx_buf = &rxr->rx_buf_ring[cons]; 1745 data = rx_buf->data; 1746 data_ptr = rx_buf->data_ptr; 1747 prefetch(data_ptr); 1748 1749 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1750 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1751 1752 if (agg_bufs) { 1753 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1754 return -EBUSY; 1755 1756 cp_cons = NEXT_CMP(cp_cons); 1757 *event |= BNXT_AGG_EVENT; 1758 } 1759 *event |= BNXT_RX_EVENT; 1760 1761 rx_buf->data = NULL; 1762 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1763 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 1764 1765 bnxt_reuse_rx_data(rxr, cons, data); 1766 if (agg_bufs) 1767 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 1768 false); 1769 1770 rc = -EIO; 1771 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 1772 bnapi->cp_ring.rx_buf_errors++; 1773 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { 1774 netdev_warn(bp->dev, "RX buffer error %x\n", 1775 rx_err); 1776 bnxt_sched_reset(bp, rxr); 1777 } 1778 } 1779 goto next_rx_no_len; 1780 } 1781 1782 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 1783 dma_addr = rx_buf->mapping; 1784 1785 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { 1786 rc = 1; 1787 goto next_rx; 1788 } 1789 1790 if (len <= bp->rx_copy_thresh) { 1791 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 1792 bnxt_reuse_rx_data(rxr, cons, data); 1793 if (!skb) { 1794 if (agg_bufs) 1795 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 1796 agg_bufs, false); 1797 rc = -ENOMEM; 1798 goto next_rx; 1799 } 1800 } else { 1801 u32 payload; 1802 1803 if (rx_buf->data_ptr == data_ptr) 1804 payload = misc & RX_CMP_PAYLOAD_OFFSET; 1805 else 1806 payload = 0; 1807 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 1808 payload | len); 1809 if (!skb) { 1810 rc = -ENOMEM; 1811 goto next_rx; 1812 } 1813 } 1814 1815 if (agg_bufs) { 1816 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); 1817 if (!skb) { 1818 rc = -ENOMEM; 1819 goto next_rx; 1820 } 1821 } 1822 1823 if (RX_CMP_HASH_VALID(rxcmp)) { 1824 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 1825 enum pkt_hash_types type = PKT_HASH_TYPE_L4; 1826 1827 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1828 if (hash_type != 1 && hash_type != 3) 1829 type = PKT_HASH_TYPE_L3; 1830 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 1831 } 1832 1833 cfa_code = RX_CMP_CFA_CODE(rxcmp1); 1834 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); 1835 1836 if ((rxcmp1->rx_cmp_flags2 & 1837 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1838 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1839 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1840 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1841 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1842 1843 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1844 } 1845 1846 
skb_checksum_none_assert(skb); 1847 if (RX_CMP_L4_CS_OK(rxcmp1)) { 1848 if (dev->features & NETIF_F_RXCSUM) { 1849 skb->ip_summed = CHECKSUM_UNNECESSARY; 1850 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 1851 } 1852 } else { 1853 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1854 if (dev->features & NETIF_F_RXCSUM) 1855 bnapi->cp_ring.rx_l4_csum_errors++; 1856 } 1857 } 1858 1859 bnxt_deliver_skb(bp, bnapi, skb); 1860 rc = 1; 1861 1862 next_rx: 1863 cpr->rx_packets += 1; 1864 cpr->rx_bytes += len; 1865 1866 next_rx_no_len: 1867 rxr->rx_prod = NEXT_RX(prod); 1868 rxr->rx_next_cons = NEXT_RX(cons); 1869 1870 next_rx_no_prod_no_len: 1871 *raw_cons = tmp_raw_cons; 1872 1873 return rc; 1874 } 1875 1876 /* In netpoll mode, if we are using a combined completion ring, we need to 1877 * discard the rx packets and recycle the buffers. 1878 */ 1879 static int bnxt_force_rx_discard(struct bnxt *bp, 1880 struct bnxt_cp_ring_info *cpr, 1881 u32 *raw_cons, u8 *event) 1882 { 1883 u32 tmp_raw_cons = *raw_cons; 1884 struct rx_cmp_ext *rxcmp1; 1885 struct rx_cmp *rxcmp; 1886 u16 cp_cons; 1887 u8 cmp_type; 1888 1889 cp_cons = RING_CMP(tmp_raw_cons); 1890 rxcmp = (struct rx_cmp *) 1891 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1892 1893 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1894 cp_cons = RING_CMP(tmp_raw_cons); 1895 rxcmp1 = (struct rx_cmp_ext *) 1896 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1897 1898 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1899 return -EBUSY; 1900 1901 cmp_type = RX_CMP_TYPE(rxcmp); 1902 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1903 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1904 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1905 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1906 struct rx_tpa_end_cmp_ext *tpa_end1; 1907 1908 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 1909 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 1910 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 1911 } 1912 return bnxt_rx_pkt(bp, cpr, raw_cons, event); 1913 } 1914 1915 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 1916 { 1917 struct bnxt_fw_health *fw_health = bp->fw_health; 1918 u32 reg = fw_health->regs[reg_idx]; 1919 u32 reg_type, reg_off, val = 0; 1920 1921 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 1922 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 1923 switch (reg_type) { 1924 case BNXT_FW_HEALTH_REG_TYPE_CFG: 1925 pci_read_config_dword(bp->pdev, reg_off, &val); 1926 break; 1927 case BNXT_FW_HEALTH_REG_TYPE_GRC: 1928 reg_off = fw_health->mapped_regs[reg_idx]; 1929 /* fall through */ 1930 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 1931 val = readl(bp->bar0 + reg_off); 1932 break; 1933 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 1934 val = readl(bp->bar1 + reg_off); 1935 break; 1936 } 1937 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 1938 val &= fw_health->fw_reset_inprog_reg_mask; 1939 return val; 1940 } 1941 1942 #define BNXT_GET_EVENT_PORT(data) \ 1943 ((data) & \ 1944 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 1945 1946 static int bnxt_async_event_process(struct bnxt *bp, 1947 struct hwrm_async_event_cmpl *cmpl) 1948 { 1949 u16 event_id = le16_to_cpu(cmpl->event_id); 1950 1951 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 1952 switch (event_id) { 1953 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 1954 u32 data1 = le32_to_cpu(cmpl->event_data1); 1955 struct bnxt_link_info *link_info = &bp->link_info; 1956 1957 if (BNXT_VF(bp)) 1958 goto async_event_process_exit; 1959 1960 /* print unsupported speed warning in forced speed mode only */ 1961 if 
(!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 1962 (data1 & 0x20000)) { 1963 u16 fw_speed = link_info->force_link_speed; 1964 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 1965 1966 if (speed != SPEED_UNKNOWN) 1967 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 1968 speed); 1969 } 1970 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 1971 } 1972 /* fall through */ 1973 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 1974 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 1975 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 1976 /* fall through */ 1977 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 1978 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 1979 break; 1980 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 1981 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 1982 break; 1983 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 1984 u32 data1 = le32_to_cpu(cmpl->event_data1); 1985 u16 port_id = BNXT_GET_EVENT_PORT(data1); 1986 1987 if (BNXT_VF(bp)) 1988 break; 1989 1990 if (bp->pf.port_id != port_id) 1991 break; 1992 1993 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 1994 break; 1995 } 1996 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 1997 if (BNXT_PF(bp)) 1998 goto async_event_process_exit; 1999 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2000 break; 2001 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2002 u32 data1 = le32_to_cpu(cmpl->event_data1); 2003 2004 bp->fw_reset_timestamp = jiffies; 2005 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2006 if (!bp->fw_reset_min_dsecs) 2007 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2008 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2009 if (!bp->fw_reset_max_dsecs) 2010 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2011 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2012 netdev_warn(bp->dev, "Firmware fatal reset event received\n"); 2013 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2014 } else { 2015 netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n", 2016 bp->fw_reset_max_dsecs * 100); 2017 } 2018 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2019 break; 2020 } 2021 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2022 struct bnxt_fw_health *fw_health = bp->fw_health; 2023 u32 data1 = le32_to_cpu(cmpl->event_data1); 2024 2025 if (!fw_health) 2026 goto async_event_process_exit; 2027 2028 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1); 2029 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2030 if (!fw_health->enabled) 2031 break; 2032 2033 if (netif_msg_drv(bp)) 2034 netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n", 2035 fw_health->enabled, fw_health->master, 2036 bnxt_fw_health_readl(bp, 2037 BNXT_FW_RESET_CNT_REG), 2038 bnxt_fw_health_readl(bp, 2039 BNXT_FW_HEALTH_REG)); 2040 fw_health->tmr_multiplier = 2041 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2042 bp->current_interval * 10); 2043 fw_health->tmr_counter = fw_health->tmr_multiplier; 2044 fw_health->last_fw_heartbeat = 2045 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2046 fw_health->last_fw_reset_cnt = 2047 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2048 goto async_event_process_exit; 2049 } 2050 default: 2051 goto async_event_process_exit; 2052 } 2053 bnxt_queue_sp_work(bp); 2054 async_event_process_exit: 2055 bnxt_ulp_async_events(bp, cmpl); 2056 return 0; 2057 } 2058 2059 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
		/* fall through */
	default:
		break;
	}

	return 0;
}

static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	cpr->event_ctr++;
	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}

static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	u8 event = 0;
	struct tx_cmp *txcmp;

	cpr->has_more_work = 0;
	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
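		 * The dma_rmb() that follows keeps reads of the rest of the
		 * DMA'ed completion entry from being reordered before the
		 * valid bit check above.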
2176 */ 2177 dma_rmb(); 2178 cpr->had_work_done = 1; 2179 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 2180 tx_pkts++; 2181 /* return full budget so NAPI will complete. */ 2182 if (unlikely(tx_pkts > bp->tx_wake_thresh)) { 2183 rx_pkts = budget; 2184 raw_cons = NEXT_RAW_CMP(raw_cons); 2185 if (budget) 2186 cpr->has_more_work = 1; 2187 break; 2188 } 2189 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2190 if (likely(budget)) 2191 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2192 else 2193 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2194 &event); 2195 if (likely(rc >= 0)) 2196 rx_pkts += rc; 2197 /* Increment rx_pkts when rc is -ENOMEM to count towards 2198 * the NAPI budget. Otherwise, we may potentially loop 2199 * here forever if we consistently cannot allocate 2200 * buffers. 2201 */ 2202 else if (rc == -ENOMEM && budget) 2203 rx_pkts++; 2204 else if (rc == -EBUSY) /* partial completion */ 2205 break; 2206 } else if (unlikely((TX_CMP_TYPE(txcmp) == 2207 CMPL_BASE_TYPE_HWRM_DONE) || 2208 (TX_CMP_TYPE(txcmp) == 2209 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 2210 (TX_CMP_TYPE(txcmp) == 2211 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 2212 bnxt_hwrm_handler(bp, txcmp); 2213 } 2214 raw_cons = NEXT_RAW_CMP(raw_cons); 2215 2216 if (rx_pkts && rx_pkts == budget) { 2217 cpr->has_more_work = 1; 2218 break; 2219 } 2220 } 2221 2222 if (event & BNXT_REDIRECT_EVENT) 2223 xdp_do_flush_map(); 2224 2225 if (event & BNXT_TX_EVENT) { 2226 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 2227 u16 prod = txr->tx_prod; 2228 2229 /* Sync BD data before updating doorbell */ 2230 wmb(); 2231 2232 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2233 } 2234 2235 cpr->cp_raw_cons = raw_cons; 2236 bnapi->tx_pkts += tx_pkts; 2237 bnapi->events |= event; 2238 return rx_pkts; 2239 } 2240 2241 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) 2242 { 2243 if (bnapi->tx_pkts) { 2244 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); 2245 bnapi->tx_pkts = 0; 2246 } 2247 2248 if (bnapi->events & BNXT_RX_EVENT) { 2249 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2250 2251 if (bnapi->events & BNXT_AGG_EVENT) 2252 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2253 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2254 } 2255 bnapi->events = 0; 2256 } 2257 2258 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2259 int budget) 2260 { 2261 struct bnxt_napi *bnapi = cpr->bnapi; 2262 int rx_pkts; 2263 2264 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2265 2266 /* ACK completion ring before freeing tx ring and producing new 2267 * buffers in rx/agg rings to prevent overflowing the completion 2268 * ring. 
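	 * __bnxt_poll_work_done() below then frees completed TX buffers and
	 * rings the RX/AGG doorbells for any buffers replenished during the
	 * poll.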
2269 */ 2270 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2271 2272 __bnxt_poll_work_done(bp, bnapi); 2273 return rx_pkts; 2274 } 2275 2276 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2277 { 2278 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2279 struct bnxt *bp = bnapi->bp; 2280 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2281 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2282 struct tx_cmp *txcmp; 2283 struct rx_cmp_ext *rxcmp1; 2284 u32 cp_cons, tmp_raw_cons; 2285 u32 raw_cons = cpr->cp_raw_cons; 2286 u32 rx_pkts = 0; 2287 u8 event = 0; 2288 2289 while (1) { 2290 int rc; 2291 2292 cp_cons = RING_CMP(raw_cons); 2293 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2294 2295 if (!TX_CMP_VALID(txcmp, raw_cons)) 2296 break; 2297 2298 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2299 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2300 cp_cons = RING_CMP(tmp_raw_cons); 2301 rxcmp1 = (struct rx_cmp_ext *) 2302 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2303 2304 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2305 break; 2306 2307 /* force an error to recycle the buffer */ 2308 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2309 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2310 2311 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2312 if (likely(rc == -EIO) && budget) 2313 rx_pkts++; 2314 else if (rc == -EBUSY) /* partial completion */ 2315 break; 2316 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2317 CMPL_BASE_TYPE_HWRM_DONE)) { 2318 bnxt_hwrm_handler(bp, txcmp); 2319 } else { 2320 netdev_err(bp->dev, 2321 "Invalid completion received on special ring\n"); 2322 } 2323 raw_cons = NEXT_RAW_CMP(raw_cons); 2324 2325 if (rx_pkts == budget) 2326 break; 2327 } 2328 2329 cpr->cp_raw_cons = raw_cons; 2330 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2331 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2332 2333 if (event & BNXT_AGG_EVENT) 2334 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2335 2336 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2337 napi_complete_done(napi, rx_pkts); 2338 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2339 } 2340 return rx_pkts; 2341 } 2342 2343 static int bnxt_poll(struct napi_struct *napi, int budget) 2344 { 2345 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2346 struct bnxt *bp = bnapi->bp; 2347 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2348 int work_done = 0; 2349 2350 while (1) { 2351 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 2352 2353 if (work_done >= budget) { 2354 if (!budget) 2355 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2356 break; 2357 } 2358 2359 if (!bnxt_has_work(bp, cpr)) { 2360 if (napi_complete_done(napi, work_done)) 2361 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2362 break; 2363 } 2364 } 2365 if (bp->flags & BNXT_FLAG_DIM) { 2366 struct dim_sample dim_sample = {}; 2367 2368 dim_update_sample(cpr->event_ctr, 2369 cpr->rx_packets, 2370 cpr->rx_bytes, 2371 &dim_sample); 2372 net_dim(&cpr->dim, dim_sample); 2373 } 2374 return work_done; 2375 } 2376 2377 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 2378 { 2379 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2380 int i, work_done = 0; 2381 2382 for (i = 0; i < 2; i++) { 2383 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2384 2385 if (cpr2) { 2386 work_done += __bnxt_poll_work(bp, cpr2, 2387 budget - work_done); 2388 cpr->has_more_work |= cpr2->has_more_work; 2389 } 2390 } 2391 return work_done; 2392 } 2393 2394 static void __bnxt_poll_cqs_done(struct bnxt 
*bp, struct bnxt_napi *bnapi,
				 u64 dbr_type, bool all)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i;

	for (i = 0; i < 2; i++) {
		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
		struct bnxt_db_info *db;

		if (cpr2 && (all || cpr2->had_work_done)) {
			db = &cpr2->cp_db;
			writeq(db->db_key64 | dbr_type |
			       RING_CMP(cpr2->cp_raw_cons), db->doorbell);
			cpr2->had_work_done = 0;
		}
	}
	__bnxt_poll_work_done(bp, bnapi);
}

static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	struct bnxt *bp = bnapi->bp;
	struct nqe_cn *nqcmp;
	int work_done = 0;
	u32 cons;

	if (cpr->has_more_work) {
		cpr->has_more_work = 0;
		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
		if (cpr->has_more_work) {
			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
			return work_done;
		}
		__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
		if (napi_complete_done(napi, work_done))
			BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
		return work_done;
	}
	while (1) {
		cons = RING_CMP(raw_cons);
		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
					     false);
			cpr->cp_raw_cons = raw_cons;
			if (napi_complete_done(napi, work_done))
				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
						  cpr->cp_raw_cons);
			return work_done;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
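		 * On P5 chips the NQ entry only identifies which completion
		 * ring has work: cq_handle_low indexes cp_ring_arr[] and
		 * __bnxt_poll_work() below drains that CQ.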
2452 */ 2453 dma_rmb(); 2454 2455 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { 2456 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 2457 struct bnxt_cp_ring_info *cpr2; 2458 2459 cpr2 = cpr->cp_ring_arr[idx]; 2460 work_done += __bnxt_poll_work(bp, cpr2, 2461 budget - work_done); 2462 cpr->has_more_work = cpr2->has_more_work; 2463 } else { 2464 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 2465 } 2466 raw_cons = NEXT_RAW_CMP(raw_cons); 2467 if (cpr->has_more_work) 2468 break; 2469 } 2470 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true); 2471 cpr->cp_raw_cons = raw_cons; 2472 return work_done; 2473 } 2474 2475 static void bnxt_free_tx_skbs(struct bnxt *bp) 2476 { 2477 int i, max_idx; 2478 struct pci_dev *pdev = bp->pdev; 2479 2480 if (!bp->tx_ring) 2481 return; 2482 2483 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2484 for (i = 0; i < bp->tx_nr_rings; i++) { 2485 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2486 int j; 2487 2488 for (j = 0; j < max_idx;) { 2489 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2490 struct sk_buff *skb; 2491 int k, last; 2492 2493 if (i < bp->tx_nr_rings_xdp && 2494 tx_buf->action == XDP_REDIRECT) { 2495 dma_unmap_single(&pdev->dev, 2496 dma_unmap_addr(tx_buf, mapping), 2497 dma_unmap_len(tx_buf, len), 2498 PCI_DMA_TODEVICE); 2499 xdp_return_frame(tx_buf->xdpf); 2500 tx_buf->action = 0; 2501 tx_buf->xdpf = NULL; 2502 j++; 2503 continue; 2504 } 2505 2506 skb = tx_buf->skb; 2507 if (!skb) { 2508 j++; 2509 continue; 2510 } 2511 2512 tx_buf->skb = NULL; 2513 2514 if (tx_buf->is_push) { 2515 dev_kfree_skb(skb); 2516 j += 2; 2517 continue; 2518 } 2519 2520 dma_unmap_single(&pdev->dev, 2521 dma_unmap_addr(tx_buf, mapping), 2522 skb_headlen(skb), 2523 PCI_DMA_TODEVICE); 2524 2525 last = tx_buf->nr_frags; 2526 j += 2; 2527 for (k = 0; k < last; k++, j++) { 2528 int ring_idx = j & bp->tx_ring_mask; 2529 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2530 2531 tx_buf = &txr->tx_buf_ring[ring_idx]; 2532 dma_unmap_page( 2533 &pdev->dev, 2534 dma_unmap_addr(tx_buf, mapping), 2535 skb_frag_size(frag), PCI_DMA_TODEVICE); 2536 } 2537 dev_kfree_skb(skb); 2538 } 2539 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 2540 } 2541 } 2542 2543 static void bnxt_free_rx_skbs(struct bnxt *bp) 2544 { 2545 int i, max_idx, max_agg_idx; 2546 struct pci_dev *pdev = bp->pdev; 2547 2548 if (!bp->rx_ring) 2549 return; 2550 2551 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 2552 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 2553 for (i = 0; i < bp->rx_nr_rings; i++) { 2554 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2555 struct bnxt_tpa_idx_map *map; 2556 int j; 2557 2558 if (rxr->rx_tpa) { 2559 for (j = 0; j < bp->max_tpa; j++) { 2560 struct bnxt_tpa_info *tpa_info = 2561 &rxr->rx_tpa[j]; 2562 u8 *data = tpa_info->data; 2563 2564 if (!data) 2565 continue; 2566 2567 dma_unmap_single_attrs(&pdev->dev, 2568 tpa_info->mapping, 2569 bp->rx_buf_use_size, 2570 bp->rx_dir, 2571 DMA_ATTR_WEAK_ORDERING); 2572 2573 tpa_info->data = NULL; 2574 2575 kfree(data); 2576 } 2577 } 2578 2579 for (j = 0; j < max_idx; j++) { 2580 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 2581 dma_addr_t mapping = rx_buf->mapping; 2582 void *data = rx_buf->data; 2583 2584 if (!data) 2585 continue; 2586 2587 rx_buf->data = NULL; 2588 2589 if (BNXT_RX_PAGE_MODE(bp)) { 2590 mapping -= bp->rx_dma_offset; 2591 dma_unmap_page_attrs(&pdev->dev, mapping, 2592 PAGE_SIZE, bp->rx_dir, 2593 DMA_ATTR_WEAK_ORDERING); 2594 page_pool_recycle_direct(rxr->page_pool, data); 2595 } else { 2596 
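				/* Non page-mode rx buffers are individually
				 * mapped data buffers; unmap them and free
				 * with kfree().
				 */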
dma_unmap_single_attrs(&pdev->dev, mapping, 2597 bp->rx_buf_use_size, 2598 bp->rx_dir, 2599 DMA_ATTR_WEAK_ORDERING); 2600 kfree(data); 2601 } 2602 } 2603 2604 for (j = 0; j < max_agg_idx; j++) { 2605 struct bnxt_sw_rx_agg_bd *rx_agg_buf = 2606 &rxr->rx_agg_ring[j]; 2607 struct page *page = rx_agg_buf->page; 2608 2609 if (!page) 2610 continue; 2611 2612 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, 2613 BNXT_RX_PAGE_SIZE, 2614 PCI_DMA_FROMDEVICE, 2615 DMA_ATTR_WEAK_ORDERING); 2616 2617 rx_agg_buf->page = NULL; 2618 __clear_bit(j, rxr->rx_agg_bmap); 2619 2620 __free_page(page); 2621 } 2622 if (rxr->rx_page) { 2623 __free_page(rxr->rx_page); 2624 rxr->rx_page = NULL; 2625 } 2626 map = rxr->rx_tpa_idx_map; 2627 if (map) 2628 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 2629 } 2630 } 2631 2632 static void bnxt_free_skbs(struct bnxt *bp) 2633 { 2634 bnxt_free_tx_skbs(bp); 2635 bnxt_free_rx_skbs(bp); 2636 } 2637 2638 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2639 { 2640 struct pci_dev *pdev = bp->pdev; 2641 int i; 2642 2643 for (i = 0; i < rmem->nr_pages; i++) { 2644 if (!rmem->pg_arr[i]) 2645 continue; 2646 2647 dma_free_coherent(&pdev->dev, rmem->page_size, 2648 rmem->pg_arr[i], rmem->dma_arr[i]); 2649 2650 rmem->pg_arr[i] = NULL; 2651 } 2652 if (rmem->pg_tbl) { 2653 size_t pg_tbl_size = rmem->nr_pages * 8; 2654 2655 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2656 pg_tbl_size = rmem->page_size; 2657 dma_free_coherent(&pdev->dev, pg_tbl_size, 2658 rmem->pg_tbl, rmem->pg_tbl_map); 2659 rmem->pg_tbl = NULL; 2660 } 2661 if (rmem->vmem_size && *rmem->vmem) { 2662 vfree(*rmem->vmem); 2663 *rmem->vmem = NULL; 2664 } 2665 } 2666 2667 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2668 { 2669 struct pci_dev *pdev = bp->pdev; 2670 u64 valid_bit = 0; 2671 int i; 2672 2673 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 2674 valid_bit = PTU_PTE_VALID; 2675 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 2676 size_t pg_tbl_size = rmem->nr_pages * 8; 2677 2678 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2679 pg_tbl_size = rmem->page_size; 2680 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 2681 &rmem->pg_tbl_map, 2682 GFP_KERNEL); 2683 if (!rmem->pg_tbl) 2684 return -ENOMEM; 2685 } 2686 2687 for (i = 0; i < rmem->nr_pages; i++) { 2688 u64 extra_bits = valid_bit; 2689 2690 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2691 rmem->page_size, 2692 &rmem->dma_arr[i], 2693 GFP_KERNEL); 2694 if (!rmem->pg_arr[i]) 2695 return -ENOMEM; 2696 2697 if (rmem->init_val) 2698 memset(rmem->pg_arr[i], rmem->init_val, 2699 rmem->page_size); 2700 if (rmem->nr_pages > 1 || rmem->depth > 0) { 2701 if (i == rmem->nr_pages - 2 && 2702 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2703 extra_bits |= PTU_PTE_NEXT_TO_LAST; 2704 else if (i == rmem->nr_pages - 1 && 2705 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2706 extra_bits |= PTU_PTE_LAST; 2707 rmem->pg_tbl[i] = 2708 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 2709 } 2710 } 2711 2712 if (rmem->vmem_size) { 2713 *rmem->vmem = vzalloc(rmem->vmem_size); 2714 if (!(*rmem->vmem)) 2715 return -ENOMEM; 2716 } 2717 return 0; 2718 } 2719 2720 static void bnxt_free_tpa_info(struct bnxt *bp) 2721 { 2722 int i; 2723 2724 for (i = 0; i < bp->rx_nr_rings; i++) { 2725 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2726 2727 kfree(rxr->rx_tpa_idx_map); 2728 rxr->rx_tpa_idx_map = NULL; 2729 if (rxr->rx_tpa) { 2730 kfree(rxr->rx_tpa[0].agg_arr); 2731 
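			/* agg_arr freed above is a single array shared by all
			 * TPA slots (see bnxt_alloc_tpa_info()), so only the
			 * slot 0 pointer owns the allocation.
			 */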
rxr->rx_tpa[0].agg_arr = NULL; 2732 } 2733 kfree(rxr->rx_tpa); 2734 rxr->rx_tpa = NULL; 2735 } 2736 } 2737 2738 static int bnxt_alloc_tpa_info(struct bnxt *bp) 2739 { 2740 int i, j, total_aggs = 0; 2741 2742 bp->max_tpa = MAX_TPA; 2743 if (bp->flags & BNXT_FLAG_CHIP_P5) { 2744 if (!bp->max_tpa_v2) 2745 return 0; 2746 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 2747 total_aggs = bp->max_tpa * MAX_SKB_FRAGS; 2748 } 2749 2750 for (i = 0; i < bp->rx_nr_rings; i++) { 2751 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2752 struct rx_agg_cmp *agg; 2753 2754 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 2755 GFP_KERNEL); 2756 if (!rxr->rx_tpa) 2757 return -ENOMEM; 2758 2759 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 2760 continue; 2761 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); 2762 rxr->rx_tpa[0].agg_arr = agg; 2763 if (!agg) 2764 return -ENOMEM; 2765 for (j = 1; j < bp->max_tpa; j++) 2766 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; 2767 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 2768 GFP_KERNEL); 2769 if (!rxr->rx_tpa_idx_map) 2770 return -ENOMEM; 2771 } 2772 return 0; 2773 } 2774 2775 static void bnxt_free_rx_rings(struct bnxt *bp) 2776 { 2777 int i; 2778 2779 if (!bp->rx_ring) 2780 return; 2781 2782 bnxt_free_tpa_info(bp); 2783 for (i = 0; i < bp->rx_nr_rings; i++) { 2784 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2785 struct bnxt_ring_struct *ring; 2786 2787 if (rxr->xdp_prog) 2788 bpf_prog_put(rxr->xdp_prog); 2789 2790 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 2791 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2792 2793 page_pool_destroy(rxr->page_pool); 2794 rxr->page_pool = NULL; 2795 2796 kfree(rxr->rx_agg_bmap); 2797 rxr->rx_agg_bmap = NULL; 2798 2799 ring = &rxr->rx_ring_struct; 2800 bnxt_free_ring(bp, &ring->ring_mem); 2801 2802 ring = &rxr->rx_agg_ring_struct; 2803 bnxt_free_ring(bp, &ring->ring_mem); 2804 } 2805 } 2806 2807 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 2808 struct bnxt_rx_ring_info *rxr) 2809 { 2810 struct page_pool_params pp = { 0 }; 2811 2812 pp.pool_size = bp->rx_ring_size; 2813 pp.nid = dev_to_node(&bp->pdev->dev); 2814 pp.dev = &bp->pdev->dev; 2815 pp.dma_dir = DMA_BIDIRECTIONAL; 2816 2817 rxr->page_pool = page_pool_create(&pp); 2818 if (IS_ERR(rxr->page_pool)) { 2819 int err = PTR_ERR(rxr->page_pool); 2820 2821 rxr->page_pool = NULL; 2822 return err; 2823 } 2824 return 0; 2825 } 2826 2827 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2828 { 2829 int i, rc = 0, agg_rings = 0; 2830 2831 if (!bp->rx_ring) 2832 return -ENOMEM; 2833 2834 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2835 agg_rings = 1; 2836 2837 for (i = 0; i < bp->rx_nr_rings; i++) { 2838 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2839 struct bnxt_ring_struct *ring; 2840 2841 ring = &rxr->rx_ring_struct; 2842 2843 rc = bnxt_alloc_rx_page_pool(bp, rxr); 2844 if (rc) 2845 return rc; 2846 2847 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); 2848 if (rc < 0) 2849 return rc; 2850 2851 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 2852 MEM_TYPE_PAGE_POOL, 2853 rxr->page_pool); 2854 if (rc) { 2855 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2856 return rc; 2857 } 2858 2859 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2860 if (rc) 2861 return rc; 2862 2863 ring->grp_idx = i; 2864 if (agg_rings) { 2865 u16 mem_size; 2866 2867 ring = &rxr->rx_agg_ring_struct; 2868 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2869 if (rc) 2870 return rc; 2871 2872 ring->grp_idx = i; 2873 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2874 mem_size = rxr->rx_agg_bmap_size / 8; 
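			/* One bit per aggregation ring entry, converted to
			 * bytes for the kzalloc() below.
			 */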
2875 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2876 if (!rxr->rx_agg_bmap) 2877 return -ENOMEM; 2878 } 2879 } 2880 if (bp->flags & BNXT_FLAG_TPA) 2881 rc = bnxt_alloc_tpa_info(bp); 2882 return rc; 2883 } 2884 2885 static void bnxt_free_tx_rings(struct bnxt *bp) 2886 { 2887 int i; 2888 struct pci_dev *pdev = bp->pdev; 2889 2890 if (!bp->tx_ring) 2891 return; 2892 2893 for (i = 0; i < bp->tx_nr_rings; i++) { 2894 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2895 struct bnxt_ring_struct *ring; 2896 2897 if (txr->tx_push) { 2898 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2899 txr->tx_push, txr->tx_push_mapping); 2900 txr->tx_push = NULL; 2901 } 2902 2903 ring = &txr->tx_ring_struct; 2904 2905 bnxt_free_ring(bp, &ring->ring_mem); 2906 } 2907 } 2908 2909 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2910 { 2911 int i, j, rc; 2912 struct pci_dev *pdev = bp->pdev; 2913 2914 bp->tx_push_size = 0; 2915 if (bp->tx_push_thresh) { 2916 int push_size; 2917 2918 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2919 bp->tx_push_thresh); 2920 2921 if (push_size > 256) { 2922 push_size = 0; 2923 bp->tx_push_thresh = 0; 2924 } 2925 2926 bp->tx_push_size = push_size; 2927 } 2928 2929 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2930 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2931 struct bnxt_ring_struct *ring; 2932 u8 qidx; 2933 2934 ring = &txr->tx_ring_struct; 2935 2936 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2937 if (rc) 2938 return rc; 2939 2940 ring->grp_idx = txr->bnapi->index; 2941 if (bp->tx_push_size) { 2942 dma_addr_t mapping; 2943 2944 /* One pre-allocated DMA buffer to backup 2945 * TX push operation 2946 */ 2947 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2948 bp->tx_push_size, 2949 &txr->tx_push_mapping, 2950 GFP_KERNEL); 2951 2952 if (!txr->tx_push) 2953 return -ENOMEM; 2954 2955 mapping = txr->tx_push_mapping + 2956 sizeof(struct tx_push_bd); 2957 txr->data_mapping = cpu_to_le64(mapping); 2958 } 2959 qidx = bp->tc_to_qidx[j]; 2960 ring->queue_id = bp->q_info[qidx].queue_id; 2961 if (i < bp->tx_nr_rings_xdp) 2962 continue; 2963 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2964 j++; 2965 } 2966 return 0; 2967 } 2968 2969 static void bnxt_free_cp_rings(struct bnxt *bp) 2970 { 2971 int i; 2972 2973 if (!bp->bnapi) 2974 return; 2975 2976 for (i = 0; i < bp->cp_nr_rings; i++) { 2977 struct bnxt_napi *bnapi = bp->bnapi[i]; 2978 struct bnxt_cp_ring_info *cpr; 2979 struct bnxt_ring_struct *ring; 2980 int j; 2981 2982 if (!bnapi) 2983 continue; 2984 2985 cpr = &bnapi->cp_ring; 2986 ring = &cpr->cp_ring_struct; 2987 2988 bnxt_free_ring(bp, &ring->ring_mem); 2989 2990 for (j = 0; j < 2; j++) { 2991 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 2992 2993 if (cpr2) { 2994 ring = &cpr2->cp_ring_struct; 2995 bnxt_free_ring(bp, &ring->ring_mem); 2996 kfree(cpr2); 2997 cpr->cp_ring_arr[j] = NULL; 2998 } 2999 } 3000 } 3001 } 3002 3003 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) 3004 { 3005 struct bnxt_ring_mem_info *rmem; 3006 struct bnxt_ring_struct *ring; 3007 struct bnxt_cp_ring_info *cpr; 3008 int rc; 3009 3010 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); 3011 if (!cpr) 3012 return NULL; 3013 3014 ring = &cpr->cp_ring_struct; 3015 rmem = &ring->ring_mem; 3016 rmem->nr_pages = bp->cp_nr_pages; 3017 rmem->page_size = HW_CMPD_RING_SIZE; 3018 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3019 rmem->dma_arr = cpr->cp_desc_mapping; 3020 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3021 rc = bnxt_alloc_ring(bp, rmem); 3022 if (rc) { 
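		/* bnxt_alloc_ring() may have partially allocated ring pages;
		 * free whatever was set up before discarding the sub-ring.
		 */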
3023 bnxt_free_ring(bp, rmem); 3024 kfree(cpr); 3025 cpr = NULL; 3026 } 3027 return cpr; 3028 } 3029 3030 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3031 { 3032 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3033 int i, rc, ulp_base_vec, ulp_msix; 3034 3035 ulp_msix = bnxt_get_ulp_msix_num(bp); 3036 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 3037 for (i = 0; i < bp->cp_nr_rings; i++) { 3038 struct bnxt_napi *bnapi = bp->bnapi[i]; 3039 struct bnxt_cp_ring_info *cpr; 3040 struct bnxt_ring_struct *ring; 3041 3042 if (!bnapi) 3043 continue; 3044 3045 cpr = &bnapi->cp_ring; 3046 cpr->bnapi = bnapi; 3047 ring = &cpr->cp_ring_struct; 3048 3049 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3050 if (rc) 3051 return rc; 3052 3053 if (ulp_msix && i >= ulp_base_vec) 3054 ring->map_idx = i + ulp_msix; 3055 else 3056 ring->map_idx = i; 3057 3058 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 3059 continue; 3060 3061 if (i < bp->rx_nr_rings) { 3062 struct bnxt_cp_ring_info *cpr2 = 3063 bnxt_alloc_cp_sub_ring(bp); 3064 3065 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; 3066 if (!cpr2) 3067 return -ENOMEM; 3068 cpr2->bnapi = bnapi; 3069 } 3070 if ((sh && i < bp->tx_nr_rings) || 3071 (!sh && i >= bp->rx_nr_rings)) { 3072 struct bnxt_cp_ring_info *cpr2 = 3073 bnxt_alloc_cp_sub_ring(bp); 3074 3075 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; 3076 if (!cpr2) 3077 return -ENOMEM; 3078 cpr2->bnapi = bnapi; 3079 } 3080 } 3081 return 0; 3082 } 3083 3084 static void bnxt_init_ring_struct(struct bnxt *bp) 3085 { 3086 int i; 3087 3088 for (i = 0; i < bp->cp_nr_rings; i++) { 3089 struct bnxt_napi *bnapi = bp->bnapi[i]; 3090 struct bnxt_ring_mem_info *rmem; 3091 struct bnxt_cp_ring_info *cpr; 3092 struct bnxt_rx_ring_info *rxr; 3093 struct bnxt_tx_ring_info *txr; 3094 struct bnxt_ring_struct *ring; 3095 3096 if (!bnapi) 3097 continue; 3098 3099 cpr = &bnapi->cp_ring; 3100 ring = &cpr->cp_ring_struct; 3101 rmem = &ring->ring_mem; 3102 rmem->nr_pages = bp->cp_nr_pages; 3103 rmem->page_size = HW_CMPD_RING_SIZE; 3104 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3105 rmem->dma_arr = cpr->cp_desc_mapping; 3106 rmem->vmem_size = 0; 3107 3108 rxr = bnapi->rx_ring; 3109 if (!rxr) 3110 goto skip_rx; 3111 3112 ring = &rxr->rx_ring_struct; 3113 rmem = &ring->ring_mem; 3114 rmem->nr_pages = bp->rx_nr_pages; 3115 rmem->page_size = HW_RXBD_RING_SIZE; 3116 rmem->pg_arr = (void **)rxr->rx_desc_ring; 3117 rmem->dma_arr = rxr->rx_desc_mapping; 3118 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 3119 rmem->vmem = (void **)&rxr->rx_buf_ring; 3120 3121 ring = &rxr->rx_agg_ring_struct; 3122 rmem = &ring->ring_mem; 3123 rmem->nr_pages = bp->rx_agg_nr_pages; 3124 rmem->page_size = HW_RXBD_RING_SIZE; 3125 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 3126 rmem->dma_arr = rxr->rx_agg_desc_mapping; 3127 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 3128 rmem->vmem = (void **)&rxr->rx_agg_ring; 3129 3130 skip_rx: 3131 txr = bnapi->tx_ring; 3132 if (!txr) 3133 continue; 3134 3135 ring = &txr->tx_ring_struct; 3136 rmem = &ring->ring_mem; 3137 rmem->nr_pages = bp->tx_nr_pages; 3138 rmem->page_size = HW_RXBD_RING_SIZE; 3139 rmem->pg_arr = (void **)txr->tx_desc_ring; 3140 rmem->dma_arr = txr->tx_desc_mapping; 3141 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 3142 rmem->vmem = (void **)&txr->tx_buf_ring; 3143 } 3144 } 3145 3146 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 3147 { 3148 int i; 3149 u32 prod; 3150 struct rx_bd **rx_buf_ring; 3151 3152 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 3153 for 
(i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 3154 int j; 3155 struct rx_bd *rxbd; 3156 3157 rxbd = rx_buf_ring[i]; 3158 if (!rxbd) 3159 continue; 3160 3161 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 3162 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 3163 rxbd->rx_bd_opaque = prod; 3164 } 3165 } 3166 } 3167 3168 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 3169 { 3170 struct net_device *dev = bp->dev; 3171 struct bnxt_rx_ring_info *rxr; 3172 struct bnxt_ring_struct *ring; 3173 u32 prod, type; 3174 int i; 3175 3176 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 3177 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 3178 3179 if (NET_IP_ALIGN == 2) 3180 type |= RX_BD_FLAGS_SOP; 3181 3182 rxr = &bp->rx_ring[ring_nr]; 3183 ring = &rxr->rx_ring_struct; 3184 bnxt_init_rxbd_pages(ring, type); 3185 3186 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 3187 bpf_prog_add(bp->xdp_prog, 1); 3188 rxr->xdp_prog = bp->xdp_prog; 3189 } 3190 prod = rxr->rx_prod; 3191 for (i = 0; i < bp->rx_ring_size; i++) { 3192 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { 3193 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 3194 ring_nr, i, bp->rx_ring_size); 3195 break; 3196 } 3197 prod = NEXT_RX(prod); 3198 } 3199 rxr->rx_prod = prod; 3200 ring->fw_ring_id = INVALID_HW_RING_ID; 3201 3202 ring = &rxr->rx_agg_ring_struct; 3203 ring->fw_ring_id = INVALID_HW_RING_ID; 3204 3205 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 3206 return 0; 3207 3208 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 3209 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 3210 3211 bnxt_init_rxbd_pages(ring, type); 3212 3213 prod = rxr->rx_agg_prod; 3214 for (i = 0; i < bp->rx_agg_ring_size; i++) { 3215 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { 3216 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 3217 ring_nr, i, bp->rx_ring_size); 3218 break; 3219 } 3220 prod = NEXT_RX_AGG(prod); 3221 } 3222 rxr->rx_agg_prod = prod; 3223 3224 if (bp->flags & BNXT_FLAG_TPA) { 3225 if (rxr->rx_tpa) { 3226 u8 *data; 3227 dma_addr_t mapping; 3228 3229 for (i = 0; i < bp->max_tpa; i++) { 3230 data = __bnxt_alloc_rx_data(bp, &mapping, 3231 GFP_KERNEL); 3232 if (!data) 3233 return -ENOMEM; 3234 3235 rxr->rx_tpa[i].data = data; 3236 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 3237 rxr->rx_tpa[i].mapping = mapping; 3238 } 3239 } else { 3240 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); 3241 return -ENOMEM; 3242 } 3243 } 3244 3245 return 0; 3246 } 3247 3248 static void bnxt_init_cp_rings(struct bnxt *bp) 3249 { 3250 int i, j; 3251 3252 for (i = 0; i < bp->cp_nr_rings; i++) { 3253 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 3254 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3255 3256 ring->fw_ring_id = INVALID_HW_RING_ID; 3257 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3258 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3259 for (j = 0; j < 2; j++) { 3260 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 3261 3262 if (!cpr2) 3263 continue; 3264 3265 ring = &cpr2->cp_ring_struct; 3266 ring->fw_ring_id = INVALID_HW_RING_ID; 3267 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3268 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3269 } 3270 } 3271 } 3272 3273 static int bnxt_init_rx_rings(struct bnxt *bp) 3274 { 3275 int i, rc = 0; 3276 3277 if (BNXT_RX_PAGE_MODE(bp)) { 3278 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 3279 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 3280 } else { 3281 bp->rx_offset = BNXT_RX_OFFSET; 3282 
bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 3283 } 3284 3285 for (i = 0; i < bp->rx_nr_rings; i++) { 3286 rc = bnxt_init_one_rx_ring(bp, i); 3287 if (rc) 3288 break; 3289 } 3290 3291 return rc; 3292 } 3293 3294 static int bnxt_init_tx_rings(struct bnxt *bp) 3295 { 3296 u16 i; 3297 3298 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 3299 MAX_SKB_FRAGS + 1); 3300 3301 for (i = 0; i < bp->tx_nr_rings; i++) { 3302 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3303 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 3304 3305 ring->fw_ring_id = INVALID_HW_RING_ID; 3306 } 3307 3308 return 0; 3309 } 3310 3311 static void bnxt_free_ring_grps(struct bnxt *bp) 3312 { 3313 kfree(bp->grp_info); 3314 bp->grp_info = NULL; 3315 } 3316 3317 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 3318 { 3319 int i; 3320 3321 if (irq_re_init) { 3322 bp->grp_info = kcalloc(bp->cp_nr_rings, 3323 sizeof(struct bnxt_ring_grp_info), 3324 GFP_KERNEL); 3325 if (!bp->grp_info) 3326 return -ENOMEM; 3327 } 3328 for (i = 0; i < bp->cp_nr_rings; i++) { 3329 if (irq_re_init) 3330 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 3331 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 3332 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 3333 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 3334 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 3335 } 3336 return 0; 3337 } 3338 3339 static void bnxt_free_vnics(struct bnxt *bp) 3340 { 3341 kfree(bp->vnic_info); 3342 bp->vnic_info = NULL; 3343 bp->nr_vnics = 0; 3344 } 3345 3346 static int bnxt_alloc_vnics(struct bnxt *bp) 3347 { 3348 int num_vnics = 1; 3349 3350 #ifdef CONFIG_RFS_ACCEL 3351 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 3352 num_vnics += bp->rx_nr_rings; 3353 #endif 3354 3355 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3356 num_vnics++; 3357 3358 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 3359 GFP_KERNEL); 3360 if (!bp->vnic_info) 3361 return -ENOMEM; 3362 3363 bp->nr_vnics = num_vnics; 3364 return 0; 3365 } 3366 3367 static void bnxt_init_vnics(struct bnxt *bp) 3368 { 3369 int i; 3370 3371 for (i = 0; i < bp->nr_vnics; i++) { 3372 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3373 int j; 3374 3375 vnic->fw_vnic_id = INVALID_HW_RING_ID; 3376 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 3377 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 3378 3379 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 3380 3381 if (bp->vnic_info[i].rss_hash_key) { 3382 if (i == 0) 3383 prandom_bytes(vnic->rss_hash_key, 3384 HW_HASH_KEY_SIZE); 3385 else 3386 memcpy(vnic->rss_hash_key, 3387 bp->vnic_info[0].rss_hash_key, 3388 HW_HASH_KEY_SIZE); 3389 } 3390 } 3391 } 3392 3393 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 3394 { 3395 int pages; 3396 3397 pages = ring_size / desc_per_pg; 3398 3399 if (!pages) 3400 return 1; 3401 3402 pages++; 3403 3404 while (pages & (pages - 1)) 3405 pages++; 3406 3407 return pages; 3408 } 3409 3410 void bnxt_set_tpa_flags(struct bnxt *bp) 3411 { 3412 bp->flags &= ~BNXT_FLAG_TPA; 3413 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 3414 return; 3415 if (bp->dev->features & NETIF_F_LRO) 3416 bp->flags |= BNXT_FLAG_LRO; 3417 else if (bp->dev->features & NETIF_F_GRO_HW) 3418 bp->flags |= BNXT_FLAG_GRO; 3419 } 3420 3421 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 3422 * be set on entry. 
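 * The completion ring below is sized for the worst case of roughly two
 * completion entries per rx buffer, plus aggregation completions, plus one
 * entry per tx BD (see the cp_ring_size calculation).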
3423 */ 3424 void bnxt_set_ring_params(struct bnxt *bp) 3425 { 3426 u32 ring_size, rx_size, rx_space; 3427 u32 agg_factor = 0, agg_ring_size = 0; 3428 3429 /* 8 for CRC and VLAN */ 3430 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 3431 3432 rx_space = rx_size + NET_SKB_PAD + 3433 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3434 3435 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 3436 ring_size = bp->rx_ring_size; 3437 bp->rx_agg_ring_size = 0; 3438 bp->rx_agg_nr_pages = 0; 3439 3440 if (bp->flags & BNXT_FLAG_TPA) 3441 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 3442 3443 bp->flags &= ~BNXT_FLAG_JUMBO; 3444 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 3445 u32 jumbo_factor; 3446 3447 bp->flags |= BNXT_FLAG_JUMBO; 3448 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 3449 if (jumbo_factor > agg_factor) 3450 agg_factor = jumbo_factor; 3451 } 3452 agg_ring_size = ring_size * agg_factor; 3453 3454 if (agg_ring_size) { 3455 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 3456 RX_DESC_CNT); 3457 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 3458 u32 tmp = agg_ring_size; 3459 3460 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 3461 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 3462 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 3463 tmp, agg_ring_size); 3464 } 3465 bp->rx_agg_ring_size = agg_ring_size; 3466 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 3467 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 3468 rx_space = rx_size + NET_SKB_PAD + 3469 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3470 } 3471 3472 bp->rx_buf_use_size = rx_size; 3473 bp->rx_buf_size = rx_space; 3474 3475 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 3476 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 3477 3478 ring_size = bp->tx_ring_size; 3479 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 3480 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 3481 3482 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 3483 bp->cp_ring_size = ring_size; 3484 3485 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 3486 if (bp->cp_nr_pages > MAX_CP_PAGES) { 3487 bp->cp_nr_pages = MAX_CP_PAGES; 3488 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 3489 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 3490 ring_size, bp->cp_ring_size); 3491 } 3492 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 3493 bp->cp_ring_mask = bp->cp_bit - 1; 3494 } 3495 3496 /* Changing allocation mode of RX rings. 3497 * TODO: Update when extending xdp_rxq_info to support allocation modes. 
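 * In page mode (used for XDP) each rx buffer is a full page mapped
 * DMA_BIDIRECTIONAL, aggregation rings are disabled, and the MTU is capped
 * at BNXT_MAX_PAGE_MODE_MTU.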
3498 */ 3499 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 3500 { 3501 if (page_mode) { 3502 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 3503 return -EOPNOTSUPP; 3504 bp->dev->max_mtu = 3505 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 3506 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 3507 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 3508 bp->rx_dir = DMA_BIDIRECTIONAL; 3509 bp->rx_skb_func = bnxt_rx_page_skb; 3510 /* Disable LRO or GRO_HW */ 3511 netdev_update_features(bp->dev); 3512 } else { 3513 bp->dev->max_mtu = bp->max_mtu; 3514 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 3515 bp->rx_dir = DMA_FROM_DEVICE; 3516 bp->rx_skb_func = bnxt_rx_skb; 3517 } 3518 return 0; 3519 } 3520 3521 static void bnxt_free_vnic_attributes(struct bnxt *bp) 3522 { 3523 int i; 3524 struct bnxt_vnic_info *vnic; 3525 struct pci_dev *pdev = bp->pdev; 3526 3527 if (!bp->vnic_info) 3528 return; 3529 3530 for (i = 0; i < bp->nr_vnics; i++) { 3531 vnic = &bp->vnic_info[i]; 3532 3533 kfree(vnic->fw_grp_ids); 3534 vnic->fw_grp_ids = NULL; 3535 3536 kfree(vnic->uc_list); 3537 vnic->uc_list = NULL; 3538 3539 if (vnic->mc_list) { 3540 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 3541 vnic->mc_list, vnic->mc_list_mapping); 3542 vnic->mc_list = NULL; 3543 } 3544 3545 if (vnic->rss_table) { 3546 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3547 vnic->rss_table, 3548 vnic->rss_table_dma_addr); 3549 vnic->rss_table = NULL; 3550 } 3551 3552 vnic->rss_hash_key = NULL; 3553 vnic->flags = 0; 3554 } 3555 } 3556 3557 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 3558 { 3559 int i, rc = 0, size; 3560 struct bnxt_vnic_info *vnic; 3561 struct pci_dev *pdev = bp->pdev; 3562 int max_rings; 3563 3564 for (i = 0; i < bp->nr_vnics; i++) { 3565 vnic = &bp->vnic_info[i]; 3566 3567 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 3568 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 3569 3570 if (mem_size > 0) { 3571 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 3572 if (!vnic->uc_list) { 3573 rc = -ENOMEM; 3574 goto out; 3575 } 3576 } 3577 } 3578 3579 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 3580 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 3581 vnic->mc_list = 3582 dma_alloc_coherent(&pdev->dev, 3583 vnic->mc_list_size, 3584 &vnic->mc_list_mapping, 3585 GFP_KERNEL); 3586 if (!vnic->mc_list) { 3587 rc = -ENOMEM; 3588 goto out; 3589 } 3590 } 3591 3592 if (bp->flags & BNXT_FLAG_CHIP_P5) 3593 goto vnic_skip_grps; 3594 3595 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3596 max_rings = bp->rx_nr_rings; 3597 else 3598 max_rings = 1; 3599 3600 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 3601 if (!vnic->fw_grp_ids) { 3602 rc = -ENOMEM; 3603 goto out; 3604 } 3605 vnic_skip_grps: 3606 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 3607 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 3608 continue; 3609 3610 /* Allocate rss table and hash key */ 3611 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3612 &vnic->rss_table_dma_addr, 3613 GFP_KERNEL); 3614 if (!vnic->rss_table) { 3615 rc = -ENOMEM; 3616 goto out; 3617 } 3618 3619 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 3620 3621 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 3622 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 3623 } 3624 return 0; 3625 3626 out: 3627 return rc; 3628 } 3629 3630 static void bnxt_free_hwrm_resources(struct bnxt *bp) 3631 { 3632 struct pci_dev *pdev = bp->pdev; 3633 3634 if (bp->hwrm_cmd_resp_addr) { 3635 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3636 
bp->hwrm_cmd_resp_dma_addr); 3637 bp->hwrm_cmd_resp_addr = NULL; 3638 } 3639 3640 if (bp->hwrm_cmd_kong_resp_addr) { 3641 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3642 bp->hwrm_cmd_kong_resp_addr, 3643 bp->hwrm_cmd_kong_resp_dma_addr); 3644 bp->hwrm_cmd_kong_resp_addr = NULL; 3645 } 3646 } 3647 3648 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) 3649 { 3650 struct pci_dev *pdev = bp->pdev; 3651 3652 if (bp->hwrm_cmd_kong_resp_addr) 3653 return 0; 3654 3655 bp->hwrm_cmd_kong_resp_addr = 3656 dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3657 &bp->hwrm_cmd_kong_resp_dma_addr, 3658 GFP_KERNEL); 3659 if (!bp->hwrm_cmd_kong_resp_addr) 3660 return -ENOMEM; 3661 3662 return 0; 3663 } 3664 3665 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 3666 { 3667 struct pci_dev *pdev = bp->pdev; 3668 3669 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3670 &bp->hwrm_cmd_resp_dma_addr, 3671 GFP_KERNEL); 3672 if (!bp->hwrm_cmd_resp_addr) 3673 return -ENOMEM; 3674 3675 return 0; 3676 } 3677 3678 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) 3679 { 3680 if (bp->hwrm_short_cmd_req_addr) { 3681 struct pci_dev *pdev = bp->pdev; 3682 3683 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3684 bp->hwrm_short_cmd_req_addr, 3685 bp->hwrm_short_cmd_req_dma_addr); 3686 bp->hwrm_short_cmd_req_addr = NULL; 3687 } 3688 } 3689 3690 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) 3691 { 3692 struct pci_dev *pdev = bp->pdev; 3693 3694 if (bp->hwrm_short_cmd_req_addr) 3695 return 0; 3696 3697 bp->hwrm_short_cmd_req_addr = 3698 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3699 &bp->hwrm_short_cmd_req_dma_addr, 3700 GFP_KERNEL); 3701 if (!bp->hwrm_short_cmd_req_addr) 3702 return -ENOMEM; 3703 3704 return 0; 3705 } 3706 3707 static void bnxt_free_port_stats(struct bnxt *bp) 3708 { 3709 struct pci_dev *pdev = bp->pdev; 3710 3711 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3712 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 3713 3714 if (bp->hw_rx_port_stats) { 3715 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 3716 bp->hw_rx_port_stats, 3717 bp->hw_rx_port_stats_map); 3718 bp->hw_rx_port_stats = NULL; 3719 } 3720 3721 if (bp->hw_tx_port_stats_ext) { 3722 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), 3723 bp->hw_tx_port_stats_ext, 3724 bp->hw_tx_port_stats_ext_map); 3725 bp->hw_tx_port_stats_ext = NULL; 3726 } 3727 3728 if (bp->hw_rx_port_stats_ext) { 3729 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3730 bp->hw_rx_port_stats_ext, 3731 bp->hw_rx_port_stats_ext_map); 3732 bp->hw_rx_port_stats_ext = NULL; 3733 } 3734 3735 if (bp->hw_pcie_stats) { 3736 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), 3737 bp->hw_pcie_stats, bp->hw_pcie_stats_map); 3738 bp->hw_pcie_stats = NULL; 3739 } 3740 } 3741 3742 static void bnxt_free_ring_stats(struct bnxt *bp) 3743 { 3744 struct pci_dev *pdev = bp->pdev; 3745 int size, i; 3746 3747 if (!bp->bnapi) 3748 return; 3749 3750 size = bp->hw_ring_stats_size; 3751 3752 for (i = 0; i < bp->cp_nr_rings; i++) { 3753 struct bnxt_napi *bnapi = bp->bnapi[i]; 3754 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3755 3756 if (cpr->hw_stats) { 3757 dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 3758 cpr->hw_stats_map); 3759 cpr->hw_stats = NULL; 3760 } 3761 } 3762 } 3763 3764 static int bnxt_alloc_stats(struct bnxt *bp) 3765 { 3766 u32 size, i; 3767 struct pci_dev *pdev = bp->pdev; 3768 3769 size = bp->hw_ring_stats_size; 3770 3771 for (i = 0; i < bp->cp_nr_rings; i++) { 3772 struct 
bnxt_napi *bnapi = bp->bnapi[i]; 3773 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3774 3775 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 3776 &cpr->hw_stats_map, 3777 GFP_KERNEL); 3778 if (!cpr->hw_stats) 3779 return -ENOMEM; 3780 3781 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 3782 } 3783 3784 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 3785 return 0; 3786 3787 if (bp->hw_rx_port_stats) 3788 goto alloc_ext_stats; 3789 3790 bp->hw_port_stats_size = sizeof(struct rx_port_stats) + 3791 sizeof(struct tx_port_stats) + 1024; 3792 3793 bp->hw_rx_port_stats = 3794 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, 3795 &bp->hw_rx_port_stats_map, 3796 GFP_KERNEL); 3797 if (!bp->hw_rx_port_stats) 3798 return -ENOMEM; 3799 3800 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512; 3801 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + 3802 sizeof(struct rx_port_stats) + 512; 3803 bp->flags |= BNXT_FLAG_PORT_STATS; 3804 3805 alloc_ext_stats: 3806 /* Display extended statistics only if FW supports it */ 3807 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 3808 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 3809 return 0; 3810 3811 if (bp->hw_rx_port_stats_ext) 3812 goto alloc_tx_ext_stats; 3813 3814 bp->hw_rx_port_stats_ext = 3815 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3816 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL); 3817 if (!bp->hw_rx_port_stats_ext) 3818 return 0; 3819 3820 alloc_tx_ext_stats: 3821 if (bp->hw_tx_port_stats_ext) 3822 goto alloc_pcie_stats; 3823 3824 if (bp->hwrm_spec_code >= 0x10902 || 3825 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 3826 bp->hw_tx_port_stats_ext = 3827 dma_alloc_coherent(&pdev->dev, 3828 sizeof(struct tx_port_stats_ext), 3829 &bp->hw_tx_port_stats_ext_map, 3830 GFP_KERNEL); 3831 } 3832 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3833 3834 alloc_pcie_stats: 3835 if (bp->hw_pcie_stats || 3836 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 3837 return 0; 3838 3839 bp->hw_pcie_stats = 3840 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), 3841 &bp->hw_pcie_stats_map, GFP_KERNEL); 3842 if (!bp->hw_pcie_stats) 3843 return 0; 3844 3845 bp->flags |= BNXT_FLAG_PCIE_STATS; 3846 return 0; 3847 } 3848 3849 static void bnxt_clear_ring_indices(struct bnxt *bp) 3850 { 3851 int i; 3852 3853 if (!bp->bnapi) 3854 return; 3855 3856 for (i = 0; i < bp->cp_nr_rings; i++) { 3857 struct bnxt_napi *bnapi = bp->bnapi[i]; 3858 struct bnxt_cp_ring_info *cpr; 3859 struct bnxt_rx_ring_info *rxr; 3860 struct bnxt_tx_ring_info *txr; 3861 3862 if (!bnapi) 3863 continue; 3864 3865 cpr = &bnapi->cp_ring; 3866 cpr->cp_raw_cons = 0; 3867 3868 txr = bnapi->tx_ring; 3869 if (txr) { 3870 txr->tx_prod = 0; 3871 txr->tx_cons = 0; 3872 } 3873 3874 rxr = bnapi->rx_ring; 3875 if (rxr) { 3876 rxr->rx_prod = 0; 3877 rxr->rx_agg_prod = 0; 3878 rxr->rx_sw_agg_prod = 0; 3879 rxr->rx_next_cons = 0; 3880 } 3881 } 3882 } 3883 3884 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 3885 { 3886 #ifdef CONFIG_RFS_ACCEL 3887 int i; 3888 3889 /* Under rtnl_lock and all our NAPIs have been disabled. It's 3890 * safe to delete the hash table. 
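	 * New aRFS filters are only added from the rx NAPI path, so nothing
	 * can race with this teardown.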
3891 */ 3892 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 3893 struct hlist_head *head; 3894 struct hlist_node *tmp; 3895 struct bnxt_ntuple_filter *fltr; 3896 3897 head = &bp->ntp_fltr_hash_tbl[i]; 3898 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 3899 hlist_del(&fltr->hash); 3900 kfree(fltr); 3901 } 3902 } 3903 if (irq_reinit) { 3904 kfree(bp->ntp_fltr_bmap); 3905 bp->ntp_fltr_bmap = NULL; 3906 } 3907 bp->ntp_fltr_count = 0; 3908 #endif 3909 } 3910 3911 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 3912 { 3913 #ifdef CONFIG_RFS_ACCEL 3914 int i, rc = 0; 3915 3916 if (!(bp->flags & BNXT_FLAG_RFS)) 3917 return 0; 3918 3919 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 3920 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 3921 3922 bp->ntp_fltr_count = 0; 3923 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 3924 sizeof(long), 3925 GFP_KERNEL); 3926 3927 if (!bp->ntp_fltr_bmap) 3928 rc = -ENOMEM; 3929 3930 return rc; 3931 #else 3932 return 0; 3933 #endif 3934 } 3935 3936 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 3937 { 3938 bnxt_free_vnic_attributes(bp); 3939 bnxt_free_tx_rings(bp); 3940 bnxt_free_rx_rings(bp); 3941 bnxt_free_cp_rings(bp); 3942 bnxt_free_ntp_fltrs(bp, irq_re_init); 3943 if (irq_re_init) { 3944 bnxt_free_ring_stats(bp); 3945 bnxt_free_ring_grps(bp); 3946 bnxt_free_vnics(bp); 3947 kfree(bp->tx_ring_map); 3948 bp->tx_ring_map = NULL; 3949 kfree(bp->tx_ring); 3950 bp->tx_ring = NULL; 3951 kfree(bp->rx_ring); 3952 bp->rx_ring = NULL; 3953 kfree(bp->bnapi); 3954 bp->bnapi = NULL; 3955 } else { 3956 bnxt_clear_ring_indices(bp); 3957 } 3958 } 3959 3960 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 3961 { 3962 int i, j, rc, size, arr_size; 3963 void *bnapi; 3964 3965 if (irq_re_init) { 3966 /* Allocate bnapi mem pointer array and mem block for 3967 * all queues 3968 */ 3969 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 3970 bp->cp_nr_rings); 3971 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 3972 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 3973 if (!bnapi) 3974 return -ENOMEM; 3975 3976 bp->bnapi = bnapi; 3977 bnapi += arr_size; 3978 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 3979 bp->bnapi[i] = bnapi; 3980 bp->bnapi[i]->index = i; 3981 bp->bnapi[i]->bp = bp; 3982 if (bp->flags & BNXT_FLAG_CHIP_P5) { 3983 struct bnxt_cp_ring_info *cpr = 3984 &bp->bnapi[i]->cp_ring; 3985 3986 cpr->cp_ring_struct.ring_mem.flags = 3987 BNXT_RMEM_RING_PTE_FLAG; 3988 } 3989 } 3990 3991 bp->rx_ring = kcalloc(bp->rx_nr_rings, 3992 sizeof(struct bnxt_rx_ring_info), 3993 GFP_KERNEL); 3994 if (!bp->rx_ring) 3995 return -ENOMEM; 3996 3997 for (i = 0; i < bp->rx_nr_rings; i++) { 3998 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3999 4000 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4001 rxr->rx_ring_struct.ring_mem.flags = 4002 BNXT_RMEM_RING_PTE_FLAG; 4003 rxr->rx_agg_ring_struct.ring_mem.flags = 4004 BNXT_RMEM_RING_PTE_FLAG; 4005 } 4006 rxr->bnapi = bp->bnapi[i]; 4007 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 4008 } 4009 4010 bp->tx_ring = kcalloc(bp->tx_nr_rings, 4011 sizeof(struct bnxt_tx_ring_info), 4012 GFP_KERNEL); 4013 if (!bp->tx_ring) 4014 return -ENOMEM; 4015 4016 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 4017 GFP_KERNEL); 4018 4019 if (!bp->tx_ring_map) 4020 return -ENOMEM; 4021 4022 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4023 j = 0; 4024 else 4025 j = bp->rx_nr_rings; 4026 4027 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 4028 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4029 4030 if 
(bp->flags & BNXT_FLAG_CHIP_P5) 4031 txr->tx_ring_struct.ring_mem.flags = 4032 BNXT_RMEM_RING_PTE_FLAG; 4033 txr->bnapi = bp->bnapi[j]; 4034 bp->bnapi[j]->tx_ring = txr; 4035 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 4036 if (i >= bp->tx_nr_rings_xdp) { 4037 txr->txq_index = i - bp->tx_nr_rings_xdp; 4038 bp->bnapi[j]->tx_int = bnxt_tx_int; 4039 } else { 4040 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 4041 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 4042 } 4043 } 4044 4045 rc = bnxt_alloc_stats(bp); 4046 if (rc) 4047 goto alloc_mem_err; 4048 4049 rc = bnxt_alloc_ntp_fltrs(bp); 4050 if (rc) 4051 goto alloc_mem_err; 4052 4053 rc = bnxt_alloc_vnics(bp); 4054 if (rc) 4055 goto alloc_mem_err; 4056 } 4057 4058 bnxt_init_ring_struct(bp); 4059 4060 rc = bnxt_alloc_rx_rings(bp); 4061 if (rc) 4062 goto alloc_mem_err; 4063 4064 rc = bnxt_alloc_tx_rings(bp); 4065 if (rc) 4066 goto alloc_mem_err; 4067 4068 rc = bnxt_alloc_cp_rings(bp); 4069 if (rc) 4070 goto alloc_mem_err; 4071 4072 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 4073 BNXT_VNIC_UCAST_FLAG; 4074 rc = bnxt_alloc_vnic_attributes(bp); 4075 if (rc) 4076 goto alloc_mem_err; 4077 return 0; 4078 4079 alloc_mem_err: 4080 bnxt_free_mem(bp, true); 4081 return rc; 4082 } 4083 4084 static void bnxt_disable_int(struct bnxt *bp) 4085 { 4086 int i; 4087 4088 if (!bp->bnapi) 4089 return; 4090 4091 for (i = 0; i < bp->cp_nr_rings; i++) { 4092 struct bnxt_napi *bnapi = bp->bnapi[i]; 4093 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4094 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4095 4096 if (ring->fw_ring_id != INVALID_HW_RING_ID) 4097 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4098 } 4099 } 4100 4101 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 4102 { 4103 struct bnxt_napi *bnapi = bp->bnapi[n]; 4104 struct bnxt_cp_ring_info *cpr; 4105 4106 cpr = &bnapi->cp_ring; 4107 return cpr->cp_ring_struct.map_idx; 4108 } 4109 4110 static void bnxt_disable_int_sync(struct bnxt *bp) 4111 { 4112 int i; 4113 4114 atomic_inc(&bp->intr_sem); 4115 4116 bnxt_disable_int(bp); 4117 for (i = 0; i < bp->cp_nr_rings; i++) { 4118 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 4119 4120 synchronize_irq(bp->irq_tbl[map_idx].vector); 4121 } 4122 } 4123 4124 static void bnxt_enable_int(struct bnxt *bp) 4125 { 4126 int i; 4127 4128 atomic_set(&bp->intr_sem, 0); 4129 for (i = 0; i < bp->cp_nr_rings; i++) { 4130 struct bnxt_napi *bnapi = bp->bnapi[i]; 4131 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4132 4133 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 4134 } 4135 } 4136 4137 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 4138 u16 cmpl_ring, u16 target_id) 4139 { 4140 struct input *req = request; 4141 4142 req->req_type = cpu_to_le16(req_type); 4143 req->cmpl_ring = cpu_to_le16(cmpl_ring); 4144 req->target_id = cpu_to_le16(target_id); 4145 if (bnxt_kong_hwrm_message(bp, req)) 4146 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 4147 else 4148 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 4149 } 4150 4151 static int bnxt_hwrm_to_stderr(u32 hwrm_err) 4152 { 4153 switch (hwrm_err) { 4154 case HWRM_ERR_CODE_SUCCESS: 4155 return 0; 4156 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: 4157 return -EACCES; 4158 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: 4159 return -ENOSPC; 4160 case HWRM_ERR_CODE_INVALID_PARAMS: 4161 case HWRM_ERR_CODE_INVALID_FLAGS: 4162 case HWRM_ERR_CODE_INVALID_ENABLES: 4163 case HWRM_ERR_CODE_UNSUPPORTED_TLV: 4164 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: 
4165 return -EINVAL; 4166 case HWRM_ERR_CODE_NO_BUFFER: 4167 return -ENOMEM; 4168 case HWRM_ERR_CODE_HOT_RESET_PROGRESS: 4169 return -EAGAIN; 4170 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: 4171 return -EOPNOTSUPP; 4172 default: 4173 return -EIO; 4174 } 4175 } 4176 4177 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 4178 int timeout, bool silent) 4179 { 4180 int i, intr_process, rc, tmo_count; 4181 struct input *req = msg; 4182 u32 *data = msg; 4183 __le32 *resp_len; 4184 u8 *valid; 4185 u16 cp_ring_id, len = 0; 4186 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4187 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4188 struct hwrm_short_input short_input = {0}; 4189 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4190 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; 4191 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4192 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4193 4194 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4195 return -EBUSY; 4196 4197 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4198 if (msg_len > bp->hwrm_max_ext_req_len || 4199 !bp->hwrm_short_cmd_req_addr) 4200 return -EINVAL; 4201 } 4202 4203 if (bnxt_hwrm_kong_chnl(bp, req)) { 4204 dst = BNXT_HWRM_CHNL_KONG; 4205 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4206 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4207 resp = bp->hwrm_cmd_kong_resp_addr; 4208 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; 4209 } 4210 4211 memset(resp, 0, PAGE_SIZE); 4212 cp_ring_id = le16_to_cpu(req->cmpl_ring); 4213 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; 4214 4215 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); 4216 /* currently supports only one outstanding message */ 4217 if (intr_process) 4218 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 4219 4220 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 4221 msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4222 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 4223 u16 max_msg_len; 4224 4225 /* Set boundary for maximum extended request length for short 4226 * cmd format. If passed up from device use the max supported 4227 * internal req length. 4228 */ 4229 max_msg_len = bp->hwrm_max_ext_req_len; 4230 4231 memcpy(short_cmd_req, req, msg_len); 4232 if (msg_len < max_msg_len) 4233 memset(short_cmd_req + msg_len, 0, 4234 max_msg_len - msg_len); 4235 4236 short_input.req_type = req->req_type; 4237 short_input.signature = 4238 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); 4239 short_input.size = cpu_to_le16(msg_len); 4240 short_input.req_addr = 4241 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); 4242 4243 data = (u32 *)&short_input; 4244 msg_len = sizeof(short_input); 4245 4246 /* Sync memory write before updating doorbell */ 4247 wmb(); 4248 4249 max_req_len = BNXT_HWRM_SHORT_REQ_LEN; 4250 } 4251 4252 /* Write request msg to hwrm channel */ 4253 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); 4254 4255 for (i = msg_len; i < max_req_len; i += 4) 4256 writel(0, bp->bar0 + bar_offset + i); 4257 4258 /* Ring channel doorbell */ 4259 writel(1, bp->bar0 + doorbell_offset); 4260 4261 if (!pci_is_enabled(bp->pdev)) 4262 return 0; 4263 4264 if (!timeout) 4265 timeout = DFLT_HWRM_CMD_TIMEOUT; 4266 /* convert timeout to usec */ 4267 timeout *= 1000; 4268 4269 i = 0; 4270 /* Short timeout for the first few iterations: 4271 * number of loops = number of loops for short timeout + 4272 * number of loops for standard timeout. 
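 * That is, tmo_count = HWRM_SHORT_TIMEOUT_COUNTER +
 * DIV_ROUND_UP(timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER,
 * HWRM_MIN_TIMEOUT), with timeout already converted to microseconds above.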
4273 */ 4274 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4275 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4276 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4277 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); 4278 4279 if (intr_process) { 4280 u16 seq_id = bp->hwrm_intr_seq_id; 4281 4282 /* Wait until hwrm response cmpl interrupt is processed */ 4283 while (bp->hwrm_intr_seq_id != (u16)~seq_id && 4284 i++ < tmo_count) { 4285 /* Abort the wait for completion if the FW health 4286 * check has failed. 4287 */ 4288 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4289 return -EBUSY; 4290 /* on first few passes, just barely sleep */ 4291 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4292 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4293 HWRM_SHORT_MAX_TIMEOUT); 4294 else 4295 usleep_range(HWRM_MIN_TIMEOUT, 4296 HWRM_MAX_TIMEOUT); 4297 } 4298 4299 if (bp->hwrm_intr_seq_id != (u16)~seq_id) { 4300 if (!silent) 4301 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 4302 le16_to_cpu(req->req_type)); 4303 return -EBUSY; 4304 } 4305 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4306 HWRM_RESP_LEN_SFT; 4307 valid = resp_addr + len - 1; 4308 } else { 4309 int j; 4310 4311 /* Check if response len is updated */ 4312 for (i = 0; i < tmo_count; i++) { 4313 /* Abort the wait for completion if the FW health 4314 * check has failed. 4315 */ 4316 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4317 return -EBUSY; 4318 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4319 HWRM_RESP_LEN_SFT; 4320 if (len) 4321 break; 4322 /* on first few passes, just barely sleep */ 4323 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4324 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4325 HWRM_SHORT_MAX_TIMEOUT); 4326 else 4327 usleep_range(HWRM_MIN_TIMEOUT, 4328 HWRM_MAX_TIMEOUT); 4329 } 4330 4331 if (i >= tmo_count) { 4332 if (!silent) 4333 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 4334 HWRM_TOTAL_TIMEOUT(i), 4335 le16_to_cpu(req->req_type), 4336 le16_to_cpu(req->seq_id), len); 4337 return -EBUSY; 4338 } 4339 4340 /* Last byte of resp contains valid bit */ 4341 valid = resp_addr + len - 1; 4342 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4343 /* make sure we read from updated DMA memory */ 4344 dma_rmb(); 4345 if (*valid) 4346 break; 4347 usleep_range(1, 5); 4348 } 4349 4350 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 4351 if (!silent) 4352 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 4353 HWRM_TOTAL_TIMEOUT(i), 4354 le16_to_cpu(req->req_type), 4355 le16_to_cpu(req->seq_id), len, 4356 *valid); 4357 return -EBUSY; 4358 } 4359 } 4360 4361 /* Zero valid bit for compatibility. Valid bit in an older spec 4362 * may become a new field in a newer spec. We must make sure that 4363 * a new field not implemented by old spec will read zero. 
4364 */ 4365 *valid = 0; 4366 rc = le16_to_cpu(resp->error_code); 4367 if (rc && !silent) 4368 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 4369 le16_to_cpu(resp->req_type), 4370 le16_to_cpu(resp->seq_id), rc); 4371 return bnxt_hwrm_to_stderr(rc); 4372 } 4373 4374 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4375 { 4376 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 4377 } 4378 4379 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4380 int timeout) 4381 { 4382 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4383 } 4384 4385 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4386 { 4387 int rc; 4388 4389 mutex_lock(&bp->hwrm_cmd_lock); 4390 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 4391 mutex_unlock(&bp->hwrm_cmd_lock); 4392 return rc; 4393 } 4394 4395 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4396 int timeout) 4397 { 4398 int rc; 4399 4400 mutex_lock(&bp->hwrm_cmd_lock); 4401 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4402 mutex_unlock(&bp->hwrm_cmd_lock); 4403 return rc; 4404 } 4405 4406 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 4407 bool async_only) 4408 { 4409 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; 4410 struct hwrm_func_drv_rgtr_input req = {0}; 4411 DECLARE_BITMAP(async_events_bmap, 256); 4412 u32 *events = (u32 *)async_events_bmap; 4413 u32 flags; 4414 int rc, i; 4415 4416 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 4417 4418 req.enables = 4419 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 4420 FUNC_DRV_RGTR_REQ_ENABLES_VER | 4421 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4422 4423 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 4424 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE | 4425 FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 4426 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 4427 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 4428 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 4429 req.flags = cpu_to_le32(flags); 4430 req.ver_maj_8b = DRV_VER_MAJ; 4431 req.ver_min_8b = DRV_VER_MIN; 4432 req.ver_upd_8b = DRV_VER_UPD; 4433 req.ver_maj = cpu_to_le16(DRV_VER_MAJ); 4434 req.ver_min = cpu_to_le16(DRV_VER_MIN); 4435 req.ver_upd = cpu_to_le16(DRV_VER_UPD); 4436 4437 if (BNXT_PF(bp)) { 4438 u32 data[8]; 4439 int i; 4440 4441 memset(data, 0, sizeof(data)); 4442 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 4443 u16 cmd = bnxt_vf_req_snif[i]; 4444 unsigned int bit, idx; 4445 4446 idx = cmd / 32; 4447 bit = cmd % 32; 4448 data[idx] |= 1 << bit; 4449 } 4450 4451 for (i = 0; i < 8; i++) 4452 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 4453 4454 req.enables |= 4455 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 4456 } 4457 4458 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 4459 req.flags |= cpu_to_le32( 4460 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 4461 4462 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 4463 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 4464 u16 event_id = bnxt_async_events_arr[i]; 4465 4466 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 4467 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4468 continue; 4469 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 4470 } 4471 if (bmap && bmap_size) { 4472 for (i = 0; i < bmap_size; i++) { 4473 if (test_bit(i, bmap)) 4474 __set_bit(i, async_events_bmap); 4475 } 4476 } 4477 for (i = 0; i < 8; 
i++) 4478 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 4479 4480 if (async_only) 4481 req.enables = 4482 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4483 4484 mutex_lock(&bp->hwrm_cmd_lock); 4485 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4486 if (!rc) { 4487 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 4488 if (resp->flags & 4489 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 4490 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 4491 } 4492 mutex_unlock(&bp->hwrm_cmd_lock); 4493 return rc; 4494 } 4495 4496 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 4497 { 4498 struct hwrm_func_drv_unrgtr_input req = {0}; 4499 4500 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 4501 return 0; 4502 4503 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 4504 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4505 } 4506 4507 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 4508 { 4509 u32 rc = 0; 4510 struct hwrm_tunnel_dst_port_free_input req = {0}; 4511 4512 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 4513 req.tunnel_type = tunnel_type; 4514 4515 switch (tunnel_type) { 4516 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 4517 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; 4518 break; 4519 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 4520 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; 4521 break; 4522 default: 4523 break; 4524 } 4525 4526 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4527 if (rc) 4528 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 4529 rc); 4530 return rc; 4531 } 4532 4533 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 4534 u8 tunnel_type) 4535 { 4536 u32 rc = 0; 4537 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 4538 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4539 4540 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 4541 4542 req.tunnel_type = tunnel_type; 4543 req.tunnel_dst_port_val = port; 4544 4545 mutex_lock(&bp->hwrm_cmd_lock); 4546 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4547 if (rc) { 4548 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", 4549 rc); 4550 goto err_out; 4551 } 4552 4553 switch (tunnel_type) { 4554 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 4555 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 4556 break; 4557 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 4558 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 4559 break; 4560 default: 4561 break; 4562 } 4563 4564 err_out: 4565 mutex_unlock(&bp->hwrm_cmd_lock); 4566 return rc; 4567 } 4568 4569 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 4570 { 4571 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 4572 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4573 4574 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 4575 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4576 4577 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 4578 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 4579 req.mask = cpu_to_le32(vnic->rx_mask); 4580 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4581 } 4582 4583 #ifdef CONFIG_RFS_ACCEL 4584 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 4585 struct bnxt_ntuple_filter *fltr) 4586 { 4587 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 4588 4589 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 4590 req.ntuple_filter_id = fltr->filter_id; 4591 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4592 } 4593 4594 #define BNXT_NTP_FLTR_FLAGS \ 4595 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 4596 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 4597 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 4598 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 4599 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 4600 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 4601 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 4602 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 4603 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 4604 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 4605 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 4606 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 4607 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 4608 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 4609 4610 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 4611 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 4612 4613 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 4614 struct bnxt_ntuple_filter *fltr) 4615 { 4616 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 4617 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 4618 struct flow_keys *keys = &fltr->fkeys; 4619 struct bnxt_vnic_info *vnic; 4620 u32 flags = 0; 4621 int rc = 0; 4622 4623 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 4624 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 4625 4626 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 4627 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 4628 req.dst_id = cpu_to_le16(fltr->rxq); 4629 } else { 4630 vnic = &bp->vnic_info[fltr->rxq + 1]; 4631 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 4632 } 4633 req.flags = cpu_to_le32(flags); 4634 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 4635 4636 req.ethertype = htons(ETH_P_IP); 4637 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 4638 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 4639 req.ip_protocol = keys->basic.ip_proto; 4640 4641 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 4642 int i; 4643 4644 req.ethertype = 
htons(ETH_P_IPV6); 4645 req.ip_addr_type = 4646 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 4647 *(struct in6_addr *)&req.src_ipaddr[0] = 4648 keys->addrs.v6addrs.src; 4649 *(struct in6_addr *)&req.dst_ipaddr[0] = 4650 keys->addrs.v6addrs.dst; 4651 for (i = 0; i < 4; i++) { 4652 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4653 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4654 } 4655 } else { 4656 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 4657 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4658 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 4659 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4660 } 4661 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 4662 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 4663 req.tunnel_type = 4664 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 4665 } 4666 4667 req.src_port = keys->ports.src; 4668 req.src_port_mask = cpu_to_be16(0xffff); 4669 req.dst_port = keys->ports.dst; 4670 req.dst_port_mask = cpu_to_be16(0xffff); 4671 4672 mutex_lock(&bp->hwrm_cmd_lock); 4673 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4674 if (!rc) { 4675 resp = bnxt_get_hwrm_resp_addr(bp, &req); 4676 fltr->filter_id = resp->ntuple_filter_id; 4677 } 4678 mutex_unlock(&bp->hwrm_cmd_lock); 4679 return rc; 4680 } 4681 #endif 4682 4683 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 4684 u8 *mac_addr) 4685 { 4686 u32 rc = 0; 4687 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 4688 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4689 4690 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 4691 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 4692 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 4693 req.flags |= 4694 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 4695 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 4696 req.enables = 4697 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 4698 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 4699 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 4700 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 4701 req.l2_addr_mask[0] = 0xff; 4702 req.l2_addr_mask[1] = 0xff; 4703 req.l2_addr_mask[2] = 0xff; 4704 req.l2_addr_mask[3] = 0xff; 4705 req.l2_addr_mask[4] = 0xff; 4706 req.l2_addr_mask[5] = 0xff; 4707 4708 mutex_lock(&bp->hwrm_cmd_lock); 4709 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4710 if (!rc) 4711 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 4712 resp->l2_filter_id; 4713 mutex_unlock(&bp->hwrm_cmd_lock); 4714 return rc; 4715 } 4716 4717 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 4718 { 4719 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 4720 int rc = 0; 4721 4722 /* Any associated ntuple filters will also be cleared by firmware. 
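 * Only the unicast L2 filters are freed explicitly below.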
*/ 4723 mutex_lock(&bp->hwrm_cmd_lock); 4724 for (i = 0; i < num_of_vnics; i++) { 4725 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4726 4727 for (j = 0; j < vnic->uc_filter_count; j++) { 4728 struct hwrm_cfa_l2_filter_free_input req = {0}; 4729 4730 bnxt_hwrm_cmd_hdr_init(bp, &req, 4731 HWRM_CFA_L2_FILTER_FREE, -1, -1); 4732 4733 req.l2_filter_id = vnic->fw_l2_filter_id[j]; 4734 4735 rc = _hwrm_send_message(bp, &req, sizeof(req), 4736 HWRM_CMD_TIMEOUT); 4737 } 4738 vnic->uc_filter_count = 0; 4739 } 4740 mutex_unlock(&bp->hwrm_cmd_lock); 4741 4742 return rc; 4743 } 4744 4745 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 4746 { 4747 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4748 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 4749 struct hwrm_vnic_tpa_cfg_input req = {0}; 4750 4751 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 4752 return 0; 4753 4754 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 4755 4756 if (tpa_flags) { 4757 u16 mss = bp->dev->mtu - 40; 4758 u32 nsegs, n, segs = 0, flags; 4759 4760 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 4761 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 4762 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 4763 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 4764 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 4765 if (tpa_flags & BNXT_FLAG_GRO) 4766 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 4767 4768 req.flags = cpu_to_le32(flags); 4769 4770 req.enables = 4771 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 4772 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 4773 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 4774 4775 /* Number of segs are log2 units, and first packet is not 4776 * included as part of this units. 4777 */ 4778 if (mss <= BNXT_RX_PAGE_SIZE) { 4779 n = BNXT_RX_PAGE_SIZE / mss; 4780 nsegs = (MAX_SKB_FRAGS - 1) * n; 4781 } else { 4782 n = mss / BNXT_RX_PAGE_SIZE; 4783 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 4784 n++; 4785 nsegs = (MAX_SKB_FRAGS - n) / n; 4786 } 4787 4788 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4789 segs = MAX_TPA_SEGS_P5; 4790 max_aggs = bp->max_tpa; 4791 } else { 4792 segs = ilog2(nsegs); 4793 } 4794 req.max_agg_segs = cpu_to_le16(segs); 4795 req.max_aggs = cpu_to_le16(max_aggs); 4796 4797 req.min_agg_len = cpu_to_le32(512); 4798 } 4799 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4800 4801 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4802 } 4803 4804 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 4805 { 4806 struct bnxt_ring_grp_info *grp_info; 4807 4808 grp_info = &bp->grp_info[ring->grp_idx]; 4809 return grp_info->cp_fw_ring_id; 4810 } 4811 4812 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 4813 { 4814 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4815 struct bnxt_napi *bnapi = rxr->bnapi; 4816 struct bnxt_cp_ring_info *cpr; 4817 4818 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL]; 4819 return cpr->cp_ring_struct.fw_ring_id; 4820 } else { 4821 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 4822 } 4823 } 4824 4825 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 4826 { 4827 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4828 struct bnxt_napi *bnapi = txr->bnapi; 4829 struct bnxt_cp_ring_info *cpr; 4830 4831 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL]; 4832 return cpr->cp_ring_struct.fw_ring_id; 4833 } else { 4834 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 4835 } 4836 } 4837 4838 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 4839 { 4840 u32 i, j, max_rings; 4841 struct 
bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4842 struct hwrm_vnic_rss_cfg_input req = {0}; 4843 4844 if ((bp->flags & BNXT_FLAG_CHIP_P5) || 4845 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 4846 return 0; 4847 4848 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 4849 if (set_rss) { 4850 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 4851 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 4852 if (vnic->flags & BNXT_VNIC_RSS_FLAG) { 4853 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4854 max_rings = bp->rx_nr_rings - 1; 4855 else 4856 max_rings = bp->rx_nr_rings; 4857 } else { 4858 max_rings = 1; 4859 } 4860 4861 /* Fill the RSS indirection table with ring group ids */ 4862 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { 4863 if (j == max_rings) 4864 j = 0; 4865 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 4866 } 4867 4868 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 4869 req.hash_key_tbl_addr = 4870 cpu_to_le64(vnic->rss_hash_key_dma_addr); 4871 } 4872 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 4873 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4874 } 4875 4876 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) 4877 { 4878 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4879 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings; 4880 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 4881 struct hwrm_vnic_rss_cfg_input req = {0}; 4882 4883 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 4884 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4885 if (!set_rss) { 4886 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4887 return 0; 4888 } 4889 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 4890 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 4891 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 4892 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 4893 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); 4894 for (i = 0, k = 0; i < nr_ctxs; i++) { 4895 __le16 *ring_tbl = vnic->rss_table; 4896 int rc; 4897 4898 req.ring_table_pair_index = i; 4899 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 4900 for (j = 0; j < 64; j++) { 4901 u16 ring_id; 4902 4903 ring_id = rxr->rx_ring_struct.fw_ring_id; 4904 *ring_tbl++ = cpu_to_le16(ring_id); 4905 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 4906 *ring_tbl++ = cpu_to_le16(ring_id); 4907 rxr++; 4908 k++; 4909 if (k == max_rings) { 4910 k = 0; 4911 rxr = &bp->rx_ring[0]; 4912 } 4913 } 4914 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4915 if (rc) 4916 return rc; 4917 } 4918 return 0; 4919 } 4920 4921 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 4922 { 4923 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4924 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 4925 4926 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 4927 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 4928 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 4929 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 4930 req.enables = 4931 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 4932 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 4933 /* thresholds not implemented in firmware yet */ 4934 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 4935 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 4936 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4937 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4938 } 
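/* Illustrative sketch (hypothetical, not part of the driver): the HWRM
 * helpers above are used in a common pattern throughout this section.
 * When a command needs no response data, hwrm_send_message() suffices;
 * when the response must be read back, hwrm_cmd_lock is held across
 * _hwrm_send_message() and the read, because the response DMA buffer at
 * bp->hwrm_cmd_resp_addr is shared across commands.  For example, as in
 * bnxt_hwrm_vnic_alloc() below:
 *
 *	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 *	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
 *	struct hwrm_vnic_alloc_input req = {0};
 *	int rc;
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
 *	mutex_lock(&bp->hwrm_cmd_lock);
 *	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	if (!rc)
 *		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
 *	mutex_unlock(&bp->hwrm_cmd_lock);
 */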
4939 4940 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 4941 u16 ctx_idx) 4942 { 4943 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 4944 4945 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 4946 req.rss_cos_lb_ctx_id = 4947 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 4948 4949 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4950 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 4951 } 4952 4953 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 4954 { 4955 int i, j; 4956 4957 for (i = 0; i < bp->nr_vnics; i++) { 4958 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4959 4960 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 4961 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 4962 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 4963 } 4964 } 4965 bp->rsscos_nr_ctxs = 0; 4966 } 4967 4968 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 4969 { 4970 int rc; 4971 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 4972 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 4973 bp->hwrm_cmd_resp_addr; 4974 4975 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 4976 -1); 4977 4978 mutex_lock(&bp->hwrm_cmd_lock); 4979 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4980 if (!rc) 4981 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 4982 le16_to_cpu(resp->rss_cos_lb_ctx_id); 4983 mutex_unlock(&bp->hwrm_cmd_lock); 4984 4985 return rc; 4986 } 4987 4988 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 4989 { 4990 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 4991 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 4992 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 4993 } 4994 4995 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 4996 { 4997 unsigned int ring = 0, grp_idx; 4998 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4999 struct hwrm_vnic_cfg_input req = {0}; 5000 u16 def_vlan = 0; 5001 5002 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 5003 5004 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5005 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5006 5007 req.default_rx_ring_id = 5008 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 5009 req.default_cmpl_ring_id = 5010 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 5011 req.enables = 5012 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 5013 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 5014 goto vnic_mru; 5015 } 5016 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 5017 /* Only RSS support for now TBD: COS & LB */ 5018 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 5019 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5020 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5021 VNIC_CFG_REQ_ENABLES_MRU); 5022 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 5023 req.rss_rule = 5024 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 5025 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5026 VNIC_CFG_REQ_ENABLES_MRU); 5027 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 5028 } else { 5029 req.rss_rule = cpu_to_le16(0xffff); 5030 } 5031 5032 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 5033 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 5034 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 5035 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 5036 } else { 5037 req.cos_rule = cpu_to_le16(0xffff); 5038 } 5039 5040 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 5041 ring = 0; 5042 
else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 5043 ring = vnic_id - 1; 5044 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 5045 ring = bp->rx_nr_rings - 1; 5046 5047 grp_idx = bp->rx_ring[ring].bnapi->index; 5048 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 5049 req.lb_rule = cpu_to_le16(0xffff); 5050 vnic_mru: 5051 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 5052 VLAN_HLEN); 5053 5054 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5055 #ifdef CONFIG_BNXT_SRIOV 5056 if (BNXT_VF(bp)) 5057 def_vlan = bp->vf.vlan; 5058 #endif 5059 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 5060 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 5061 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 5062 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 5063 5064 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5065 } 5066 5067 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 5068 { 5069 u32 rc = 0; 5070 5071 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 5072 struct hwrm_vnic_free_input req = {0}; 5073 5074 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 5075 req.vnic_id = 5076 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 5077 5078 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5079 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 5080 } 5081 return rc; 5082 } 5083 5084 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 5085 { 5086 u16 i; 5087 5088 for (i = 0; i < bp->nr_vnics; i++) 5089 bnxt_hwrm_vnic_free_one(bp, i); 5090 } 5091 5092 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 5093 unsigned int start_rx_ring_idx, 5094 unsigned int nr_rings) 5095 { 5096 int rc = 0; 5097 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 5098 struct hwrm_vnic_alloc_input req = {0}; 5099 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5100 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5101 5102 if (bp->flags & BNXT_FLAG_CHIP_P5) 5103 goto vnic_no_ring_grps; 5104 5105 /* map ring groups to this vnic */ 5106 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 5107 grp_idx = bp->rx_ring[i].bnapi->index; 5108 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 5109 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 5110 j, nr_rings); 5111 break; 5112 } 5113 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 5114 } 5115 5116 vnic_no_ring_grps: 5117 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 5118 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 5119 if (vnic_id == 0) 5120 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 5121 5122 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 5123 5124 mutex_lock(&bp->hwrm_cmd_lock); 5125 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5126 if (!rc) 5127 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 5128 mutex_unlock(&bp->hwrm_cmd_lock); 5129 return rc; 5130 } 5131 5132 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 5133 { 5134 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5135 struct hwrm_vnic_qcaps_input req = {0}; 5136 int rc; 5137 5138 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 5139 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); 5140 if (bp->hwrm_spec_code < 0x10600) 5141 return 0; 5142 5143 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 5144 mutex_lock(&bp->hwrm_cmd_lock); 5145 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 
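/* On success, cache the capabilities reported by firmware: the new RSS
 * capability (non-P5 chips only), RoCE mirroring support, and the TPA v2
 * aggregation limit, which also selects the extended ring stats format.
 */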
5146 if (!rc) { 5147 u32 flags = le32_to_cpu(resp->flags); 5148 5149 if (!(bp->flags & BNXT_FLAG_CHIP_P5) && 5150 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 5151 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 5152 if (flags & 5153 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 5154 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 5155 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 5156 if (bp->max_tpa_v2) 5157 bp->hw_ring_stats_size = 5158 sizeof(struct ctx_hw_stats_ext); 5159 } 5160 mutex_unlock(&bp->hwrm_cmd_lock); 5161 return rc; 5162 } 5163 5164 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 5165 { 5166 u16 i; 5167 u32 rc = 0; 5168 5169 if (bp->flags & BNXT_FLAG_CHIP_P5) 5170 return 0; 5171 5172 mutex_lock(&bp->hwrm_cmd_lock); 5173 for (i = 0; i < bp->rx_nr_rings; i++) { 5174 struct hwrm_ring_grp_alloc_input req = {0}; 5175 struct hwrm_ring_grp_alloc_output *resp = 5176 bp->hwrm_cmd_resp_addr; 5177 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 5178 5179 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 5180 5181 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 5182 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 5183 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 5184 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 5185 5186 rc = _hwrm_send_message(bp, &req, sizeof(req), 5187 HWRM_CMD_TIMEOUT); 5188 if (rc) 5189 break; 5190 5191 bp->grp_info[grp_idx].fw_grp_id = 5192 le32_to_cpu(resp->ring_group_id); 5193 } 5194 mutex_unlock(&bp->hwrm_cmd_lock); 5195 return rc; 5196 } 5197 5198 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) 5199 { 5200 u16 i; 5201 u32 rc = 0; 5202 struct hwrm_ring_grp_free_input req = {0}; 5203 5204 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) 5205 return 0; 5206 5207 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 5208 5209 mutex_lock(&bp->hwrm_cmd_lock); 5210 for (i = 0; i < bp->cp_nr_rings; i++) { 5211 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 5212 continue; 5213 req.ring_group_id = 5214 cpu_to_le32(bp->grp_info[i].fw_grp_id); 5215 5216 rc = _hwrm_send_message(bp, &req, sizeof(req), 5217 HWRM_CMD_TIMEOUT); 5218 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 5219 } 5220 mutex_unlock(&bp->hwrm_cmd_lock); 5221 return rc; 5222 } 5223 5224 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 5225 struct bnxt_ring_struct *ring, 5226 u32 ring_type, u32 map_index) 5227 { 5228 int rc = 0, err = 0; 5229 struct hwrm_ring_alloc_input req = {0}; 5230 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5231 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 5232 struct bnxt_ring_grp_info *grp_info; 5233 u16 ring_id; 5234 5235 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 5236 5237 req.enables = 0; 5238 if (rmem->nr_pages > 1) { 5239 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 5240 /* Page size is in log2 units */ 5241 req.page_size = BNXT_PAGE_SHIFT; 5242 req.page_tbl_depth = 1; 5243 } else { 5244 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 5245 } 5246 req.fbo = 0; 5247 /* Association of ring index with doorbell index and MSIX number */ 5248 req.logical_id = cpu_to_le16(map_index); 5249 5250 switch (ring_type) { 5251 case HWRM_RING_ALLOC_TX: { 5252 struct bnxt_tx_ring_info *txr; 5253 5254 txr = container_of(ring, struct bnxt_tx_ring_info, 5255 tx_ring_struct); 5256 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 5257 /* Association of transmit ring with completion ring */ 5258 grp_info = &bp->grp_info[ring->grp_idx]; 5259 
req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 5260 req.length = cpu_to_le32(bp->tx_ring_mask + 1); 5261 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5262 req.queue_id = cpu_to_le16(ring->queue_id); 5263 break; 5264 } 5265 case HWRM_RING_ALLOC_RX: 5266 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5267 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 5268 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5269 u16 flags = 0; 5270 5271 /* Association of rx ring with stats context */ 5272 grp_info = &bp->grp_info[ring->grp_idx]; 5273 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 5274 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5275 req.enables |= cpu_to_le32( 5276 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5277 if (NET_IP_ALIGN == 2) 5278 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 5279 req.flags = cpu_to_le16(flags); 5280 } 5281 break; 5282 case HWRM_RING_ALLOC_AGG: 5283 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5284 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 5285 /* Association of agg ring with rx ring */ 5286 grp_info = &bp->grp_info[ring->grp_idx]; 5287 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 5288 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 5289 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5290 req.enables |= cpu_to_le32( 5291 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 5292 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5293 } else { 5294 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5295 } 5296 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 5297 break; 5298 case HWRM_RING_ALLOC_CMPL: 5299 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 5300 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5301 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5302 /* Association of cp ring with nq */ 5303 grp_info = &bp->grp_info[map_index]; 5304 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 5305 req.cq_handle = cpu_to_le64(ring->handle); 5306 req.enables |= cpu_to_le32( 5307 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 5308 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 5309 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5310 } 5311 break; 5312 case HWRM_RING_ALLOC_NQ: 5313 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 5314 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5315 if (bp->flags & BNXT_FLAG_USING_MSIX) 5316 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5317 break; 5318 default: 5319 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 5320 ring_type); 5321 return -1; 5322 } 5323 5324 mutex_lock(&bp->hwrm_cmd_lock); 5325 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5326 err = le16_to_cpu(resp->error_code); 5327 ring_id = le16_to_cpu(resp->ring_id); 5328 mutex_unlock(&bp->hwrm_cmd_lock); 5329 5330 if (rc || err) { 5331 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. 
rc:%x err:%x\n", 5332 ring_type, rc, err); 5333 return -EIO; 5334 } 5335 ring->fw_ring_id = ring_id; 5336 return rc; 5337 } 5338 5339 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 5340 { 5341 int rc; 5342 5343 if (BNXT_PF(bp)) { 5344 struct hwrm_func_cfg_input req = {0}; 5345 5346 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 5347 req.fid = cpu_to_le16(0xffff); 5348 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5349 req.async_event_cr = cpu_to_le16(idx); 5350 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5351 } else { 5352 struct hwrm_func_vf_cfg_input req = {0}; 5353 5354 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 5355 req.enables = 5356 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5357 req.async_event_cr = cpu_to_le16(idx); 5358 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5359 } 5360 return rc; 5361 } 5362 5363 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 5364 u32 map_idx, u32 xid) 5365 { 5366 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5367 if (BNXT_PF(bp)) 5368 db->doorbell = bp->bar1 + 0x10000; 5369 else 5370 db->doorbell = bp->bar1 + 0x4000; 5371 switch (ring_type) { 5372 case HWRM_RING_ALLOC_TX: 5373 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 5374 break; 5375 case HWRM_RING_ALLOC_RX: 5376 case HWRM_RING_ALLOC_AGG: 5377 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 5378 break; 5379 case HWRM_RING_ALLOC_CMPL: 5380 db->db_key64 = DBR_PATH_L2; 5381 break; 5382 case HWRM_RING_ALLOC_NQ: 5383 db->db_key64 = DBR_PATH_L2; 5384 break; 5385 } 5386 db->db_key64 |= (u64)xid << DBR_XID_SFT; 5387 } else { 5388 db->doorbell = bp->bar1 + map_idx * 0x80; 5389 switch (ring_type) { 5390 case HWRM_RING_ALLOC_TX: 5391 db->db_key32 = DB_KEY_TX; 5392 break; 5393 case HWRM_RING_ALLOC_RX: 5394 case HWRM_RING_ALLOC_AGG: 5395 db->db_key32 = DB_KEY_RX; 5396 break; 5397 case HWRM_RING_ALLOC_CMPL: 5398 db->db_key32 = DB_KEY_CP; 5399 break; 5400 } 5401 } 5402 } 5403 5404 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 5405 { 5406 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 5407 int i, rc = 0; 5408 u32 type; 5409 5410 if (bp->flags & BNXT_FLAG_CHIP_P5) 5411 type = HWRM_RING_ALLOC_NQ; 5412 else 5413 type = HWRM_RING_ALLOC_CMPL; 5414 for (i = 0; i < bp->cp_nr_rings; i++) { 5415 struct bnxt_napi *bnapi = bp->bnapi[i]; 5416 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5417 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5418 u32 map_idx = ring->map_idx; 5419 unsigned int vector; 5420 5421 vector = bp->irq_tbl[map_idx].vector; 5422 disable_irq_nosync(vector); 5423 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5424 if (rc) { 5425 enable_irq(vector); 5426 goto err_out; 5427 } 5428 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 5429 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5430 enable_irq(vector); 5431 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 5432 5433 if (!i) { 5434 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 5435 if (rc) 5436 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 5437 } 5438 } 5439 5440 type = HWRM_RING_ALLOC_TX; 5441 for (i = 0; i < bp->tx_nr_rings; i++) { 5442 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5443 struct bnxt_ring_struct *ring; 5444 u32 map_idx; 5445 5446 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5447 struct bnxt_napi *bnapi = txr->bnapi; 5448 struct bnxt_cp_ring_info *cpr, *cpr2; 5449 u32 type2 = HWRM_RING_ALLOC_CMPL; 5450 5451 cpr = &bnapi->cp_ring; 5452 cpr2 = 
cpr->cp_ring_arr[BNXT_TX_HDL]; 5453 ring = &cpr2->cp_ring_struct; 5454 ring->handle = BNXT_TX_HDL; 5455 map_idx = bnapi->index; 5456 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5457 if (rc) 5458 goto err_out; 5459 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5460 ring->fw_ring_id); 5461 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5462 } 5463 ring = &txr->tx_ring_struct; 5464 map_idx = i; 5465 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5466 if (rc) 5467 goto err_out; 5468 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 5469 } 5470 5471 type = HWRM_RING_ALLOC_RX; 5472 for (i = 0; i < bp->rx_nr_rings; i++) { 5473 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5474 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5475 struct bnxt_napi *bnapi = rxr->bnapi; 5476 u32 map_idx = bnapi->index; 5477 5478 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5479 if (rc) 5480 goto err_out; 5481 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 5482 /* If we have agg rings, post agg buffers first. */ 5483 if (!agg_rings) 5484 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5485 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 5486 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5487 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5488 u32 type2 = HWRM_RING_ALLOC_CMPL; 5489 struct bnxt_cp_ring_info *cpr2; 5490 5491 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; 5492 ring = &cpr2->cp_ring_struct; 5493 ring->handle = BNXT_RX_HDL; 5494 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5495 if (rc) 5496 goto err_out; 5497 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5498 ring->fw_ring_id); 5499 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5500 } 5501 } 5502 5503 if (agg_rings) { 5504 type = HWRM_RING_ALLOC_AGG; 5505 for (i = 0; i < bp->rx_nr_rings; i++) { 5506 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5507 struct bnxt_ring_struct *ring = 5508 &rxr->rx_agg_ring_struct; 5509 u32 grp_idx = ring->grp_idx; 5510 u32 map_idx = grp_idx + bp->rx_nr_rings; 5511 5512 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5513 if (rc) 5514 goto err_out; 5515 5516 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 5517 ring->fw_ring_id); 5518 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 5519 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5520 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 5521 } 5522 } 5523 err_out: 5524 return rc; 5525 } 5526 5527 static int hwrm_ring_free_send_msg(struct bnxt *bp, 5528 struct bnxt_ring_struct *ring, 5529 u32 ring_type, int cmpl_ring_id) 5530 { 5531 int rc; 5532 struct hwrm_ring_free_input req = {0}; 5533 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 5534 u16 error_code; 5535 5536 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 5537 return 0; 5538 5539 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 5540 req.ring_type = ring_type; 5541 req.ring_id = cpu_to_le16(ring->fw_ring_id); 5542 5543 mutex_lock(&bp->hwrm_cmd_lock); 5544 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5545 error_code = le16_to_cpu(resp->error_code); 5546 mutex_unlock(&bp->hwrm_cmd_lock); 5547 5548 if (rc || error_code) { 5549 netdev_err(bp->dev, "hwrm_ring_free type %d failed. 
rc:%x err:%x\n", 5550 ring_type, rc, error_code); 5551 return -EIO; 5552 } 5553 return 0; 5554 } 5555 5556 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 5557 { 5558 u32 type; 5559 int i; 5560 5561 if (!bp->bnapi) 5562 return; 5563 5564 for (i = 0; i < bp->tx_nr_rings; i++) { 5565 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5566 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 5567 5568 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5569 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 5570 5571 hwrm_ring_free_send_msg(bp, ring, 5572 RING_FREE_REQ_RING_TYPE_TX, 5573 close_path ? cmpl_ring_id : 5574 INVALID_HW_RING_ID); 5575 ring->fw_ring_id = INVALID_HW_RING_ID; 5576 } 5577 } 5578 5579 for (i = 0; i < bp->rx_nr_rings; i++) { 5580 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5581 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5582 u32 grp_idx = rxr->bnapi->index; 5583 5584 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5585 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5586 5587 hwrm_ring_free_send_msg(bp, ring, 5588 RING_FREE_REQ_RING_TYPE_RX, 5589 close_path ? cmpl_ring_id : 5590 INVALID_HW_RING_ID); 5591 ring->fw_ring_id = INVALID_HW_RING_ID; 5592 bp->grp_info[grp_idx].rx_fw_ring_id = 5593 INVALID_HW_RING_ID; 5594 } 5595 } 5596 5597 if (bp->flags & BNXT_FLAG_CHIP_P5) 5598 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 5599 else 5600 type = RING_FREE_REQ_RING_TYPE_RX; 5601 for (i = 0; i < bp->rx_nr_rings; i++) { 5602 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5603 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 5604 u32 grp_idx = rxr->bnapi->index; 5605 5606 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5607 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5608 5609 hwrm_ring_free_send_msg(bp, ring, type, 5610 close_path ? cmpl_ring_id : 5611 INVALID_HW_RING_ID); 5612 ring->fw_ring_id = INVALID_HW_RING_ID; 5613 bp->grp_info[grp_idx].agg_fw_ring_id = 5614 INVALID_HW_RING_ID; 5615 } 5616 } 5617 5618 /* The completion rings are about to be freed. After that the 5619 * IRQ doorbell will not work anymore. So we need to disable 5620 * IRQ here. 
5621 */ 5622 bnxt_disable_int_sync(bp); 5623 5624 if (bp->flags & BNXT_FLAG_CHIP_P5) 5625 type = RING_FREE_REQ_RING_TYPE_NQ; 5626 else 5627 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 5628 for (i = 0; i < bp->cp_nr_rings; i++) { 5629 struct bnxt_napi *bnapi = bp->bnapi[i]; 5630 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5631 struct bnxt_ring_struct *ring; 5632 int j; 5633 5634 for (j = 0; j < 2; j++) { 5635 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 5636 5637 if (cpr2) { 5638 ring = &cpr2->cp_ring_struct; 5639 if (ring->fw_ring_id == INVALID_HW_RING_ID) 5640 continue; 5641 hwrm_ring_free_send_msg(bp, ring, 5642 RING_FREE_REQ_RING_TYPE_L2_CMPL, 5643 INVALID_HW_RING_ID); 5644 ring->fw_ring_id = INVALID_HW_RING_ID; 5645 } 5646 } 5647 ring = &cpr->cp_ring_struct; 5648 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5649 hwrm_ring_free_send_msg(bp, ring, type, 5650 INVALID_HW_RING_ID); 5651 ring->fw_ring_id = INVALID_HW_RING_ID; 5652 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 5653 } 5654 } 5655 } 5656 5657 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5658 bool shared); 5659 5660 static int bnxt_hwrm_get_rings(struct bnxt *bp) 5661 { 5662 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5663 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5664 struct hwrm_func_qcfg_input req = {0}; 5665 int rc; 5666 5667 if (bp->hwrm_spec_code < 0x10601) 5668 return 0; 5669 5670 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5671 req.fid = cpu_to_le16(0xffff); 5672 mutex_lock(&bp->hwrm_cmd_lock); 5673 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5674 if (rc) { 5675 mutex_unlock(&bp->hwrm_cmd_lock); 5676 return rc; 5677 } 5678 5679 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5680 if (BNXT_NEW_RM(bp)) { 5681 u16 cp, stats; 5682 5683 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 5684 hw_resc->resv_hw_ring_grps = 5685 le32_to_cpu(resp->alloc_hw_ring_grps); 5686 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 5687 cp = le16_to_cpu(resp->alloc_cmpl_rings); 5688 stats = le16_to_cpu(resp->alloc_stat_ctx); 5689 hw_resc->resv_irqs = cp; 5690 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5691 int rx = hw_resc->resv_rx_rings; 5692 int tx = hw_resc->resv_tx_rings; 5693 5694 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5695 rx >>= 1; 5696 if (cp < (rx + tx)) { 5697 bnxt_trim_rings(bp, &rx, &tx, cp, false); 5698 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5699 rx <<= 1; 5700 hw_resc->resv_rx_rings = rx; 5701 hw_resc->resv_tx_rings = tx; 5702 } 5703 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 5704 hw_resc->resv_hw_ring_grps = rx; 5705 } 5706 hw_resc->resv_cp_rings = cp; 5707 hw_resc->resv_stat_ctxs = stats; 5708 } 5709 mutex_unlock(&bp->hwrm_cmd_lock); 5710 return 0; 5711 } 5712 5713 /* Caller must hold bp->hwrm_cmd_lock */ 5714 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 5715 { 5716 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5717 struct hwrm_func_qcfg_input req = {0}; 5718 int rc; 5719 5720 if (bp->hwrm_spec_code < 0x10601) 5721 return 0; 5722 5723 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5724 req.fid = cpu_to_le16(fid); 5725 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5726 if (!rc) 5727 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5728 5729 return rc; 5730 } 5731 5732 static bool bnxt_rfs_supported(struct bnxt *bp); 5733 5734 static void 5735 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, 5736 int 
tx_rings, int rx_rings, int ring_grps, 5737 int cp_rings, int stats, int vnics) 5738 { 5739 u32 enables = 0; 5740 5741 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); 5742 req->fid = cpu_to_le16(0xffff); 5743 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5744 req->num_tx_rings = cpu_to_le16(tx_rings); 5745 if (BNXT_NEW_RM(bp)) { 5746 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 5747 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5748 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5749 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 5750 enables |= tx_rings + ring_grps ? 5751 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5752 enables |= rx_rings ? 5753 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5754 } else { 5755 enables |= cp_rings ? 5756 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5757 enables |= ring_grps ? 5758 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 5759 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5760 } 5761 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 5762 5763 req->num_rx_rings = cpu_to_le16(rx_rings); 5764 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5765 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5766 req->num_msix = cpu_to_le16(cp_rings); 5767 req->num_rsscos_ctxs = 5768 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5769 } else { 5770 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5771 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5772 req->num_rsscos_ctxs = cpu_to_le16(1); 5773 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 5774 bnxt_rfs_supported(bp)) 5775 req->num_rsscos_ctxs = 5776 cpu_to_le16(ring_grps + 1); 5777 } 5778 req->num_stat_ctxs = cpu_to_le16(stats); 5779 req->num_vnics = cpu_to_le16(vnics); 5780 } 5781 req->enables = cpu_to_le32(enables); 5782 } 5783 5784 static void 5785 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, 5786 struct hwrm_func_vf_cfg_input *req, int tx_rings, 5787 int rx_rings, int ring_grps, int cp_rings, 5788 int stats, int vnics) 5789 { 5790 u32 enables = 0; 5791 5792 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); 5793 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5794 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 5795 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5796 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5797 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5798 enables |= tx_rings + ring_grps ? 5799 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5800 } else { 5801 enables |= cp_rings ? 5802 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5803 enables |= ring_grps ? 5804 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 5805 } 5806 enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 5807 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 5808 5809 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 5810 req->num_tx_rings = cpu_to_le16(tx_rings); 5811 req->num_rx_rings = cpu_to_le16(rx_rings); 5812 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5813 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5814 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5815 } else { 5816 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5817 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5818 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 5819 } 5820 req->num_stat_ctxs = cpu_to_le16(stats); 5821 req->num_vnics = cpu_to_le16(vnics); 5822 5823 req->enables = cpu_to_le32(enables); 5824 } 5825 5826 static int 5827 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5828 int ring_grps, int cp_rings, int stats, int vnics) 5829 { 5830 struct hwrm_func_cfg_input req = {0}; 5831 int rc; 5832 5833 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5834 cp_rings, stats, vnics); 5835 if (!req.enables) 5836 return 0; 5837 5838 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5839 if (rc) 5840 return rc; 5841 5842 if (bp->hwrm_spec_code < 0x10601) 5843 bp->hw_resc.resv_tx_rings = tx_rings; 5844 5845 rc = bnxt_hwrm_get_rings(bp); 5846 return rc; 5847 } 5848 5849 static int 5850 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5851 int ring_grps, int cp_rings, int stats, int vnics) 5852 { 5853 struct hwrm_func_vf_cfg_input req = {0}; 5854 int rc; 5855 5856 if (!BNXT_NEW_RM(bp)) { 5857 bp->hw_resc.resv_tx_rings = tx_rings; 5858 return 0; 5859 } 5860 5861 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5862 cp_rings, stats, vnics); 5863 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5864 if (rc) 5865 return rc; 5866 5867 rc = bnxt_hwrm_get_rings(bp); 5868 return rc; 5869 } 5870 5871 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 5872 int cp, int stat, int vnic) 5873 { 5874 if (BNXT_PF(bp)) 5875 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 5876 vnic); 5877 else 5878 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 5879 vnic); 5880 } 5881 5882 int bnxt_nq_rings_in_use(struct bnxt *bp) 5883 { 5884 int cp = bp->cp_nr_rings; 5885 int ulp_msix, ulp_base; 5886 5887 ulp_msix = bnxt_get_ulp_msix_num(bp); 5888 if (ulp_msix) { 5889 ulp_base = bnxt_get_ulp_msix_base(bp); 5890 cp += ulp_msix; 5891 if ((ulp_base + ulp_msix) > cp) 5892 cp = ulp_base + ulp_msix; 5893 } 5894 return cp; 5895 } 5896 5897 static int bnxt_cp_rings_in_use(struct bnxt *bp) 5898 { 5899 int cp; 5900 5901 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 5902 return bnxt_nq_rings_in_use(bp); 5903 5904 cp = bp->tx_nr_rings + bp->rx_nr_rings; 5905 return cp; 5906 } 5907 5908 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 5909 { 5910 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); 5911 int cp = bp->cp_nr_rings; 5912 5913 if (!ulp_stat) 5914 return cp; 5915 5916 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) 5917 return bnxt_get_ulp_msix_base(bp) + ulp_stat; 5918 5919 return cp + ulp_stat; 5920 } 5921 5922 static bool bnxt_need_reserve_rings(struct bnxt *bp) 5923 { 5924 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5925 int cp = bnxt_cp_rings_in_use(bp); 5926 int nq = bnxt_nq_rings_in_use(bp); 5927 int rx = bp->rx_nr_rings, stat; 5928 int vnic = 1, grp = rx; 5929 5930 if (bp->hwrm_spec_code < 0x10601) 5931 return false; 
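/* Rings must be re-reserved whenever a currently reserved count no longer
 * matches what the driver needs: TX rings always, and on resource-managed
 * (NEW_RM) firmware also RX rings, completion rings, VNICs, stat contexts
 * and (on non-P5 chips) ring groups; on P5 PFs the reserved MSI-X/NQ count
 * is checked as well.
 */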
5932 5933 if (hw_resc->resv_tx_rings != bp->tx_nr_rings) 5934 return true; 5935 5936 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5937 vnic = rx + 1; 5938 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5939 rx <<= 1; 5940 stat = bnxt_get_func_stat_ctxs(bp); 5941 if (BNXT_NEW_RM(bp) && 5942 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 5943 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 5944 (hw_resc->resv_hw_ring_grps != grp && 5945 !(bp->flags & BNXT_FLAG_CHIP_P5)))) 5946 return true; 5947 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && 5948 hw_resc->resv_irqs != nq) 5949 return true; 5950 return false; 5951 } 5952 5953 static int __bnxt_reserve_rings(struct bnxt *bp) 5954 { 5955 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5956 int cp = bnxt_nq_rings_in_use(bp); 5957 int tx = bp->tx_nr_rings; 5958 int rx = bp->rx_nr_rings; 5959 int grp, rx_rings, rc; 5960 int vnic = 1, stat; 5961 bool sh = false; 5962 5963 if (!bnxt_need_reserve_rings(bp)) 5964 return 0; 5965 5966 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5967 sh = true; 5968 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5969 vnic = rx + 1; 5970 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5971 rx <<= 1; 5972 grp = bp->rx_nr_rings; 5973 stat = bnxt_get_func_stat_ctxs(bp); 5974 5975 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 5976 if (rc) 5977 return rc; 5978 5979 tx = hw_resc->resv_tx_rings; 5980 if (BNXT_NEW_RM(bp)) { 5981 rx = hw_resc->resv_rx_rings; 5982 cp = hw_resc->resv_irqs; 5983 grp = hw_resc->resv_hw_ring_grps; 5984 vnic = hw_resc->resv_vnics; 5985 stat = hw_resc->resv_stat_ctxs; 5986 } 5987 5988 rx_rings = rx; 5989 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5990 if (rx >= 2) { 5991 rx_rings = rx >> 1; 5992 } else { 5993 if (netif_running(bp->dev)) 5994 return -ENOMEM; 5995 5996 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 5997 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 5998 bp->dev->hw_features &= ~NETIF_F_LRO; 5999 bp->dev->features &= ~NETIF_F_LRO; 6000 bnxt_set_ring_params(bp); 6001 } 6002 } 6003 rx_rings = min_t(int, rx_rings, grp); 6004 cp = min_t(int, cp, bp->cp_nr_rings); 6005 if (stat > bnxt_get_ulp_stat_ctxs(bp)) 6006 stat -= bnxt_get_ulp_stat_ctxs(bp); 6007 cp = min_t(int, cp, stat); 6008 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 6009 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6010 rx = rx_rings << 1; 6011 cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; 6012 bp->tx_nr_rings = tx; 6013 bp->rx_nr_rings = rx_rings; 6014 bp->cp_nr_rings = cp; 6015 6016 if (!tx || !rx || !cp || !grp || !vnic || !stat) 6017 return -ENOMEM; 6018 6019 return rc; 6020 } 6021 6022 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6023 int ring_grps, int cp_rings, int stats, 6024 int vnics) 6025 { 6026 struct hwrm_func_vf_cfg_input req = {0}; 6027 u32 flags; 6028 int rc; 6029 6030 if (!BNXT_NEW_RM(bp)) 6031 return 0; 6032 6033 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6034 cp_rings, stats, vnics); 6035 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 6036 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6037 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6038 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6039 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 6040 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 6041 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6042 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6043 6044 req.flags = cpu_to_le32(flags); 6045 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6046 return rc; 6047 } 6048 6049 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6050 int ring_grps, int cp_rings, int stats, 6051 int vnics) 6052 { 6053 struct hwrm_func_cfg_input req = {0}; 6054 u32 flags; 6055 int rc; 6056 6057 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6058 cp_rings, stats, vnics); 6059 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 6060 if (BNXT_NEW_RM(bp)) { 6061 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6062 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6063 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6064 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 6065 if (bp->flags & BNXT_FLAG_CHIP_P5) 6066 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 6067 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 6068 else 6069 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6070 } 6071 6072 req.flags = cpu_to_le32(flags); 6073 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6074 return rc; 6075 } 6076 6077 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6078 int ring_grps, int cp_rings, int stats, 6079 int vnics) 6080 { 6081 if (bp->hwrm_spec_code < 0x10801) 6082 return 0; 6083 6084 if (BNXT_PF(bp)) 6085 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 6086 ring_grps, cp_rings, stats, 6087 vnics); 6088 6089 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6090 cp_rings, stats, vnics); 6091 } 6092 6093 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 6094 { 6095 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6096 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6097 struct hwrm_ring_aggint_qcaps_input req = {0}; 6098 int rc; 6099 6100 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 6101 coal_cap->num_cmpl_dma_aggr_max = 63; 6102 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 6103 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 6104 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 6105 coal_cap->int_lat_tmr_min_max = 65535; 6106 coal_cap->int_lat_tmr_max_max = 65535; 6107 coal_cap->num_cmpl_aggr_int_max = 65535; 6108 coal_cap->timer_units = 80; 6109 6110 if (bp->hwrm_spec_code < 0x10902) 6111 return; 6112 6113 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); 6114 mutex_lock(&bp->hwrm_cmd_lock); 6115 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6116 if (!rc) { 6117 
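/* Firmware supports RING_AGGINT_QCAPS: override the legacy defaults set
 * above with the limits advertised in the response.
 */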
coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6118 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6119 coal_cap->num_cmpl_dma_aggr_max =
6120 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6121 coal_cap->num_cmpl_dma_aggr_during_int_max =
6122 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6123 coal_cap->cmpl_aggr_dma_tmr_max =
6124 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6125 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6126 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6127 coal_cap->int_lat_tmr_min_max =
6128 le16_to_cpu(resp->int_lat_tmr_min_max);
6129 coal_cap->int_lat_tmr_max_max =
6130 le16_to_cpu(resp->int_lat_tmr_max_max);
6131 coal_cap->num_cmpl_aggr_int_max =
6132 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6133 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6134 }
6135 mutex_unlock(&bp->hwrm_cmd_lock);
6136 }
6137
6138 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6139 {
6140 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6141
6142 return usec * 1000 / coal_cap->timer_units;
6143 }
6144
6145 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6146 struct bnxt_coal *hw_coal,
6147 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6148 {
6149 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6150 u32 cmpl_params = coal_cap->cmpl_params;
6151 u16 val, tmr, max, flags = 0;
6152
6153 max = hw_coal->bufs_per_record * 128;
6154 if (hw_coal->budget)
6155 max = hw_coal->bufs_per_record * hw_coal->budget;
6156 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6157
6158 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6159 req->num_cmpl_aggr_int = cpu_to_le16(val);
6160
6161 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6162 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6163
6164 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6165 coal_cap->num_cmpl_dma_aggr_during_int_max);
6166 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6167
6168 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6169 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6170 req->int_lat_tmr_max = cpu_to_le16(tmr);
6171
6172 /* min timer set to 1/2 of interrupt timer */
6173 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6174 val = tmr / 2;
6175 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6176 req->int_lat_tmr_min = cpu_to_le16(val);
6177 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6178 }
6179
6180 /* buf timer set to 1/4 of interrupt timer */
6181 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6182 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6183
6184 if (cmpl_params &
6185 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6186 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6187 val = clamp_t(u16, tmr, 1,
6188 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6189 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6190 req->enables |=
6191 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6192 }
6193
6194 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6195 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6196 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6197 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6198 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6199 req->flags = cpu_to_le16(flags);
6200 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6201 }
6202
6203 /* Caller holds bp->hwrm_cmd_lock */ 6204 static int
__bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 6205 struct bnxt_coal *hw_coal) 6206 { 6207 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; 6208 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6209 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6210 u32 nq_params = coal_cap->nq_params; 6211 u16 tmr; 6212 6213 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 6214 return 0; 6215 6216 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, 6217 -1, -1); 6218 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 6219 req.flags = 6220 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 6221 6222 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 6223 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 6224 req.int_lat_tmr_min = cpu_to_le16(tmr); 6225 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6226 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6227 } 6228 6229 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 6230 { 6231 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; 6232 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6233 struct bnxt_coal coal; 6234 6235 /* Tick values in micro seconds. 6236 * 1 coal_buf x bufs_per_record = 1 completion record. 6237 */ 6238 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 6239 6240 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 6241 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 6242 6243 if (!bnapi->rx_ring) 6244 return -ENODEV; 6245 6246 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6247 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6248 6249 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); 6250 6251 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 6252 6253 return hwrm_send_message(bp, &req_rx, sizeof(req_rx), 6254 HWRM_CMD_TIMEOUT); 6255 } 6256 6257 int bnxt_hwrm_set_coal(struct bnxt *bp) 6258 { 6259 int i, rc = 0; 6260 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 6261 req_tx = {0}, *req; 6262 6263 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6264 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6265 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 6266 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6267 6268 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); 6269 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); 6270 6271 mutex_lock(&bp->hwrm_cmd_lock); 6272 for (i = 0; i < bp->cp_nr_rings; i++) { 6273 struct bnxt_napi *bnapi = bp->bnapi[i]; 6274 struct bnxt_coal *hw_coal; 6275 u16 ring_id; 6276 6277 req = &req_rx; 6278 if (!bnapi->rx_ring) { 6279 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6280 req = &req_tx; 6281 } else { 6282 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 6283 } 6284 req->ring_id = cpu_to_le16(ring_id); 6285 6286 rc = _hwrm_send_message(bp, req, sizeof(*req), 6287 HWRM_CMD_TIMEOUT); 6288 if (rc) 6289 break; 6290 6291 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6292 continue; 6293 6294 if (bnapi->rx_ring && bnapi->tx_ring) { 6295 req = &req_tx; 6296 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6297 req->ring_id = cpu_to_le16(ring_id); 6298 rc = _hwrm_send_message(bp, req, sizeof(*req), 6299 HWRM_CMD_TIMEOUT); 6300 if (rc) 6301 break; 6302 } 6303 if (bnapi->rx_ring) 6304 hw_coal = &bp->rx_coal; 6305 else 6306 hw_coal = &bp->tx_coal; 6307 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 6308 } 6309 mutex_unlock(&bp->hwrm_cmd_lock); 6310 return rc; 6311 } 6312 6313 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 6314 { 6315 int rc = 
0, i; 6316 struct hwrm_stat_ctx_free_input req = {0}; 6317 6318 if (!bp->bnapi) 6319 return 0; 6320 6321 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6322 return 0; 6323 6324 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 6325 6326 mutex_lock(&bp->hwrm_cmd_lock); 6327 for (i = 0; i < bp->cp_nr_rings; i++) { 6328 struct bnxt_napi *bnapi = bp->bnapi[i]; 6329 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6330 6331 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 6332 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 6333 6334 rc = _hwrm_send_message(bp, &req, sizeof(req), 6335 HWRM_CMD_TIMEOUT); 6336 6337 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 6338 } 6339 } 6340 mutex_unlock(&bp->hwrm_cmd_lock); 6341 return rc; 6342 } 6343 6344 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 6345 { 6346 int rc = 0, i; 6347 struct hwrm_stat_ctx_alloc_input req = {0}; 6348 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 6349 6350 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6351 return 0; 6352 6353 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 6354 6355 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 6356 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 6357 6358 mutex_lock(&bp->hwrm_cmd_lock); 6359 for (i = 0; i < bp->cp_nr_rings; i++) { 6360 struct bnxt_napi *bnapi = bp->bnapi[i]; 6361 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6362 6363 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 6364 6365 rc = _hwrm_send_message(bp, &req, sizeof(req), 6366 HWRM_CMD_TIMEOUT); 6367 if (rc) 6368 break; 6369 6370 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 6371 6372 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 6373 } 6374 mutex_unlock(&bp->hwrm_cmd_lock); 6375 return rc; 6376 } 6377 6378 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 6379 { 6380 struct hwrm_func_qcfg_input req = {0}; 6381 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 6382 u16 flags; 6383 int rc; 6384 6385 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 6386 req.fid = cpu_to_le16(0xffff); 6387 mutex_lock(&bp->hwrm_cmd_lock); 6388 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6389 if (rc) 6390 goto func_qcfg_exit; 6391 6392 #ifdef CONFIG_BNXT_SRIOV 6393 if (BNXT_VF(bp)) { 6394 struct bnxt_vf_info *vf = &bp->vf; 6395 6396 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 6397 } else { 6398 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 6399 } 6400 #endif 6401 flags = le16_to_cpu(resp->flags); 6402 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 6403 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 6404 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 6405 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 6406 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 6407 } 6408 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 6409 bp->flags |= BNXT_FLAG_MULTI_HOST; 6410 6411 switch (resp->port_partition_type) { 6412 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 6413 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 6414 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 6415 bp->port_partition_type = resp->port_partition_type; 6416 break; 6417 } 6418 if (bp->hwrm_spec_code < 0x10707 || 6419 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 6420 bp->br_mode = BRIDGE_MODE_VEB; 6421 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 6422 bp->br_mode = BRIDGE_MODE_VEPA; 6423 else 6424 bp->br_mode = BRIDGE_MODE_UNDEF; 6425 6426 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 6427 if (!bp->max_mtu) 
6428 bp->max_mtu = BNXT_MAX_MTU; 6429 6430 func_qcfg_exit: 6431 mutex_unlock(&bp->hwrm_cmd_lock); 6432 return rc; 6433 } 6434 6435 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 6436 { 6437 struct hwrm_func_backing_store_qcaps_input req = {0}; 6438 struct hwrm_func_backing_store_qcaps_output *resp = 6439 bp->hwrm_cmd_resp_addr; 6440 int rc; 6441 6442 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 6443 return 0; 6444 6445 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); 6446 mutex_lock(&bp->hwrm_cmd_lock); 6447 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6448 if (!rc) { 6449 struct bnxt_ctx_pg_info *ctx_pg; 6450 struct bnxt_ctx_mem_info *ctx; 6451 int i; 6452 6453 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 6454 if (!ctx) { 6455 rc = -ENOMEM; 6456 goto ctx_err; 6457 } 6458 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL); 6459 if (!ctx_pg) { 6460 kfree(ctx); 6461 rc = -ENOMEM; 6462 goto ctx_err; 6463 } 6464 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++) 6465 ctx->tqm_mem[i] = ctx_pg; 6466 6467 bp->ctx = ctx; 6468 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); 6469 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 6470 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 6471 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size); 6472 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 6473 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries); 6474 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size); 6475 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 6476 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries); 6477 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size); 6478 ctx->vnic_max_vnic_entries = 6479 le16_to_cpu(resp->vnic_max_vnic_entries); 6480 ctx->vnic_max_ring_table_entries = 6481 le16_to_cpu(resp->vnic_max_ring_table_entries); 6482 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size); 6483 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries); 6484 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size); 6485 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size); 6486 ctx->tqm_min_entries_per_ring = 6487 le32_to_cpu(resp->tqm_min_entries_per_ring); 6488 ctx->tqm_max_entries_per_ring = 6489 le32_to_cpu(resp->tqm_max_entries_per_ring); 6490 ctx->tqm_entries_multiple = resp->tqm_entries_multiple; 6491 if (!ctx->tqm_entries_multiple) 6492 ctx->tqm_entries_multiple = 1; 6493 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); 6494 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); 6495 ctx->mrav_num_entries_units = 6496 le16_to_cpu(resp->mrav_num_entries_units); 6497 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); 6498 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); 6499 ctx->ctx_kind_initializer = resp->ctx_kind_initializer; 6500 } else { 6501 rc = 0; 6502 } 6503 ctx_err: 6504 mutex_unlock(&bp->hwrm_cmd_lock); 6505 return rc; 6506 } 6507 6508 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 6509 __le64 *pg_dir) 6510 { 6511 u8 pg_size = 0; 6512 6513 if (BNXT_PAGE_SHIFT == 13) 6514 pg_size = 1 << 4; 6515 else if (BNXT_PAGE_SIZE == 16) 6516 pg_size = 2 << 4; 6517 6518 *pg_attr = pg_size; 6519 if (rmem->depth >= 1) { 6520 if (rmem->depth == 2) 6521 *pg_attr |= 2; 6522 else 6523 *pg_attr |= 1; 6524 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 6525 } else { 6526 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 6527 } 6528 } 6529 6530 #define 
FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 6531 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 6532 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 6533 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 6534 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 6535 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 6536 6537 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 6538 { 6539 struct hwrm_func_backing_store_cfg_input req = {0}; 6540 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6541 struct bnxt_ctx_pg_info *ctx_pg; 6542 __le32 *num_entries; 6543 __le64 *pg_dir; 6544 u32 flags = 0; 6545 u8 *pg_attr; 6546 int i, rc; 6547 u32 ena; 6548 6549 if (!ctx) 6550 return 0; 6551 6552 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); 6553 req.enables = cpu_to_le32(enables); 6554 6555 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 6556 ctx_pg = &ctx->qp_mem; 6557 req.qp_num_entries = cpu_to_le32(ctx_pg->entries); 6558 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); 6559 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); 6560 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); 6561 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6562 &req.qpc_pg_size_qpc_lvl, 6563 &req.qpc_page_dir); 6564 } 6565 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 6566 ctx_pg = &ctx->srq_mem; 6567 req.srq_num_entries = cpu_to_le32(ctx_pg->entries); 6568 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); 6569 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); 6570 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6571 &req.srq_pg_size_srq_lvl, 6572 &req.srq_page_dir); 6573 } 6574 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 6575 ctx_pg = &ctx->cq_mem; 6576 req.cq_num_entries = cpu_to_le32(ctx_pg->entries); 6577 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); 6578 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); 6579 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, 6580 &req.cq_page_dir); 6581 } 6582 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 6583 ctx_pg = &ctx->vnic_mem; 6584 req.vnic_num_vnic_entries = 6585 cpu_to_le16(ctx->vnic_max_vnic_entries); 6586 req.vnic_num_ring_table_entries = 6587 cpu_to_le16(ctx->vnic_max_ring_table_entries); 6588 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); 6589 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6590 &req.vnic_pg_size_vnic_lvl, 6591 &req.vnic_page_dir); 6592 } 6593 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 6594 ctx_pg = &ctx->stat_mem; 6595 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); 6596 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); 6597 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6598 &req.stat_pg_size_stat_lvl, 6599 &req.stat_page_dir); 6600 } 6601 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 6602 ctx_pg = &ctx->mrav_mem; 6603 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); 6604 if (ctx->mrav_num_entries_units) 6605 flags |= 6606 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 6607 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); 6608 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6609 &req.mrav_pg_size_mrav_lvl, 6610 &req.mrav_page_dir); 6611 } 6612 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 6613 ctx_pg = &ctx->tim_mem; 6614 req.tim_num_entries = cpu_to_le32(ctx_pg->entries); 6615 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); 6616 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6617 &req.tim_pg_size_tim_lvl, 6618 &req.tim_page_dir); 6619 } 6620 for (i = 0, num_entries = 
&req.tqm_sp_num_entries, 6621 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, 6622 pg_dir = &req.tqm_sp_page_dir, 6623 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; 6624 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 6625 if (!(enables & ena)) 6626 continue; 6627 6628 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); 6629 ctx_pg = ctx->tqm_mem[i]; 6630 *num_entries = cpu_to_le32(ctx_pg->entries); 6631 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 6632 } 6633 req.flags = cpu_to_le32(flags); 6634 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6635 return rc; 6636 } 6637 6638 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 6639 struct bnxt_ctx_pg_info *ctx_pg) 6640 { 6641 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6642 6643 rmem->page_size = BNXT_PAGE_SIZE; 6644 rmem->pg_arr = ctx_pg->ctx_pg_arr; 6645 rmem->dma_arr = ctx_pg->ctx_dma_arr; 6646 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 6647 if (rmem->depth >= 1) 6648 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 6649 return bnxt_alloc_ring(bp, rmem); 6650 } 6651 6652 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 6653 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 6654 u8 depth, bool use_init_val) 6655 { 6656 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6657 int rc; 6658 6659 if (!mem_size) 6660 return 0; 6661 6662 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6663 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 6664 ctx_pg->nr_pages = 0; 6665 return -EINVAL; 6666 } 6667 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 6668 int nr_tbls, i; 6669 6670 rmem->depth = 2; 6671 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 6672 GFP_KERNEL); 6673 if (!ctx_pg->ctx_pg_tbl) 6674 return -ENOMEM; 6675 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 6676 rmem->nr_pages = nr_tbls; 6677 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 6678 if (rc) 6679 return rc; 6680 for (i = 0; i < nr_tbls; i++) { 6681 struct bnxt_ctx_pg_info *pg_tbl; 6682 6683 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 6684 if (!pg_tbl) 6685 return -ENOMEM; 6686 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 6687 rmem = &pg_tbl->ring_mem; 6688 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 6689 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 6690 rmem->depth = 1; 6691 rmem->nr_pages = MAX_CTX_PAGES; 6692 if (use_init_val) 6693 rmem->init_val = bp->ctx->ctx_kind_initializer; 6694 if (i == (nr_tbls - 1)) { 6695 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 6696 6697 if (rem) 6698 rmem->nr_pages = rem; 6699 } 6700 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 6701 if (rc) 6702 break; 6703 } 6704 } else { 6705 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6706 if (rmem->nr_pages > 1 || depth) 6707 rmem->depth = 1; 6708 if (use_init_val) 6709 rmem->init_val = bp->ctx->ctx_kind_initializer; 6710 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 6711 } 6712 return rc; 6713 } 6714 6715 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 6716 struct bnxt_ctx_pg_info *ctx_pg) 6717 { 6718 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6719 6720 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 6721 ctx_pg->ctx_pg_tbl) { 6722 int i, nr_tbls = rmem->nr_pages; 6723 6724 for (i = 0; i < nr_tbls; i++) { 6725 struct bnxt_ctx_pg_info *pg_tbl; 6726 struct bnxt_ring_mem_info *rmem2; 6727 6728 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 6729 if (!pg_tbl) 6730 continue; 6731 rmem2 = &pg_tbl->ring_mem; 6732 bnxt_free_ring(bp, rmem2); 6733 ctx_pg->ctx_pg_arr[i] = NULL; 6734 kfree(pg_tbl); 6735 ctx_pg->ctx_pg_tbl[i] = NULL; 6736 } 6737 
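/* All second-level page tables have been freed above; now release the
 * directory array itself before freeing the top-level ring memory.
 */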
kfree(ctx_pg->ctx_pg_tbl); 6738 ctx_pg->ctx_pg_tbl = NULL; 6739 } 6740 bnxt_free_ring(bp, rmem); 6741 ctx_pg->nr_pages = 0; 6742 } 6743 6744 static void bnxt_free_ctx_mem(struct bnxt *bp) 6745 { 6746 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6747 int i; 6748 6749 if (!ctx) 6750 return; 6751 6752 if (ctx->tqm_mem[0]) { 6753 for (i = 0; i < bp->max_q + 1; i++) 6754 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); 6755 kfree(ctx->tqm_mem[0]); 6756 ctx->tqm_mem[0] = NULL; 6757 } 6758 6759 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); 6760 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); 6761 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); 6762 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); 6763 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); 6764 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); 6765 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); 6766 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 6767 } 6768 6769 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 6770 { 6771 struct bnxt_ctx_pg_info *ctx_pg; 6772 struct bnxt_ctx_mem_info *ctx; 6773 u32 mem_size, ena, entries; 6774 u32 num_mr, num_ah; 6775 u32 extra_srqs = 0; 6776 u32 extra_qps = 0; 6777 u8 pg_lvl = 1; 6778 int i, rc; 6779 6780 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 6781 if (rc) { 6782 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 6783 rc); 6784 return rc; 6785 } 6786 ctx = bp->ctx; 6787 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 6788 return 0; 6789 6790 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 6791 pg_lvl = 2; 6792 extra_qps = 65536; 6793 extra_srqs = 8192; 6794 } 6795 6796 ctx_pg = &ctx->qp_mem; 6797 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + 6798 extra_qps; 6799 mem_size = ctx->qp_entry_size * ctx_pg->entries; 6800 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6801 if (rc) 6802 return rc; 6803 6804 ctx_pg = &ctx->srq_mem; 6805 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; 6806 mem_size = ctx->srq_entry_size * ctx_pg->entries; 6807 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6808 if (rc) 6809 return rc; 6810 6811 ctx_pg = &ctx->cq_mem; 6812 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; 6813 mem_size = ctx->cq_entry_size * ctx_pg->entries; 6814 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6815 if (rc) 6816 return rc; 6817 6818 ctx_pg = &ctx->vnic_mem; 6819 ctx_pg->entries = ctx->vnic_max_vnic_entries + 6820 ctx->vnic_max_ring_table_entries; 6821 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 6822 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 6823 if (rc) 6824 return rc; 6825 6826 ctx_pg = &ctx->stat_mem; 6827 ctx_pg->entries = ctx->stat_max_entries; 6828 mem_size = ctx->stat_entry_size * ctx_pg->entries; 6829 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 6830 if (rc) 6831 return rc; 6832 6833 ena = 0; 6834 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 6835 goto skip_rdma; 6836 6837 ctx_pg = &ctx->mrav_mem; 6838 /* 128K extra is needed to accommodate static AH context 6839 * allocation by f/w. 
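* Below, 256K MR entries plus 128K AH entries are reserved. When the
 * firmware reports mrav_num_entries_units, ctx_pg->entries is re-encoded
 * as ((num_mr / units) << 16) | (num_ah / units).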
6840 */ 6841 num_mr = 1024 * 256; 6842 num_ah = 1024 * 128; 6843 ctx_pg->entries = num_mr + num_ah; 6844 mem_size = ctx->mrav_entry_size * ctx_pg->entries; 6845 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true); 6846 if (rc) 6847 return rc; 6848 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 6849 if (ctx->mrav_num_entries_units) 6850 ctx_pg->entries = 6851 ((num_mr / ctx->mrav_num_entries_units) << 16) | 6852 (num_ah / ctx->mrav_num_entries_units); 6853 6854 ctx_pg = &ctx->tim_mem; 6855 ctx_pg->entries = ctx->qp_mem.entries; 6856 mem_size = ctx->tim_entry_size * ctx_pg->entries; 6857 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 6858 if (rc) 6859 return rc; 6860 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 6861 6862 skip_rdma: 6863 entries = ctx->qp_max_l2_entries + extra_qps; 6864 entries = roundup(entries, ctx->tqm_entries_multiple); 6865 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, 6866 ctx->tqm_max_entries_per_ring); 6867 for (i = 0; i < bp->max_q + 1; i++) { 6868 ctx_pg = ctx->tqm_mem[i]; 6869 ctx_pg->entries = entries; 6870 mem_size = ctx->tqm_entry_size * entries; 6871 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 6872 if (rc) 6873 return rc; 6874 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 6875 } 6876 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 6877 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 6878 if (rc) 6879 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 6880 rc); 6881 else 6882 ctx->flags |= BNXT_CTX_FLAG_INITED; 6883 6884 return 0; 6885 } 6886 6887 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 6888 { 6889 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6890 struct hwrm_func_resource_qcaps_input req = {0}; 6891 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6892 int rc; 6893 6894 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); 6895 req.fid = cpu_to_le16(0xffff); 6896 6897 mutex_lock(&bp->hwrm_cmd_lock); 6898 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), 6899 HWRM_CMD_TIMEOUT); 6900 if (rc) 6901 goto hwrm_func_resc_qcaps_exit; 6902 6903 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 6904 if (!all) 6905 goto hwrm_func_resc_qcaps_exit; 6906 6907 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 6908 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6909 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 6910 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6911 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 6912 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6913 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 6914 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6915 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 6916 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 6917 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 6918 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6919 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 6920 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6921 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 6922 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6923 6924 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6925 u16 max_msix = le16_to_cpu(resp->max_msix); 6926 6927 hw_resc->max_nqs = max_msix; 6928 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 6929 } 6930 6931 if (BNXT_PF(bp)) { 6932 struct bnxt_pf_info *pf = &bp->pf; 6933 6934 
pf->vf_resv_strategy = 6935 le16_to_cpu(resp->vf_reservation_strategy); 6936 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 6937 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 6938 } 6939 hwrm_func_resc_qcaps_exit: 6940 mutex_unlock(&bp->hwrm_cmd_lock); 6941 return rc; 6942 } 6943 6944 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 6945 { 6946 int rc = 0; 6947 struct hwrm_func_qcaps_input req = {0}; 6948 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6949 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6950 u32 flags; 6951 6952 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 6953 req.fid = cpu_to_le16(0xffff); 6954 6955 mutex_lock(&bp->hwrm_cmd_lock); 6956 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6957 if (rc) 6958 goto hwrm_func_qcaps_exit; 6959 6960 flags = le32_to_cpu(resp->flags); 6961 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 6962 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 6963 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 6964 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 6965 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 6966 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 6967 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 6968 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 6969 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 6970 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 6971 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 6972 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 6973 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 6974 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 6975 6976 bp->tx_push_thresh = 0; 6977 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) 6978 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 6979 6980 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6981 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6982 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6983 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6984 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 6985 if (!hw_resc->max_hw_ring_grps) 6986 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 6987 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6988 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6989 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6990 6991 if (BNXT_PF(bp)) { 6992 struct bnxt_pf_info *pf = &bp->pf; 6993 6994 pf->fw_fid = le16_to_cpu(resp->fid); 6995 pf->port_id = le16_to_cpu(resp->port_id); 6996 bp->dev->dev_port = pf->port_id; 6997 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 6998 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 6999 pf->max_vfs = le16_to_cpu(resp->max_vfs); 7000 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 7001 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 7002 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 7003 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 7004 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 7005 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 7006 bp->flags &= ~BNXT_FLAG_WOL_CAP; 7007 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 7008 bp->flags |= BNXT_FLAG_WOL_CAP; 7009 } else { 7010 #ifdef CONFIG_BNXT_SRIOV 7011 struct bnxt_vf_info *vf = &bp->vf; 7012 7013 vf->fw_fid = le16_to_cpu(resp->fid); 7014 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 7015 #endif 7016 } 7017 7018 hwrm_func_qcaps_exit: 7019 mutex_unlock(&bp->hwrm_cmd_lock); 7020 return rc; 7021 } 7022 7023 
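/* bnxt_hwrm_func_qcaps() below wraps __bnxt_hwrm_func_qcaps(): it also
 * queries the queue/port configuration and, on HWRM 1.8.3+ firmware,
 * allocates backing-store context memory and switches to resource-based
 * reservations (BNXT_FW_CAP_NEW_RM).
 */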
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 7024 7025 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 7026 { 7027 int rc; 7028 7029 rc = __bnxt_hwrm_func_qcaps(bp); 7030 if (rc) 7031 return rc; 7032 rc = bnxt_hwrm_queue_qportcfg(bp); 7033 if (rc) { 7034 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 7035 return rc; 7036 } 7037 if (bp->hwrm_spec_code >= 0x10803) { 7038 rc = bnxt_alloc_ctx_mem(bp); 7039 if (rc) 7040 return rc; 7041 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 7042 if (!rc) 7043 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 7044 } 7045 return 0; 7046 } 7047 7048 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 7049 { 7050 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; 7051 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 7052 int rc = 0; 7053 u32 flags; 7054 7055 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 7056 return 0; 7057 7058 resp = bp->hwrm_cmd_resp_addr; 7059 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); 7060 7061 mutex_lock(&bp->hwrm_cmd_lock); 7062 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7063 if (rc) 7064 goto hwrm_cfa_adv_qcaps_exit; 7065 7066 flags = le32_to_cpu(resp->flags); 7067 if (flags & 7068 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 7069 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 7070 7071 hwrm_cfa_adv_qcaps_exit: 7072 mutex_unlock(&bp->hwrm_cmd_lock); 7073 return rc; 7074 } 7075 7076 static int bnxt_map_fw_health_regs(struct bnxt *bp) 7077 { 7078 struct bnxt_fw_health *fw_health = bp->fw_health; 7079 u32 reg_base = 0xffffffff; 7080 int i; 7081 7082 /* Only pre-map the monitoring GRC registers using window 3 */ 7083 for (i = 0; i < 4; i++) { 7084 u32 reg = fw_health->regs[i]; 7085 7086 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 7087 continue; 7088 if (reg_base == 0xffffffff) 7089 reg_base = reg & BNXT_GRC_BASE_MASK; 7090 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 7091 return -ERANGE; 7092 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE + 7093 (reg & BNXT_GRC_OFFSET_MASK); 7094 } 7095 if (reg_base == 0xffffffff) 7096 return 0; 7097 7098 writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 7099 BNXT_FW_HEALTH_WIN_MAP_OFF); 7100 return 0; 7101 } 7102 7103 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 7104 { 7105 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 7106 struct bnxt_fw_health *fw_health = bp->fw_health; 7107 struct hwrm_error_recovery_qcfg_input req = {0}; 7108 int rc, i; 7109 7110 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 7111 return 0; 7112 7113 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); 7114 mutex_lock(&bp->hwrm_cmd_lock); 7115 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7116 if (rc) 7117 goto err_recovery_out; 7118 if (!fw_health) { 7119 fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL); 7120 bp->fw_health = fw_health; 7121 if (!fw_health) { 7122 rc = -ENOMEM; 7123 goto err_recovery_out; 7124 } 7125 } 7126 fw_health->flags = le32_to_cpu(resp->flags); 7127 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 7128 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 7129 rc = -EINVAL; 7130 goto err_recovery_out; 7131 } 7132 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 7133 fw_health->master_func_wait_dsecs = 7134 le32_to_cpu(resp->master_func_wait_period); 7135 fw_health->normal_func_wait_dsecs = 7136 le32_to_cpu(resp->normal_func_wait_period); 7137 fw_health->post_reset_wait_dsecs = 
7138 le32_to_cpu(resp->master_func_wait_period_after_reset); 7139 fw_health->post_reset_max_wait_dsecs = 7140 le32_to_cpu(resp->max_bailout_time_after_reset); 7141 fw_health->regs[BNXT_FW_HEALTH_REG] = 7142 le32_to_cpu(resp->fw_health_status_reg); 7143 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 7144 le32_to_cpu(resp->fw_heartbeat_reg); 7145 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 7146 le32_to_cpu(resp->fw_reset_cnt_reg); 7147 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 7148 le32_to_cpu(resp->reset_inprogress_reg); 7149 fw_health->fw_reset_inprog_reg_mask = 7150 le32_to_cpu(resp->reset_inprogress_reg_mask); 7151 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 7152 if (fw_health->fw_reset_seq_cnt >= 16) { 7153 rc = -EINVAL; 7154 goto err_recovery_out; 7155 } 7156 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 7157 fw_health->fw_reset_seq_regs[i] = 7158 le32_to_cpu(resp->reset_reg[i]); 7159 fw_health->fw_reset_seq_vals[i] = 7160 le32_to_cpu(resp->reset_reg_val[i]); 7161 fw_health->fw_reset_seq_delay_msec[i] = 7162 resp->delay_after_reset[i]; 7163 } 7164 err_recovery_out: 7165 mutex_unlock(&bp->hwrm_cmd_lock); 7166 if (!rc) 7167 rc = bnxt_map_fw_health_regs(bp); 7168 if (rc) 7169 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 7170 return rc; 7171 } 7172 7173 static int bnxt_hwrm_func_reset(struct bnxt *bp) 7174 { 7175 struct hwrm_func_reset_input req = {0}; 7176 7177 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 7178 req.enables = 0; 7179 7180 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 7181 } 7182 7183 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 7184 { 7185 int rc = 0; 7186 struct hwrm_queue_qportcfg_input req = {0}; 7187 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 7188 u8 i, j, *qptr; 7189 bool no_rdma; 7190 7191 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 7192 7193 mutex_lock(&bp->hwrm_cmd_lock); 7194 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7195 if (rc) 7196 goto qportcfg_exit; 7197 7198 if (!resp->max_configurable_queues) { 7199 rc = -EINVAL; 7200 goto qportcfg_exit; 7201 } 7202 bp->max_tc = resp->max_configurable_queues; 7203 bp->max_lltc = resp->max_configurable_lossless_queues; 7204 if (bp->max_tc > BNXT_MAX_QUEUE) 7205 bp->max_tc = BNXT_MAX_QUEUE; 7206 7207 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 7208 qptr = &resp->queue_id0; 7209 for (i = 0, j = 0; i < bp->max_tc; i++) { 7210 bp->q_info[j].queue_id = *qptr; 7211 bp->q_ids[i] = *qptr++; 7212 bp->q_info[j].queue_profile = *qptr++; 7213 bp->tc_to_qidx[j] = j; 7214 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 7215 (no_rdma && BNXT_PF(bp))) 7216 j++; 7217 } 7218 bp->max_q = bp->max_tc; 7219 bp->max_tc = max_t(u8, j, 1); 7220 7221 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 7222 bp->max_tc = 1; 7223 7224 if (bp->max_lltc > bp->max_tc) 7225 bp->max_lltc = bp->max_tc; 7226 7227 qportcfg_exit: 7228 mutex_unlock(&bp->hwrm_cmd_lock); 7229 return rc; 7230 } 7231 7232 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) 7233 { 7234 struct hwrm_ver_get_input req = {0}; 7235 int rc; 7236 7237 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 7238 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 7239 req.hwrm_intf_min = HWRM_VERSION_MINOR; 7240 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 7241 7242 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, 7243 silent); 7244 return rc; 7245 } 7246 7247 static int bnxt_hwrm_ver_get(struct bnxt *bp) 7248 { 7249 struct hwrm_ver_get_output *resp 
= bp->hwrm_cmd_resp_addr; 7250 u32 dev_caps_cfg; 7251 int rc; 7252 7253 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 7254 mutex_lock(&bp->hwrm_cmd_lock); 7255 rc = __bnxt_hwrm_ver_get(bp, false); 7256 if (rc) 7257 goto hwrm_ver_get_exit; 7258 7259 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 7260 7261 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 7262 resp->hwrm_intf_min_8b << 8 | 7263 resp->hwrm_intf_upd_8b; 7264 if (resp->hwrm_intf_maj_8b < 1) { 7265 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 7266 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 7267 resp->hwrm_intf_upd_8b); 7268 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 7269 } 7270 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", 7271 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, 7272 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); 7273 7274 if (strlen(resp->active_pkg_name)) { 7275 int fw_ver_len = strlen(bp->fw_ver_str); 7276 7277 snprintf(bp->fw_ver_str + fw_ver_len, 7278 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 7279 resp->active_pkg_name); 7280 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 7281 } 7282 7283 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 7284 if (!bp->hwrm_cmd_timeout) 7285 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 7286 7287 if (resp->hwrm_intf_maj_8b >= 1) { 7288 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 7289 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 7290 } 7291 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 7292 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 7293 7294 bp->chip_num = le16_to_cpu(resp->chip_num); 7295 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 7296 !resp->chip_metal) 7297 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 7298 7299 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 7300 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 7301 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 7302 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 7303 7304 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 7305 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 7306 7307 if (dev_caps_cfg & 7308 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 7309 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 7310 7311 if (dev_caps_cfg & 7312 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 7313 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 7314 7315 if (dev_caps_cfg & 7316 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 7317 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 7318 7319 hwrm_ver_get_exit: 7320 mutex_unlock(&bp->hwrm_cmd_lock); 7321 return rc; 7322 } 7323 7324 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 7325 { 7326 struct hwrm_fw_set_time_input req = {0}; 7327 struct tm tm; 7328 time64_t now = ktime_get_real_seconds(); 7329 7330 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 7331 bp->hwrm_spec_code < 0x10400) 7332 return -EOPNOTSUPP; 7333 7334 time64_to_tm(now, 0, &tm); 7335 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 7336 req.year = cpu_to_le16(1900 + tm.tm_year); 7337 req.month = 1 + tm.tm_mon; 7338 req.day = tm.tm_mday; 7339 req.hour = tm.tm_hour; 7340 req.minute = tm.tm_min; 7341 req.second = tm.tm_sec; 7342 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7343 } 7344 7345 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 7346 { 7347 int rc; 7348 struct bnxt_pf_info *pf = &bp->pf; 7349 struct hwrm_port_qstats_input req = {0}; 7350 7351 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 7352 return 0; 7353 7354 
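/* Request firmware to DMA the port TX and RX statistics blocks into the
 * host buffers mapped at hw_tx_port_stats_map and hw_rx_port_stats_map.
 */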
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 7355 req.port_id = cpu_to_le16(pf->port_id); 7356 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 7357 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 7358 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7359 return rc; 7360 } 7361 7362 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) 7363 { 7364 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 7365 struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; 7366 struct hwrm_port_qstats_ext_input req = {0}; 7367 struct bnxt_pf_info *pf = &bp->pf; 7368 u32 tx_stat_size; 7369 int rc; 7370 7371 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 7372 return 0; 7373 7374 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); 7375 req.port_id = cpu_to_le16(pf->port_id); 7376 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 7377 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); 7378 tx_stat_size = bp->hw_tx_port_stats_ext ? 7379 sizeof(*bp->hw_tx_port_stats_ext) : 0; 7380 req.tx_stat_size = cpu_to_le16(tx_stat_size); 7381 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); 7382 mutex_lock(&bp->hwrm_cmd_lock); 7383 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7384 if (!rc) { 7385 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; 7386 bp->fw_tx_stats_ext_size = tx_stat_size ? 7387 le16_to_cpu(resp->tx_stat_size) / 8 : 0; 7388 } else { 7389 bp->fw_rx_stats_ext_size = 0; 7390 bp->fw_tx_stats_ext_size = 0; 7391 } 7392 if (bp->fw_tx_stats_ext_size <= 7393 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 7394 mutex_unlock(&bp->hwrm_cmd_lock); 7395 bp->pri2cos_valid = 0; 7396 return rc; 7397 } 7398 7399 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 7400 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 7401 7402 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); 7403 if (!rc) { 7404 struct hwrm_queue_pri2cos_qcfg_output *resp2; 7405 u8 *pri2cos; 7406 int i, j; 7407 7408 resp2 = bp->hwrm_cmd_resp_addr; 7409 pri2cos = &resp2->pri0_cos_queue_id; 7410 for (i = 0; i < 8; i++) { 7411 u8 queue_id = pri2cos[i]; 7412 7413 for (j = 0; j < bp->max_q; j++) { 7414 if (bp->q_ids[j] == queue_id) 7415 bp->pri2cos[i] = j; 7416 } 7417 } 7418 bp->pri2cos_valid = 1; 7419 } 7420 mutex_unlock(&bp->hwrm_cmd_lock); 7421 return rc; 7422 } 7423 7424 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) 7425 { 7426 struct hwrm_pcie_qstats_input req = {0}; 7427 7428 if (!(bp->flags & BNXT_FLAG_PCIE_STATS)) 7429 return 0; 7430 7431 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); 7432 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats)); 7433 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map); 7434 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7435 } 7436 7437 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 7438 { 7439 if (bp->vxlan_port_cnt) { 7440 bnxt_hwrm_tunnel_dst_port_free( 7441 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7442 } 7443 bp->vxlan_port_cnt = 0; 7444 if (bp->nge_port_cnt) { 7445 bnxt_hwrm_tunnel_dst_port_free( 7446 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7447 } 7448 bp->nge_port_cnt = 0; 7449 } 7450 7451 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 7452 { 7453 int rc, i; 7454 u32 tpa_flags = 0; 7455 7456 if (set_tpa) 7457 tpa_flags = bp->flags & BNXT_FLAG_TPA; 7458 else if (test_bit(BNXT_STATE_FW_FATAL_COND, 
&bp->state)) 7459 return 0; 7460 for (i = 0; i < bp->nr_vnics; i++) { 7461 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 7462 if (rc) { 7463 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 7464 i, rc); 7465 return rc; 7466 } 7467 } 7468 return 0; 7469 } 7470 7471 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 7472 { 7473 int i; 7474 7475 for (i = 0; i < bp->nr_vnics; i++) 7476 bnxt_hwrm_vnic_set_rss(bp, i, false); 7477 } 7478 7479 static void bnxt_clear_vnic(struct bnxt *bp) 7480 { 7481 if (!bp->vnic_info) 7482 return; 7483 7484 bnxt_hwrm_clear_vnic_filter(bp); 7485 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { 7486 /* clear all RSS setting before free vnic ctx */ 7487 bnxt_hwrm_clear_vnic_rss(bp); 7488 bnxt_hwrm_vnic_ctx_free(bp); 7489 } 7490 /* before free the vnic, undo the vnic tpa settings */ 7491 if (bp->flags & BNXT_FLAG_TPA) 7492 bnxt_set_tpa(bp, false); 7493 bnxt_hwrm_vnic_free(bp); 7494 if (bp->flags & BNXT_FLAG_CHIP_P5) 7495 bnxt_hwrm_vnic_ctx_free(bp); 7496 } 7497 7498 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 7499 bool irq_re_init) 7500 { 7501 bnxt_clear_vnic(bp); 7502 bnxt_hwrm_ring_free(bp, close_path); 7503 bnxt_hwrm_ring_grp_free(bp); 7504 if (irq_re_init) { 7505 bnxt_hwrm_stat_ctx_free(bp); 7506 bnxt_hwrm_free_tunnel_ports(bp); 7507 } 7508 } 7509 7510 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 7511 { 7512 struct hwrm_func_cfg_input req = {0}; 7513 int rc; 7514 7515 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 7516 req.fid = cpu_to_le16(0xffff); 7517 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 7518 if (br_mode == BRIDGE_MODE_VEB) 7519 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 7520 else if (br_mode == BRIDGE_MODE_VEPA) 7521 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 7522 else 7523 return -EINVAL; 7524 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7525 return rc; 7526 } 7527 7528 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 7529 { 7530 struct hwrm_func_cfg_input req = {0}; 7531 int rc; 7532 7533 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 7534 return 0; 7535 7536 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 7537 req.fid = cpu_to_le16(0xffff); 7538 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 7539 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 7540 if (size == 128) 7541 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 7542 7543 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7544 return rc; 7545 } 7546 7547 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 7548 { 7549 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 7550 int rc; 7551 7552 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 7553 goto skip_rss_ctx; 7554 7555 /* allocate context for vnic */ 7556 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 7557 if (rc) { 7558 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 7559 vnic_id, rc); 7560 goto vnic_setup_err; 7561 } 7562 bp->rsscos_nr_ctxs++; 7563 7564 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7565 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 7566 if (rc) { 7567 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 7568 vnic_id, rc); 7569 goto vnic_setup_err; 7570 } 7571 bp->rsscos_nr_ctxs++; 7572 } 7573 7574 skip_rss_ctx: 7575 /* configure default vnic, ring grp */ 7576 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 7577 if (rc) { 7578 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 7579 vnic_id, rc); 7580 goto vnic_setup_err; 
7581 } 7582 7583 /* Enable RSS hashing on vnic */ 7584 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 7585 if (rc) { 7586 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 7587 vnic_id, rc); 7588 goto vnic_setup_err; 7589 } 7590 7591 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7592 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 7593 if (rc) { 7594 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 7595 vnic_id, rc); 7596 } 7597 } 7598 7599 vnic_setup_err: 7600 return rc; 7601 } 7602 7603 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 7604 { 7605 int rc, i, nr_ctxs; 7606 7607 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); 7608 for (i = 0; i < nr_ctxs; i++) { 7609 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 7610 if (rc) { 7611 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 7612 vnic_id, i, rc); 7613 break; 7614 } 7615 bp->rsscos_nr_ctxs++; 7616 } 7617 if (i < nr_ctxs) 7618 return -ENOMEM; 7619 7620 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 7621 if (rc) { 7622 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 7623 vnic_id, rc); 7624 return rc; 7625 } 7626 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 7627 if (rc) { 7628 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 7629 vnic_id, rc); 7630 return rc; 7631 } 7632 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7633 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 7634 if (rc) { 7635 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 7636 vnic_id, rc); 7637 } 7638 } 7639 return rc; 7640 } 7641 7642 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 7643 { 7644 if (bp->flags & BNXT_FLAG_CHIP_P5) 7645 return __bnxt_setup_vnic_p5(bp, vnic_id); 7646 else 7647 return __bnxt_setup_vnic(bp, vnic_id); 7648 } 7649 7650 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 7651 { 7652 #ifdef CONFIG_RFS_ACCEL 7653 int i, rc = 0; 7654 7655 if (bp->flags & BNXT_FLAG_CHIP_P5) 7656 return 0; 7657 7658 for (i = 0; i < bp->rx_nr_rings; i++) { 7659 struct bnxt_vnic_info *vnic; 7660 u16 vnic_id = i + 1; 7661 u16 ring_id = i; 7662 7663 if (vnic_id >= bp->nr_vnics) 7664 break; 7665 7666 vnic = &bp->vnic_info[vnic_id]; 7667 vnic->flags |= BNXT_VNIC_RFS_FLAG; 7668 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 7669 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 7670 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 7671 if (rc) { 7672 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 7673 vnic_id, rc); 7674 break; 7675 } 7676 rc = bnxt_setup_vnic(bp, vnic_id); 7677 if (rc) 7678 break; 7679 } 7680 return rc; 7681 #else 7682 return 0; 7683 #endif 7684 } 7685 7686 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 7687 static bool bnxt_promisc_ok(struct bnxt *bp) 7688 { 7689 #ifdef CONFIG_BNXT_SRIOV 7690 if (BNXT_VF(bp) && !bp->vf.vlan) 7691 return false; 7692 #endif 7693 return true; 7694 } 7695 7696 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 7697 { 7698 unsigned int rc = 0; 7699 7700 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 7701 if (rc) { 7702 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 7703 rc); 7704 return rc; 7705 } 7706 7707 rc = bnxt_hwrm_vnic_cfg(bp, 1); 7708 if (rc) { 7709 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 7710 rc); 7711 return rc; 7712 } 7713 return rc; 7714 } 7715 7716 static int bnxt_cfg_rx_mode(struct bnxt *); 7717 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 7718 7719 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 7720 { 7721 struct bnxt_vnic_info *vnic = 
&bp->vnic_info[0]; 7722 int rc = 0; 7723 unsigned int rx_nr_rings = bp->rx_nr_rings; 7724 7725 if (irq_re_init) { 7726 rc = bnxt_hwrm_stat_ctx_alloc(bp); 7727 if (rc) { 7728 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 7729 rc); 7730 goto err_out; 7731 } 7732 } 7733 7734 rc = bnxt_hwrm_ring_alloc(bp); 7735 if (rc) { 7736 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 7737 goto err_out; 7738 } 7739 7740 rc = bnxt_hwrm_ring_grp_alloc(bp); 7741 if (rc) { 7742 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 7743 goto err_out; 7744 } 7745 7746 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7747 rx_nr_rings--; 7748 7749 /* default vnic 0 */ 7750 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 7751 if (rc) { 7752 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 7753 goto err_out; 7754 } 7755 7756 rc = bnxt_setup_vnic(bp, 0); 7757 if (rc) 7758 goto err_out; 7759 7760 if (bp->flags & BNXT_FLAG_RFS) { 7761 rc = bnxt_alloc_rfs_vnics(bp); 7762 if (rc) 7763 goto err_out; 7764 } 7765 7766 if (bp->flags & BNXT_FLAG_TPA) { 7767 rc = bnxt_set_tpa(bp, true); 7768 if (rc) 7769 goto err_out; 7770 } 7771 7772 if (BNXT_VF(bp)) 7773 bnxt_update_vf_mac(bp); 7774 7775 /* Filter for default vnic 0 */ 7776 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 7777 if (rc) { 7778 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 7779 goto err_out; 7780 } 7781 vnic->uc_filter_count = 1; 7782 7783 vnic->rx_mask = 0; 7784 if (bp->dev->flags & IFF_BROADCAST) 7785 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 7786 7787 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 7788 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 7789 7790 if (bp->dev->flags & IFF_ALLMULTI) { 7791 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 7792 vnic->mc_list_count = 0; 7793 } else { 7794 u32 mask = 0; 7795 7796 bnxt_mc_list_updated(bp, &mask); 7797 vnic->rx_mask |= mask; 7798 } 7799 7800 rc = bnxt_cfg_rx_mode(bp); 7801 if (rc) 7802 goto err_out; 7803 7804 rc = bnxt_hwrm_set_coal(bp); 7805 if (rc) 7806 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 7807 rc); 7808 7809 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7810 rc = bnxt_setup_nitroa0_vnic(bp); 7811 if (rc) 7812 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 7813 rc); 7814 } 7815 7816 if (BNXT_VF(bp)) { 7817 bnxt_hwrm_func_qcfg(bp); 7818 netdev_update_features(bp->dev); 7819 } 7820 7821 return 0; 7822 7823 err_out: 7824 bnxt_hwrm_resource_free(bp, 0, true); 7825 7826 return rc; 7827 } 7828 7829 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 7830 { 7831 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 7832 return 0; 7833 } 7834 7835 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 7836 { 7837 bnxt_init_cp_rings(bp); 7838 bnxt_init_rx_rings(bp); 7839 bnxt_init_tx_rings(bp); 7840 bnxt_init_ring_grps(bp, irq_re_init); 7841 bnxt_init_vnics(bp); 7842 7843 return bnxt_init_chip(bp, irq_re_init); 7844 } 7845 7846 static int bnxt_set_real_num_queues(struct bnxt *bp) 7847 { 7848 int rc; 7849 struct net_device *dev = bp->dev; 7850 7851 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 7852 bp->tx_nr_rings_xdp); 7853 if (rc) 7854 return rc; 7855 7856 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 7857 if (rc) 7858 return rc; 7859 7860 #ifdef CONFIG_RFS_ACCEL 7861 if (bp->flags & BNXT_FLAG_RFS) 7862 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 7863 #endif 7864 7865 return rc; 7866 } 7867 7868 static int bnxt_trim_rings(struct bnxt 
*bp, int *rx, int *tx, int max, 7869 bool shared) 7870 { 7871 int _rx = *rx, _tx = *tx; 7872 7873 if (shared) { 7874 *rx = min_t(int, _rx, max); 7875 *tx = min_t(int, _tx, max); 7876 } else { 7877 if (max < 2) 7878 return -ENOMEM; 7879 7880 while (_rx + _tx > max) { 7881 if (_rx > _tx && _rx > 1) 7882 _rx--; 7883 else if (_tx > 1) 7884 _tx--; 7885 } 7886 *rx = _rx; 7887 *tx = _tx; 7888 } 7889 return 0; 7890 } 7891 7892 static void bnxt_setup_msix(struct bnxt *bp) 7893 { 7894 const int len = sizeof(bp->irq_tbl[0].name); 7895 struct net_device *dev = bp->dev; 7896 int tcs, i; 7897 7898 tcs = netdev_get_num_tc(dev); 7899 if (tcs > 1) { 7900 int i, off, count; 7901 7902 for (i = 0; i < tcs; i++) { 7903 count = bp->tx_nr_rings_per_tc; 7904 off = i * count; 7905 netdev_set_tc_queue(dev, i, count, off); 7906 } 7907 } 7908 7909 for (i = 0; i < bp->cp_nr_rings; i++) { 7910 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 7911 char *attr; 7912 7913 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7914 attr = "TxRx"; 7915 else if (i < bp->rx_nr_rings) 7916 attr = "rx"; 7917 else 7918 attr = "tx"; 7919 7920 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 7921 attr, i); 7922 bp->irq_tbl[map_idx].handler = bnxt_msix; 7923 } 7924 } 7925 7926 static void bnxt_setup_inta(struct bnxt *bp) 7927 { 7928 const int len = sizeof(bp->irq_tbl[0].name); 7929 7930 if (netdev_get_num_tc(bp->dev)) 7931 netdev_reset_tc(bp->dev); 7932 7933 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 7934 0); 7935 bp->irq_tbl[0].handler = bnxt_inta; 7936 } 7937 7938 static int bnxt_setup_int_mode(struct bnxt *bp) 7939 { 7940 int rc; 7941 7942 if (bp->flags & BNXT_FLAG_USING_MSIX) 7943 bnxt_setup_msix(bp); 7944 else 7945 bnxt_setup_inta(bp); 7946 7947 rc = bnxt_set_real_num_queues(bp); 7948 return rc; 7949 } 7950 7951 #ifdef CONFIG_RFS_ACCEL 7952 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 7953 { 7954 return bp->hw_resc.max_rsscos_ctxs; 7955 } 7956 7957 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 7958 { 7959 return bp->hw_resc.max_vnics; 7960 } 7961 #endif 7962 7963 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 7964 { 7965 return bp->hw_resc.max_stat_ctxs; 7966 } 7967 7968 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 7969 { 7970 return bp->hw_resc.max_cp_rings; 7971 } 7972 7973 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 7974 { 7975 unsigned int cp = bp->hw_resc.max_cp_rings; 7976 7977 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 7978 cp -= bnxt_get_ulp_msix_num(bp); 7979 7980 return cp; 7981 } 7982 7983 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 7984 { 7985 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7986 7987 if (bp->flags & BNXT_FLAG_CHIP_P5) 7988 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 7989 7990 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 7991 } 7992 7993 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 7994 { 7995 bp->hw_resc.max_irqs = max_irqs; 7996 } 7997 7998 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 7999 { 8000 unsigned int cp; 8001 8002 cp = bnxt_get_max_func_cp_rings_for_en(bp); 8003 if (bp->flags & BNXT_FLAG_CHIP_P5) 8004 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 8005 else 8006 return cp - bp->cp_nr_rings; 8007 } 8008 8009 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 8010 { 8011 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 8012 } 8013 8014 int 
bnxt_get_avail_msix(struct bnxt *bp, int num) 8015 { 8016 int max_cp = bnxt_get_max_func_cp_rings(bp); 8017 int max_irq = bnxt_get_max_func_irqs(bp); 8018 int total_req = bp->cp_nr_rings + num; 8019 int max_idx, avail_msix; 8020 8021 max_idx = bp->total_irqs; 8022 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 8023 max_idx = min_t(int, bp->total_irqs, max_cp); 8024 avail_msix = max_idx - bp->cp_nr_rings; 8025 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 8026 return avail_msix; 8027 8028 if (max_irq < total_req) { 8029 num = max_irq - bp->cp_nr_rings; 8030 if (num <= 0) 8031 return 0; 8032 } 8033 return num; 8034 } 8035 8036 static int bnxt_get_num_msix(struct bnxt *bp) 8037 { 8038 if (!BNXT_NEW_RM(bp)) 8039 return bnxt_get_max_func_irqs(bp); 8040 8041 return bnxt_nq_rings_in_use(bp); 8042 } 8043 8044 static int bnxt_init_msix(struct bnxt *bp) 8045 { 8046 int i, total_vecs, max, rc = 0, min = 1, ulp_msix; 8047 struct msix_entry *msix_ent; 8048 8049 total_vecs = bnxt_get_num_msix(bp); 8050 max = bnxt_get_max_func_irqs(bp); 8051 if (total_vecs > max) 8052 total_vecs = max; 8053 8054 if (!total_vecs) 8055 return 0; 8056 8057 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 8058 if (!msix_ent) 8059 return -ENOMEM; 8060 8061 for (i = 0; i < total_vecs; i++) { 8062 msix_ent[i].entry = i; 8063 msix_ent[i].vector = 0; 8064 } 8065 8066 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 8067 min = 2; 8068 8069 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 8070 ulp_msix = bnxt_get_ulp_msix_num(bp); 8071 if (total_vecs < 0 || total_vecs < ulp_msix) { 8072 rc = -ENODEV; 8073 goto msix_setup_exit; 8074 } 8075 8076 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 8077 if (bp->irq_tbl) { 8078 for (i = 0; i < total_vecs; i++) 8079 bp->irq_tbl[i].vector = msix_ent[i].vector; 8080 8081 bp->total_irqs = total_vecs; 8082 /* Trim rings based upon num of vectors allocated */ 8083 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 8084 total_vecs - ulp_msix, min == 1); 8085 if (rc) 8086 goto msix_setup_exit; 8087 8088 bp->cp_nr_rings = (min == 1) ? 
8089 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 8090 bp->tx_nr_rings + bp->rx_nr_rings; 8091 8092 } else { 8093 rc = -ENOMEM; 8094 goto msix_setup_exit; 8095 } 8096 bp->flags |= BNXT_FLAG_USING_MSIX; 8097 kfree(msix_ent); 8098 return 0; 8099 8100 msix_setup_exit: 8101 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 8102 kfree(bp->irq_tbl); 8103 bp->irq_tbl = NULL; 8104 pci_disable_msix(bp->pdev); 8105 kfree(msix_ent); 8106 return rc; 8107 } 8108 8109 static int bnxt_init_inta(struct bnxt *bp) 8110 { 8111 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 8112 if (!bp->irq_tbl) 8113 return -ENOMEM; 8114 8115 bp->total_irqs = 1; 8116 bp->rx_nr_rings = 1; 8117 bp->tx_nr_rings = 1; 8118 bp->cp_nr_rings = 1; 8119 bp->flags |= BNXT_FLAG_SHARED_RINGS; 8120 bp->irq_tbl[0].vector = bp->pdev->irq; 8121 return 0; 8122 } 8123 8124 static int bnxt_init_int_mode(struct bnxt *bp) 8125 { 8126 int rc = 0; 8127 8128 if (bp->flags & BNXT_FLAG_MSIX_CAP) 8129 rc = bnxt_init_msix(bp); 8130 8131 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 8132 /* fallback to INTA */ 8133 rc = bnxt_init_inta(bp); 8134 } 8135 return rc; 8136 } 8137 8138 static void bnxt_clear_int_mode(struct bnxt *bp) 8139 { 8140 if (bp->flags & BNXT_FLAG_USING_MSIX) 8141 pci_disable_msix(bp->pdev); 8142 8143 kfree(bp->irq_tbl); 8144 bp->irq_tbl = NULL; 8145 bp->flags &= ~BNXT_FLAG_USING_MSIX; 8146 } 8147 8148 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 8149 { 8150 int tcs = netdev_get_num_tc(bp->dev); 8151 bool irq_cleared = false; 8152 int rc; 8153 8154 if (!bnxt_need_reserve_rings(bp)) 8155 return 0; 8156 8157 if (irq_re_init && BNXT_NEW_RM(bp) && 8158 bnxt_get_num_msix(bp) != bp->total_irqs) { 8159 bnxt_ulp_irq_stop(bp); 8160 bnxt_clear_int_mode(bp); 8161 irq_cleared = true; 8162 } 8163 rc = __bnxt_reserve_rings(bp); 8164 if (irq_cleared) { 8165 if (!rc) 8166 rc = bnxt_init_int_mode(bp); 8167 bnxt_ulp_irq_restart(bp, rc); 8168 } 8169 if (rc) { 8170 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 8171 return rc; 8172 } 8173 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { 8174 netdev_err(bp->dev, "tx ring reservation failure\n"); 8175 netdev_reset_tc(bp->dev); 8176 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 8177 return -ENOMEM; 8178 } 8179 return 0; 8180 } 8181 8182 static void bnxt_free_irq(struct bnxt *bp) 8183 { 8184 struct bnxt_irq *irq; 8185 int i; 8186 8187 #ifdef CONFIG_RFS_ACCEL 8188 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 8189 bp->dev->rx_cpu_rmap = NULL; 8190 #endif 8191 if (!bp->irq_tbl || !bp->bnapi) 8192 return; 8193 8194 for (i = 0; i < bp->cp_nr_rings; i++) { 8195 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8196 8197 irq = &bp->irq_tbl[map_idx]; 8198 if (irq->requested) { 8199 if (irq->have_cpumask) { 8200 irq_set_affinity_hint(irq->vector, NULL); 8201 free_cpumask_var(irq->cpu_mask); 8202 irq->have_cpumask = 0; 8203 } 8204 free_irq(irq->vector, bp->bnapi[i]); 8205 } 8206 8207 irq->requested = 0; 8208 } 8209 } 8210 8211 static int bnxt_request_irq(struct bnxt *bp) 8212 { 8213 int i, j, rc = 0; 8214 unsigned long flags = 0; 8215 #ifdef CONFIG_RFS_ACCEL 8216 struct cpu_rmap *rmap; 8217 #endif 8218 8219 rc = bnxt_setup_int_mode(bp); 8220 if (rc) { 8221 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 8222 rc); 8223 return rc; 8224 } 8225 #ifdef CONFIG_RFS_ACCEL 8226 rmap = bp->dev->rx_cpu_rmap; 8227 #endif 8228 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 8229 flags = IRQF_SHARED; 8230 8231 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 8232 int 
map_idx = bnxt_cp_num_to_irq_num(bp, i); 8233 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 8234 8235 #ifdef CONFIG_RFS_ACCEL 8236 if (rmap && bp->bnapi[i]->rx_ring) { 8237 rc = irq_cpu_rmap_add(rmap, irq->vector); 8238 if (rc) 8239 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 8240 j); 8241 j++; 8242 } 8243 #endif 8244 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 8245 bp->bnapi[i]); 8246 if (rc) 8247 break; 8248 8249 irq->requested = 1; 8250 8251 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 8252 int numa_node = dev_to_node(&bp->pdev->dev); 8253 8254 irq->have_cpumask = 1; 8255 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 8256 irq->cpu_mask); 8257 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 8258 if (rc) { 8259 netdev_warn(bp->dev, 8260 "Set affinity failed, IRQ = %d\n", 8261 irq->vector); 8262 break; 8263 } 8264 } 8265 } 8266 return rc; 8267 } 8268 8269 static void bnxt_del_napi(struct bnxt *bp) 8270 { 8271 int i; 8272 8273 if (!bp->bnapi) 8274 return; 8275 8276 for (i = 0; i < bp->cp_nr_rings; i++) { 8277 struct bnxt_napi *bnapi = bp->bnapi[i]; 8278 8279 napi_hash_del(&bnapi->napi); 8280 netif_napi_del(&bnapi->napi); 8281 } 8282 /* We called napi_hash_del() before netif_napi_del(), we need 8283 * to respect an RCU grace period before freeing napi structures. 8284 */ 8285 synchronize_net(); 8286 } 8287 8288 static void bnxt_init_napi(struct bnxt *bp) 8289 { 8290 int i; 8291 unsigned int cp_nr_rings = bp->cp_nr_rings; 8292 struct bnxt_napi *bnapi; 8293 8294 if (bp->flags & BNXT_FLAG_USING_MSIX) { 8295 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 8296 8297 if (bp->flags & BNXT_FLAG_CHIP_P5) 8298 poll_fn = bnxt_poll_p5; 8299 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8300 cp_nr_rings--; 8301 for (i = 0; i < cp_nr_rings; i++) { 8302 bnapi = bp->bnapi[i]; 8303 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); 8304 } 8305 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8306 bnapi = bp->bnapi[cp_nr_rings]; 8307 netif_napi_add(bp->dev, &bnapi->napi, 8308 bnxt_poll_nitroa0, 64); 8309 } 8310 } else { 8311 bnapi = bp->bnapi[0]; 8312 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 8313 } 8314 } 8315 8316 static void bnxt_disable_napi(struct bnxt *bp) 8317 { 8318 int i; 8319 8320 if (!bp->bnapi) 8321 return; 8322 8323 for (i = 0; i < bp->cp_nr_rings; i++) { 8324 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 8325 8326 if (bp->bnapi[i]->rx_ring) 8327 cancel_work_sync(&cpr->dim.work); 8328 8329 napi_disable(&bp->bnapi[i]->napi); 8330 } 8331 } 8332 8333 static void bnxt_enable_napi(struct bnxt *bp) 8334 { 8335 int i; 8336 8337 for (i = 0; i < bp->cp_nr_rings; i++) { 8338 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 8339 bp->bnapi[i]->in_reset = false; 8340 8341 if (bp->bnapi[i]->rx_ring) { 8342 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 8343 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 8344 } 8345 napi_enable(&bp->bnapi[i]->napi); 8346 } 8347 } 8348 8349 void bnxt_tx_disable(struct bnxt *bp) 8350 { 8351 int i; 8352 struct bnxt_tx_ring_info *txr; 8353 8354 if (bp->tx_ring) { 8355 for (i = 0; i < bp->tx_nr_rings; i++) { 8356 txr = &bp->tx_ring[i]; 8357 txr->dev_state = BNXT_DEV_STATE_CLOSING; 8358 } 8359 } 8360 /* Stop all TX queues */ 8361 netif_tx_disable(bp->dev); 8362 netif_carrier_off(bp->dev); 8363 } 8364 8365 void bnxt_tx_enable(struct bnxt *bp) 8366 { 8367 int i; 8368 struct bnxt_tx_ring_info *txr; 8369 8370 for (i = 0; i < bp->tx_nr_rings; i++) { 8371 txr = &bp->tx_ring[i]; 8372 txr->dev_state = 0; 8373 } 8374 
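	/* All rings are out of the CLOSING state now; wake the stack's TX
	 * queues and restore the carrier if the last known link state is up.
	 */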
netif_tx_wake_all_queues(bp->dev); 8375 if (bp->link_info.link_up) 8376 netif_carrier_on(bp->dev); 8377 } 8378 8379 static void bnxt_report_link(struct bnxt *bp) 8380 { 8381 if (bp->link_info.link_up) { 8382 const char *duplex; 8383 const char *flow_ctrl; 8384 u32 speed; 8385 u16 fec; 8386 8387 netif_carrier_on(bp->dev); 8388 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 8389 duplex = "full"; 8390 else 8391 duplex = "half"; 8392 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 8393 flow_ctrl = "ON - receive & transmit"; 8394 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 8395 flow_ctrl = "ON - transmit"; 8396 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 8397 flow_ctrl = "ON - receive"; 8398 else 8399 flow_ctrl = "none"; 8400 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 8401 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", 8402 speed, duplex, flow_ctrl); 8403 if (bp->flags & BNXT_FLAG_EEE_CAP) 8404 netdev_info(bp->dev, "EEE is %s\n", 8405 bp->eee.eee_active ? "active" : 8406 "not active"); 8407 fec = bp->link_info.fec_cfg; 8408 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 8409 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", 8410 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 8411 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : 8412 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None"); 8413 } else { 8414 netif_carrier_off(bp->dev); 8415 netdev_err(bp->dev, "NIC Link is Down\n"); 8416 } 8417 } 8418 8419 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 8420 { 8421 int rc = 0; 8422 struct hwrm_port_phy_qcaps_input req = {0}; 8423 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 8424 struct bnxt_link_info *link_info = &bp->link_info; 8425 8426 bp->flags &= ~BNXT_FLAG_EEE_CAP; 8427 if (bp->test_info) 8428 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK | 8429 BNXT_TEST_FL_AN_PHY_LPBK); 8430 if (bp->hwrm_spec_code < 0x10201) 8431 return 0; 8432 8433 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 8434 8435 mutex_lock(&bp->hwrm_cmd_lock); 8436 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8437 if (rc) 8438 goto hwrm_phy_qcaps_exit; 8439 8440 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 8441 struct ethtool_eee *eee = &bp->eee; 8442 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 8443 8444 bp->flags |= BNXT_FLAG_EEE_CAP; 8445 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 8446 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 8447 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 8448 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 8449 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 8450 } 8451 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) { 8452 if (bp->test_info) 8453 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; 8454 } 8455 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) { 8456 if (bp->test_info) 8457 bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK; 8458 } 8459 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) { 8460 if (BNXT_PF(bp)) 8461 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; 8462 } 8463 if (resp->supported_speeds_auto_mode) 8464 link_info->support_auto_speeds = 8465 le16_to_cpu(resp->supported_speeds_auto_mode); 8466 8467 bp->port_count = resp->port_cnt; 8468 8469 hwrm_phy_qcaps_exit: 8470 mutex_unlock(&bp->hwrm_cmd_lock); 8471 return rc; 8472 } 8473 8474 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 8475 { 8476 int rc = 0; 8477 struct 
bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 link_up = link_info->link_up;
	u16 diff;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
	link_info->phy_link_status = resp->link;
	link_info->duplex = resp->duplex_cfg;
	if (bp->hwrm_spec_code >= 0x10800)
		link_info->duplex = resp->duplex_state;
	link_info->pause = resp->pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->auto_pause_setting = resp->auto_pause;
	link_info->lp_pause = resp->link_partner_adv_pause;
	link_info->force_pause_setting = resp->force_pause;
	link_info->duplex_setting = resp->duplex_cfg;
	if (link_info->phy_link_status == BNXT_LINK_LINK)
		link_info->link_speed = le16_to_cpu(resp->link_speed);
	else
		link_info->link_speed = 0;
	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
	link_info->lp_auto_link_speeds =
		le16_to_cpu(resp->link_partner_adv_speeds);
	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->media_type = resp->media_type;
	link_info->phy_type = resp->phy_type;
	link_info->transceiver = resp->xcvr_pkg_type;
	link_info->phy_addr = resp->eee_config_phy_addr &
			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
	link_info->module_status = resp->module_status;

	if (bp->flags & BNXT_FLAG_EEE_CAP) {
		struct ethtool_eee *eee = &bp->eee;
		u16 fw_speeds;

		eee->eee_active = 0;
		if (resp->eee_config_phy_addr &
		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
			eee->eee_active = 1;
			fw_speeds = le16_to_cpu(
				resp->link_partner_adv_eee_link_speed_mask);
			eee->lp_advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		}

		/* Pull initial EEE config */
		if (!chng_link_state) {
			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			eee->advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504)
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);

	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_up = 1;
		else
			link_info->link_up = 0;
		if (link_up != link_info->link_up)
			bnxt_report_link(bp);
	} else {
		/* always link down if not required to update link state */
		link_info->link_up =
0; 8574 } 8575 mutex_unlock(&bp->hwrm_cmd_lock); 8576 8577 if (!BNXT_PHY_CFG_ABLE(bp)) 8578 return 0; 8579 8580 diff = link_info->support_auto_speeds ^ link_info->advertising; 8581 if ((link_info->support_auto_speeds | diff) != 8582 link_info->support_auto_speeds) { 8583 /* An advertised speed is no longer supported, so we need to 8584 * update the advertisement settings. Caller holds RTNL 8585 * so we can modify link settings. 8586 */ 8587 link_info->advertising = link_info->support_auto_speeds; 8588 if (link_info->autoneg & BNXT_AUTONEG_SPEED) 8589 bnxt_hwrm_set_link_setting(bp, true, false); 8590 } 8591 return 0; 8592 } 8593 8594 static void bnxt_get_port_module_status(struct bnxt *bp) 8595 { 8596 struct bnxt_link_info *link_info = &bp->link_info; 8597 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 8598 u8 module_status; 8599 8600 if (bnxt_update_link(bp, true)) 8601 return; 8602 8603 module_status = link_info->module_status; 8604 switch (module_status) { 8605 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 8606 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 8607 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 8608 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 8609 bp->pf.port_id); 8610 if (bp->hwrm_spec_code >= 0x10201) { 8611 netdev_warn(bp->dev, "Module part number %s\n", 8612 resp->phy_vendor_partnumber); 8613 } 8614 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 8615 netdev_warn(bp->dev, "TX is disabled\n"); 8616 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 8617 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 8618 } 8619 } 8620 8621 static void 8622 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 8623 { 8624 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 8625 if (bp->hwrm_spec_code >= 0x10201) 8626 req->auto_pause = 8627 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 8628 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 8629 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 8630 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 8631 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 8632 req->enables |= 8633 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 8634 } else { 8635 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 8636 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 8637 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 8638 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 8639 req->enables |= 8640 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 8641 if (bp->hwrm_spec_code >= 0x10201) { 8642 req->auto_pause = req->force_pause; 8643 req->enables |= cpu_to_le32( 8644 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 8645 } 8646 } 8647 } 8648 8649 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 8650 struct hwrm_port_phy_cfg_input *req) 8651 { 8652 u8 autoneg = bp->link_info.autoneg; 8653 u16 fw_link_speed = bp->link_info.req_link_speed; 8654 u16 advertising = bp->link_info.advertising; 8655 8656 if (autoneg & BNXT_AUTONEG_SPEED) { 8657 req->auto_mode |= 8658 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 8659 8660 req->enables |= cpu_to_le32( 8661 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 8662 req->auto_link_speed_mask = cpu_to_le16(advertising); 8663 8664 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 8665 req->flags |= 8666 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 8667 } else { 8668 req->force_link_speed = cpu_to_le16(fw_link_speed); 8669 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 8670 
} 8671 8672 /* tell chimp that the setting takes effect immediately */ 8673 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 8674 } 8675 8676 int bnxt_hwrm_set_pause(struct bnxt *bp) 8677 { 8678 struct hwrm_port_phy_cfg_input req = {0}; 8679 int rc; 8680 8681 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8682 bnxt_hwrm_set_pause_common(bp, &req); 8683 8684 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 8685 bp->link_info.force_link_chng) 8686 bnxt_hwrm_set_link_common(bp, &req); 8687 8688 mutex_lock(&bp->hwrm_cmd_lock); 8689 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8690 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 8691 /* since changing of pause setting doesn't trigger any link 8692 * change event, the driver needs to update the current pause 8693 * result upon successfully return of the phy_cfg command 8694 */ 8695 bp->link_info.pause = 8696 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 8697 bp->link_info.auto_pause_setting = 0; 8698 if (!bp->link_info.force_link_chng) 8699 bnxt_report_link(bp); 8700 } 8701 bp->link_info.force_link_chng = false; 8702 mutex_unlock(&bp->hwrm_cmd_lock); 8703 return rc; 8704 } 8705 8706 static void bnxt_hwrm_set_eee(struct bnxt *bp, 8707 struct hwrm_port_phy_cfg_input *req) 8708 { 8709 struct ethtool_eee *eee = &bp->eee; 8710 8711 if (eee->eee_enabled) { 8712 u16 eee_speeds; 8713 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 8714 8715 if (eee->tx_lpi_enabled) 8716 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 8717 else 8718 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 8719 8720 req->flags |= cpu_to_le32(flags); 8721 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 8722 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 8723 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 8724 } else { 8725 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 8726 } 8727 } 8728 8729 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 8730 { 8731 struct hwrm_port_phy_cfg_input req = {0}; 8732 8733 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8734 if (set_pause) 8735 bnxt_hwrm_set_pause_common(bp, &req); 8736 8737 bnxt_hwrm_set_link_common(bp, &req); 8738 8739 if (set_eee) 8740 bnxt_hwrm_set_eee(bp, &req); 8741 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8742 } 8743 8744 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 8745 { 8746 struct hwrm_port_phy_cfg_input req = {0}; 8747 8748 if (!BNXT_SINGLE_PF(bp)) 8749 return 0; 8750 8751 if (pci_num_vf(bp->pdev)) 8752 return 0; 8753 8754 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8755 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 8756 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8757 } 8758 8759 static int bnxt_fw_init_one(struct bnxt *bp); 8760 8761 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 8762 { 8763 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; 8764 struct hwrm_func_drv_if_change_input req = {0}; 8765 bool resc_reinit = false, fw_reset = false; 8766 u32 flags = 0; 8767 int rc; 8768 8769 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 8770 return 0; 8771 8772 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); 8773 if (up) 8774 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 8775 mutex_lock(&bp->hwrm_cmd_lock); 8776 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8777 if (!rc) 8778 flags = 
le32_to_cpu(resp->flags); 8779 mutex_unlock(&bp->hwrm_cmd_lock); 8780 if (rc) 8781 return rc; 8782 8783 if (!up) 8784 return 0; 8785 8786 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 8787 resc_reinit = true; 8788 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) 8789 fw_reset = true; 8790 8791 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 8792 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 8793 return -ENODEV; 8794 } 8795 if (resc_reinit || fw_reset) { 8796 if (fw_reset) { 8797 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 8798 bnxt_ulp_stop(bp); 8799 rc = bnxt_fw_init_one(bp); 8800 if (rc) { 8801 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 8802 return rc; 8803 } 8804 bnxt_clear_int_mode(bp); 8805 rc = bnxt_init_int_mode(bp); 8806 if (rc) { 8807 netdev_err(bp->dev, "init int mode failed\n"); 8808 return rc; 8809 } 8810 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 8811 } 8812 if (BNXT_NEW_RM(bp)) { 8813 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8814 8815 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 8816 hw_resc->resv_cp_rings = 0; 8817 hw_resc->resv_stat_ctxs = 0; 8818 hw_resc->resv_irqs = 0; 8819 hw_resc->resv_tx_rings = 0; 8820 hw_resc->resv_rx_rings = 0; 8821 hw_resc->resv_hw_ring_grps = 0; 8822 hw_resc->resv_vnics = 0; 8823 if (!fw_reset) { 8824 bp->tx_nr_rings = 0; 8825 bp->rx_nr_rings = 0; 8826 } 8827 } 8828 } 8829 return 0; 8830 } 8831 8832 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 8833 { 8834 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 8835 struct hwrm_port_led_qcaps_input req = {0}; 8836 struct bnxt_pf_info *pf = &bp->pf; 8837 int rc; 8838 8839 bp->num_leds = 0; 8840 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 8841 return 0; 8842 8843 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 8844 req.port_id = cpu_to_le16(pf->port_id); 8845 mutex_lock(&bp->hwrm_cmd_lock); 8846 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8847 if (rc) { 8848 mutex_unlock(&bp->hwrm_cmd_lock); 8849 return rc; 8850 } 8851 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 8852 int i; 8853 8854 bp->num_leds = resp->num_leds; 8855 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 8856 bp->num_leds); 8857 for (i = 0; i < bp->num_leds; i++) { 8858 struct bnxt_led_info *led = &bp->leds[i]; 8859 __le16 caps = led->led_state_caps; 8860 8861 if (!led->led_group_id || 8862 !BNXT_LED_ALT_BLINK_CAP(caps)) { 8863 bp->num_leds = 0; 8864 break; 8865 } 8866 } 8867 } 8868 mutex_unlock(&bp->hwrm_cmd_lock); 8869 return 0; 8870 } 8871 8872 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 8873 { 8874 struct hwrm_wol_filter_alloc_input req = {0}; 8875 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 8876 int rc; 8877 8878 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 8879 req.port_id = cpu_to_le16(bp->pf.port_id); 8880 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 8881 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 8882 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 8883 mutex_lock(&bp->hwrm_cmd_lock); 8884 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8885 if (!rc) 8886 bp->wol_filter_id = resp->wol_filter_id; 8887 mutex_unlock(&bp->hwrm_cmd_lock); 8888 return rc; 8889 } 8890 8891 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 8892 { 8893 struct hwrm_wol_filter_free_input req = {0}; 8894 int rc; 8895 8896 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 8897 req.port_id = 
cpu_to_le16(bp->pf.port_id); 8898 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 8899 req.wol_filter_id = bp->wol_filter_id; 8900 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8901 return rc; 8902 } 8903 8904 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 8905 { 8906 struct hwrm_wol_filter_qcfg_input req = {0}; 8907 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 8908 u16 next_handle = 0; 8909 int rc; 8910 8911 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 8912 req.port_id = cpu_to_le16(bp->pf.port_id); 8913 req.handle = cpu_to_le16(handle); 8914 mutex_lock(&bp->hwrm_cmd_lock); 8915 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8916 if (!rc) { 8917 next_handle = le16_to_cpu(resp->next_handle); 8918 if (next_handle != 0) { 8919 if (resp->wol_type == 8920 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 8921 bp->wol = 1; 8922 bp->wol_filter_id = resp->wol_filter_id; 8923 } 8924 } 8925 } 8926 mutex_unlock(&bp->hwrm_cmd_lock); 8927 return next_handle; 8928 } 8929 8930 static void bnxt_get_wol_settings(struct bnxt *bp) 8931 { 8932 u16 handle = 0; 8933 8934 bp->wol = 0; 8935 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 8936 return; 8937 8938 do { 8939 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 8940 } while (handle && handle != 0xffff); 8941 } 8942 8943 #ifdef CONFIG_BNXT_HWMON 8944 static ssize_t bnxt_show_temp(struct device *dev, 8945 struct device_attribute *devattr, char *buf) 8946 { 8947 struct hwrm_temp_monitor_query_input req = {0}; 8948 struct hwrm_temp_monitor_query_output *resp; 8949 struct bnxt *bp = dev_get_drvdata(dev); 8950 u32 temp = 0; 8951 8952 resp = bp->hwrm_cmd_resp_addr; 8953 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); 8954 mutex_lock(&bp->hwrm_cmd_lock); 8955 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) 8956 temp = resp->temp * 1000; /* display millidegree */ 8957 mutex_unlock(&bp->hwrm_cmd_lock); 8958 8959 return sprintf(buf, "%u\n", temp); 8960 } 8961 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); 8962 8963 static struct attribute *bnxt_attrs[] = { 8964 &sensor_dev_attr_temp1_input.dev_attr.attr, 8965 NULL 8966 }; 8967 ATTRIBUTE_GROUPS(bnxt); 8968 8969 static void bnxt_hwmon_close(struct bnxt *bp) 8970 { 8971 if (bp->hwmon_dev) { 8972 hwmon_device_unregister(bp->hwmon_dev); 8973 bp->hwmon_dev = NULL; 8974 } 8975 } 8976 8977 static void bnxt_hwmon_open(struct bnxt *bp) 8978 { 8979 struct pci_dev *pdev = bp->pdev; 8980 8981 if (bp->hwmon_dev) 8982 return; 8983 8984 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, 8985 DRV_MODULE_NAME, bp, 8986 bnxt_groups); 8987 if (IS_ERR(bp->hwmon_dev)) { 8988 bp->hwmon_dev = NULL; 8989 dev_warn(&pdev->dev, "Cannot register hwmon device\n"); 8990 } 8991 } 8992 #else 8993 static void bnxt_hwmon_close(struct bnxt *bp) 8994 { 8995 } 8996 8997 static void bnxt_hwmon_open(struct bnxt *bp) 8998 { 8999 } 9000 #endif 9001 9002 static bool bnxt_eee_config_ok(struct bnxt *bp) 9003 { 9004 struct ethtool_eee *eee = &bp->eee; 9005 struct bnxt_link_info *link_info = &bp->link_info; 9006 9007 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 9008 return true; 9009 9010 if (eee->eee_enabled) { 9011 u32 advertising = 9012 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 9013 9014 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9015 eee->eee_enabled = 0; 9016 return false; 9017 } 9018 if (eee->advertised & ~advertising) { 9019 eee->advertised = advertising & 
eee->supported;
			return false;
		}
	}
	return true;
}

static int bnxt_update_phy_setting(struct bnxt *bp)
{
	int rc;
	bool update_link = false;
	bool update_pause = false;
	bool update_eee = false;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_update_link(bp, true);
	if (rc) {
		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
			   rc);
		return rc;
	}
	if (!BNXT_SINGLE_PF(bp))
		return 0;

	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
	    link_info->req_flow_ctrl)
		update_pause = true;
	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
	    link_info->force_pause_setting != link_info->req_flow_ctrl)
		update_pause = true;
	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		if (BNXT_AUTO_MODE(link_info->auto_mode))
			update_link = true;
		if (link_info->req_link_speed != link_info->force_link_speed)
			update_link = true;
		if (link_info->req_duplex != link_info->duplex_setting)
			update_link = true;
	} else {
		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
			update_link = true;
		if (link_info->advertising != link_info->auto_link_speeds)
			update_link = true;
	}

	/* The last close may have shut down the link, so we need to call
	 * PHY_CFG to bring it back up.
	 */
	if (!netif_carrier_ok(bp->dev))
		update_link = true;

	if (!bnxt_eee_config_ok(bp))
		update_eee = true;

	if (update_link)
		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
	else if (update_pause)
		rc = bnxt_hwrm_set_pause(bp);
	if (rc) {
		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
			   rc);
		return rc;
	}

	return rc;
}

/* Common routine to pre-map certain register blocks to different GRC windows.
 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
 * in the PF and 3 windows in the VF can be customized to map different
 * register blocks.
 */
static void bnxt_preset_reg_win(struct bnxt *bp)
{
	if (BNXT_PF(bp)) {
		/* CAG registers map to GRC window #4 */
		writel(BNXT_CAG_REG_BASE,
		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
	}
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp);

static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
	int rc = 0;

	bnxt_preset_reg_win(bp);
	netif_carrier_off(bp->dev);
	if (irq_re_init) {
		/* Reserve rings now if none were reserved at driver probe.
*/ 9110 rc = bnxt_init_dflt_ring_mode(bp); 9111 if (rc) { 9112 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 9113 return rc; 9114 } 9115 } 9116 rc = bnxt_reserve_rings(bp, irq_re_init); 9117 if (rc) 9118 return rc; 9119 if ((bp->flags & BNXT_FLAG_RFS) && 9120 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 9121 /* disable RFS if falling back to INTA */ 9122 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 9123 bp->flags &= ~BNXT_FLAG_RFS; 9124 } 9125 9126 rc = bnxt_alloc_mem(bp, irq_re_init); 9127 if (rc) { 9128 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9129 goto open_err_free_mem; 9130 } 9131 9132 if (irq_re_init) { 9133 bnxt_init_napi(bp); 9134 rc = bnxt_request_irq(bp); 9135 if (rc) { 9136 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 9137 goto open_err_irq; 9138 } 9139 } 9140 9141 bnxt_enable_napi(bp); 9142 bnxt_debug_dev_init(bp); 9143 9144 rc = bnxt_init_nic(bp, irq_re_init); 9145 if (rc) { 9146 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9147 goto open_err; 9148 } 9149 9150 if (link_re_init) { 9151 mutex_lock(&bp->link_lock); 9152 rc = bnxt_update_phy_setting(bp); 9153 mutex_unlock(&bp->link_lock); 9154 if (rc) { 9155 netdev_warn(bp->dev, "failed to update phy settings\n"); 9156 if (BNXT_SINGLE_PF(bp)) { 9157 bp->link_info.phy_retry = true; 9158 bp->link_info.phy_retry_expires = 9159 jiffies + 5 * HZ; 9160 } 9161 } 9162 } 9163 9164 if (irq_re_init) 9165 udp_tunnel_get_rx_info(bp->dev); 9166 9167 set_bit(BNXT_STATE_OPEN, &bp->state); 9168 bnxt_enable_int(bp); 9169 /* Enable TX queues */ 9170 bnxt_tx_enable(bp); 9171 mod_timer(&bp->timer, jiffies + bp->current_interval); 9172 /* Poll link status and check for SFP+ module status */ 9173 bnxt_get_port_module_status(bp); 9174 9175 /* VF-reps may need to be re-opened after the PF is re-opened */ 9176 if (BNXT_PF(bp)) 9177 bnxt_vf_reps_open(bp); 9178 return 0; 9179 9180 open_err: 9181 bnxt_debug_dev_exit(bp); 9182 bnxt_disable_napi(bp); 9183 9184 open_err_irq: 9185 bnxt_del_napi(bp); 9186 9187 open_err_free_mem: 9188 bnxt_free_skbs(bp); 9189 bnxt_free_irq(bp); 9190 bnxt_free_mem(bp, true); 9191 return rc; 9192 } 9193 9194 /* rtnl_lock held */ 9195 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9196 { 9197 int rc = 0; 9198 9199 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 9200 if (rc) { 9201 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 9202 dev_close(bp->dev); 9203 } 9204 return rc; 9205 } 9206 9207 /* rtnl_lock held, open the NIC half way by allocating all resources, but 9208 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 9209 * self tests. 9210 */ 9211 int bnxt_half_open_nic(struct bnxt *bp) 9212 { 9213 int rc = 0; 9214 9215 rc = bnxt_alloc_mem(bp, false); 9216 if (rc) { 9217 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9218 goto half_open_err; 9219 } 9220 rc = bnxt_init_nic(bp, false); 9221 if (rc) { 9222 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9223 goto half_open_err; 9224 } 9225 return 0; 9226 9227 half_open_err: 9228 bnxt_free_skbs(bp); 9229 bnxt_free_mem(bp, false); 9230 dev_close(bp->dev); 9231 return rc; 9232 } 9233 9234 /* rtnl_lock held, this call can only be made after a previous successful 9235 * call to bnxt_half_open_nic(). 
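 * It only needs to undo what bnxt_half_open_nic() set up: the chip
 * configuration, the SKBs and the driver memory; NAPI, IRQ and TX were
 * never enabled.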
 */
void bnxt_half_close_nic(struct bnxt *bp)
{
	bnxt_hwrm_resource_free(bp, false, false);
	bnxt_free_skbs(bp);
	bnxt_free_mem(bp, false);
}

static int bnxt_open(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
		netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
		return -ENODEV;
	}

	rc = bnxt_hwrm_if_change(bp, true);
	if (rc)
		return rc;
	rc = __bnxt_open_nic(bp, true, true);
	if (rc) {
		bnxt_hwrm_if_change(bp, false);
	} else {
		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
			if (BNXT_PF(bp)) {
				struct bnxt_pf_info *pf = &bp->pf;
				int n = pf->active_vfs;

				if (n)
					bnxt_cfg_hw_sriov(bp, &n, true);
			}
			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
				bnxt_ulp_start(bp, 0);
		}
		bnxt_hwmon_open(bp);
	}

	return rc;
}

static bool bnxt_drv_busy(struct bnxt *bp)
{
	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
		test_bit(BNXT_STATE_READ_STATS, &bp->state));
}

static void bnxt_get_ring_stats(struct bnxt *bp,
				struct rtnl_link_stats64 *stats);

static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
			     bool link_re_init)
{
	/* Close the VF-reps before closing PF */
	if (BNXT_PF(bp))
		bnxt_vf_reps_close(bp);

	/* Change device state to avoid TX queue wake-ups */
	bnxt_tx_disable(bp);

	clear_bit(BNXT_STATE_OPEN, &bp->state);
	smp_mb__after_atomic();
	while (bnxt_drv_busy(bp))
		msleep(20);

	/* Flush rings and disable interrupts */
	bnxt_shutdown_nic(bp, irq_re_init);

	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */

	bnxt_debug_dev_exit(bp);
	bnxt_disable_napi(bp);
	del_timer_sync(&bp->timer);
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
	    pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);

	bnxt_free_skbs(bp);

	/* Save ring stats before shutdown */
	if (bp->bnapi)
		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
	if (irq_re_init) {
		bnxt_free_irq(bp);
		bnxt_del_napi(bp);
	}
	bnxt_free_mem(bp, irq_re_init);
}

int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
	int rc = 0;

	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		/* If we get here, it means firmware reset is in progress
		 * while we are trying to close. We can safely proceed with
		 * the close because we are holding rtnl_lock(). Some firmware
		 * messages may fail as we proceed to close. We set the
		 * ABORT_ERR flag here so that the FW reset thread will later
		 * abort when it gets the rtnl_lock() and sees the flag.
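		 * bnxt_open() also checks BNXT_STATE_ABORT_ERR and refuses to
		 * reopen the device while the flag is set.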
9337 */ 9338 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 9339 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 9340 } 9341 9342 #ifdef CONFIG_BNXT_SRIOV 9343 if (bp->sriov_cfg) { 9344 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 9345 !bp->sriov_cfg, 9346 BNXT_SRIOV_CFG_WAIT_TMO); 9347 if (rc) 9348 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 9349 } 9350 #endif 9351 __bnxt_close_nic(bp, irq_re_init, link_re_init); 9352 return rc; 9353 } 9354 9355 static int bnxt_close(struct net_device *dev) 9356 { 9357 struct bnxt *bp = netdev_priv(dev); 9358 9359 bnxt_hwmon_close(bp); 9360 bnxt_close_nic(bp, true, true); 9361 bnxt_hwrm_shutdown_link(bp); 9362 bnxt_hwrm_if_change(bp, false); 9363 return 0; 9364 } 9365 9366 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 9367 u16 *val) 9368 { 9369 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; 9370 struct hwrm_port_phy_mdio_read_input req = {0}; 9371 int rc; 9372 9373 if (bp->hwrm_spec_code < 0x10a00) 9374 return -EOPNOTSUPP; 9375 9376 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); 9377 req.port_id = cpu_to_le16(bp->pf.port_id); 9378 req.phy_addr = phy_addr; 9379 req.reg_addr = cpu_to_le16(reg & 0x1f); 9380 if (mdio_phy_id_is_c45(phy_addr)) { 9381 req.cl45_mdio = 1; 9382 req.phy_addr = mdio_phy_id_prtad(phy_addr); 9383 req.dev_addr = mdio_phy_id_devad(phy_addr); 9384 req.reg_addr = cpu_to_le16(reg); 9385 } 9386 9387 mutex_lock(&bp->hwrm_cmd_lock); 9388 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9389 if (!rc) 9390 *val = le16_to_cpu(resp->reg_data); 9391 mutex_unlock(&bp->hwrm_cmd_lock); 9392 return rc; 9393 } 9394 9395 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 9396 u16 val) 9397 { 9398 struct hwrm_port_phy_mdio_write_input req = {0}; 9399 9400 if (bp->hwrm_spec_code < 0x10a00) 9401 return -EOPNOTSUPP; 9402 9403 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); 9404 req.port_id = cpu_to_le16(bp->pf.port_id); 9405 req.phy_addr = phy_addr; 9406 req.reg_addr = cpu_to_le16(reg & 0x1f); 9407 if (mdio_phy_id_is_c45(phy_addr)) { 9408 req.cl45_mdio = 1; 9409 req.phy_addr = mdio_phy_id_prtad(phy_addr); 9410 req.dev_addr = mdio_phy_id_devad(phy_addr); 9411 req.reg_addr = cpu_to_le16(reg); 9412 } 9413 req.reg_data = cpu_to_le16(val); 9414 9415 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9416 } 9417 9418 /* rtnl_lock held */ 9419 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 9420 { 9421 struct mii_ioctl_data *mdio = if_mii(ifr); 9422 struct bnxt *bp = netdev_priv(dev); 9423 int rc; 9424 9425 switch (cmd) { 9426 case SIOCGMIIPHY: 9427 mdio->phy_id = bp->link_info.phy_addr; 9428 9429 /* fallthru */ 9430 case SIOCGMIIREG: { 9431 u16 mii_regval = 0; 9432 9433 if (!netif_running(dev)) 9434 return -EAGAIN; 9435 9436 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 9437 &mii_regval); 9438 mdio->val_out = mii_regval; 9439 return rc; 9440 } 9441 9442 case SIOCSMIIREG: 9443 if (!netif_running(dev)) 9444 return -EAGAIN; 9445 9446 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 9447 mdio->val_in); 9448 9449 default: 9450 /* do nothing */ 9451 break; 9452 } 9453 return -EOPNOTSUPP; 9454 } 9455 9456 static void bnxt_get_ring_stats(struct bnxt *bp, 9457 struct rtnl_link_stats64 *stats) 9458 { 9459 int i; 9460 9461 9462 for (i = 0; i < bp->cp_nr_rings; i++) { 9463 struct 
bnxt_napi *bnapi = bp->bnapi[i]; 9464 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 9465 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 9466 9467 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 9468 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 9469 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 9470 9471 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 9472 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 9473 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 9474 9475 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 9476 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 9477 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 9478 9479 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 9480 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 9481 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 9482 9483 stats->rx_missed_errors += 9484 le64_to_cpu(hw_stats->rx_discard_pkts); 9485 9486 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 9487 9488 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 9489 } 9490 } 9491 9492 static void bnxt_add_prev_stats(struct bnxt *bp, 9493 struct rtnl_link_stats64 *stats) 9494 { 9495 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 9496 9497 stats->rx_packets += prev_stats->rx_packets; 9498 stats->tx_packets += prev_stats->tx_packets; 9499 stats->rx_bytes += prev_stats->rx_bytes; 9500 stats->tx_bytes += prev_stats->tx_bytes; 9501 stats->rx_missed_errors += prev_stats->rx_missed_errors; 9502 stats->multicast += prev_stats->multicast; 9503 stats->tx_dropped += prev_stats->tx_dropped; 9504 } 9505 9506 static void 9507 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 9508 { 9509 struct bnxt *bp = netdev_priv(dev); 9510 9511 set_bit(BNXT_STATE_READ_STATS, &bp->state); 9512 /* Make sure bnxt_close_nic() sees that we are reading stats before 9513 * we check the BNXT_STATE_OPEN flag. 
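	 * The barrier below pairs with the smp_mb__after_atomic() in
	 * __bnxt_close_nic() that follows clearing of BNXT_STATE_OPEN.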
9514 */ 9515 smp_mb__after_atomic(); 9516 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 9517 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 9518 *stats = bp->net_stats_prev; 9519 return; 9520 } 9521 9522 bnxt_get_ring_stats(bp, stats); 9523 bnxt_add_prev_stats(bp, stats); 9524 9525 if (bp->flags & BNXT_FLAG_PORT_STATS) { 9526 struct rx_port_stats *rx = bp->hw_rx_port_stats; 9527 struct tx_port_stats *tx = bp->hw_tx_port_stats; 9528 9529 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 9530 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 9531 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 9532 le64_to_cpu(rx->rx_ovrsz_frames) + 9533 le64_to_cpu(rx->rx_runt_frames); 9534 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 9535 le64_to_cpu(rx->rx_jbr_frames); 9536 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 9537 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 9538 stats->tx_errors = le64_to_cpu(tx->tx_err); 9539 } 9540 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 9541 } 9542 9543 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 9544 { 9545 struct net_device *dev = bp->dev; 9546 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9547 struct netdev_hw_addr *ha; 9548 u8 *haddr; 9549 int mc_count = 0; 9550 bool update = false; 9551 int off = 0; 9552 9553 netdev_for_each_mc_addr(ha, dev) { 9554 if (mc_count >= BNXT_MAX_MC_ADDRS) { 9555 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9556 vnic->mc_list_count = 0; 9557 return false; 9558 } 9559 haddr = ha->addr; 9560 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 9561 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 9562 update = true; 9563 } 9564 off += ETH_ALEN; 9565 mc_count++; 9566 } 9567 if (mc_count) 9568 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 9569 9570 if (mc_count != vnic->mc_list_count) { 9571 vnic->mc_list_count = mc_count; 9572 update = true; 9573 } 9574 return update; 9575 } 9576 9577 static bool bnxt_uc_list_updated(struct bnxt *bp) 9578 { 9579 struct net_device *dev = bp->dev; 9580 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9581 struct netdev_hw_addr *ha; 9582 int off = 0; 9583 9584 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 9585 return true; 9586 9587 netdev_for_each_uc_addr(ha, dev) { 9588 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 9589 return true; 9590 9591 off += ETH_ALEN; 9592 } 9593 return false; 9594 } 9595 9596 static void bnxt_set_rx_mode(struct net_device *dev) 9597 { 9598 struct bnxt *bp = netdev_priv(dev); 9599 struct bnxt_vnic_info *vnic; 9600 bool mc_update = false; 9601 bool uc_update; 9602 u32 mask; 9603 9604 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 9605 return; 9606 9607 vnic = &bp->vnic_info[0]; 9608 mask = vnic->rx_mask; 9609 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 9610 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 9611 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 9612 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 9613 9614 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 9615 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9616 9617 uc_update = bnxt_uc_list_updated(bp); 9618 9619 if (dev->flags & IFF_BROADCAST) 9620 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 9621 if (dev->flags & IFF_ALLMULTI) { 9622 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9623 vnic->mc_list_count = 0; 9624 } else { 9625 mc_update = bnxt_mc_list_updated(bp, &mask); 9626 } 9627 9628 if (mask != vnic->rx_mask || uc_update || mc_update) { 9629 vnic->rx_mask = mask; 9630 9631 set_bit(BNXT_RX_MASK_SP_EVENT, 
&bp->sp_event); 9632 bnxt_queue_sp_work(bp); 9633 } 9634 } 9635 9636 static int bnxt_cfg_rx_mode(struct bnxt *bp) 9637 { 9638 struct net_device *dev = bp->dev; 9639 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9640 struct netdev_hw_addr *ha; 9641 int i, off = 0, rc; 9642 bool uc_update; 9643 9644 netif_addr_lock_bh(dev); 9645 uc_update = bnxt_uc_list_updated(bp); 9646 netif_addr_unlock_bh(dev); 9647 9648 if (!uc_update) 9649 goto skip_uc; 9650 9651 mutex_lock(&bp->hwrm_cmd_lock); 9652 for (i = 1; i < vnic->uc_filter_count; i++) { 9653 struct hwrm_cfa_l2_filter_free_input req = {0}; 9654 9655 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 9656 -1); 9657 9658 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 9659 9660 rc = _hwrm_send_message(bp, &req, sizeof(req), 9661 HWRM_CMD_TIMEOUT); 9662 } 9663 mutex_unlock(&bp->hwrm_cmd_lock); 9664 9665 vnic->uc_filter_count = 1; 9666 9667 netif_addr_lock_bh(dev); 9668 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 9669 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9670 } else { 9671 netdev_for_each_uc_addr(ha, dev) { 9672 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 9673 off += ETH_ALEN; 9674 vnic->uc_filter_count++; 9675 } 9676 } 9677 netif_addr_unlock_bh(dev); 9678 9679 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 9680 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 9681 if (rc) { 9682 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 9683 rc); 9684 vnic->uc_filter_count = i; 9685 return rc; 9686 } 9687 } 9688 9689 skip_uc: 9690 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 9691 if (rc && vnic->mc_list_count) { 9692 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 9693 rc); 9694 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9695 vnic->mc_list_count = 0; 9696 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 9697 } 9698 if (rc) 9699 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 9700 rc); 9701 9702 return rc; 9703 } 9704 9705 static bool bnxt_can_reserve_rings(struct bnxt *bp) 9706 { 9707 #ifdef CONFIG_BNXT_SRIOV 9708 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 9709 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9710 9711 /* No minimum rings were provisioned by the PF. Don't 9712 * reserve rings by default when device is down. 
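		 * bnxt_set_dflt_rings() then skips the default ring reservation.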
		 */
		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
			return true;

		if (!netif_running(bp->dev))
			return false;
	}
#endif
	return true;
}

/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
			return true;
		return false;
	}
	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
		return true;
	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
		return true;
	return false;
}

/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int vnics, max_vnics, max_rss_ctxs;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return bnxt_rfs_supported(bp);
	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
		return false;

	vnics = 1 + bp->rx_nr_rings;
	max_vnics = bnxt_get_max_func_vnics(bp);
	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);

	/* RSS contexts not a limiting factor */
	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
		max_rss_ctxs = max_vnics;
	if (vnics > max_vnics || vnics > max_rss_ctxs) {
		if (bp->rx_nr_rings > 1)
			netdev_warn(bp->dev,
				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
				    min(max_rss_ctxs - 1, max_vnics - 1));
		return false;
	}

	if (!BNXT_NEW_RM(bp))
		return true;

	if (vnics == bp->hw_resc.resv_vnics)
		return true;

	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
	if (vnics <= bp->hw_resc.resv_vnics)
		return true;

	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
	return false;
#else
	return false;
#endif
}

static netdev_features_t bnxt_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
		features &= ~NETIF_F_NTUPLE;

	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);

	if (!(features & NETIF_F_GRO))
		features &= ~NETIF_F_GRO_HW;

	if (features & NETIF_F_GRO_HW)
		features &= ~NETIF_F_LRO;

	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
	 * turned on or off together.
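	 * If the caller changed only one of them, follow that change and
	 * toggle both flags together below.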
9802 */ 9803 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 9804 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 9805 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 9806 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 9807 NETIF_F_HW_VLAN_STAG_RX); 9808 else 9809 features |= NETIF_F_HW_VLAN_CTAG_RX | 9810 NETIF_F_HW_VLAN_STAG_RX; 9811 } 9812 #ifdef CONFIG_BNXT_SRIOV 9813 if (BNXT_VF(bp)) { 9814 if (bp->vf.vlan) { 9815 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 9816 NETIF_F_HW_VLAN_STAG_RX); 9817 } 9818 } 9819 #endif 9820 return features; 9821 } 9822 9823 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 9824 { 9825 struct bnxt *bp = netdev_priv(dev); 9826 u32 flags = bp->flags; 9827 u32 changes; 9828 int rc = 0; 9829 bool re_init = false; 9830 bool update_tpa = false; 9831 9832 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 9833 if (features & NETIF_F_GRO_HW) 9834 flags |= BNXT_FLAG_GRO; 9835 else if (features & NETIF_F_LRO) 9836 flags |= BNXT_FLAG_LRO; 9837 9838 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 9839 flags &= ~BNXT_FLAG_TPA; 9840 9841 if (features & NETIF_F_HW_VLAN_CTAG_RX) 9842 flags |= BNXT_FLAG_STRIP_VLAN; 9843 9844 if (features & NETIF_F_NTUPLE) 9845 flags |= BNXT_FLAG_RFS; 9846 9847 changes = flags ^ bp->flags; 9848 if (changes & BNXT_FLAG_TPA) { 9849 update_tpa = true; 9850 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 9851 (flags & BNXT_FLAG_TPA) == 0 || 9852 (bp->flags & BNXT_FLAG_CHIP_P5)) 9853 re_init = true; 9854 } 9855 9856 if (changes & ~BNXT_FLAG_TPA) 9857 re_init = true; 9858 9859 if (flags != bp->flags) { 9860 u32 old_flags = bp->flags; 9861 9862 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 9863 bp->flags = flags; 9864 if (update_tpa) 9865 bnxt_set_ring_params(bp); 9866 return rc; 9867 } 9868 9869 if (re_init) { 9870 bnxt_close_nic(bp, false, false); 9871 bp->flags = flags; 9872 if (update_tpa) 9873 bnxt_set_ring_params(bp); 9874 9875 return bnxt_open_nic(bp, false, false); 9876 } 9877 if (update_tpa) { 9878 bp->flags = flags; 9879 rc = bnxt_set_tpa(bp, 9880 (flags & BNXT_FLAG_TPA) ? 
9881 true : false); 9882 if (rc) 9883 bp->flags = old_flags; 9884 } 9885 } 9886 return rc; 9887 } 9888 9889 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 9890 u32 ring_id, u32 *prod, u32 *cons) 9891 { 9892 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; 9893 struct hwrm_dbg_ring_info_get_input req = {0}; 9894 int rc; 9895 9896 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); 9897 req.ring_type = ring_type; 9898 req.fw_ring_id = cpu_to_le32(ring_id); 9899 mutex_lock(&bp->hwrm_cmd_lock); 9900 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9901 if (!rc) { 9902 *prod = le32_to_cpu(resp->producer_index); 9903 *cons = le32_to_cpu(resp->consumer_index); 9904 } 9905 mutex_unlock(&bp->hwrm_cmd_lock); 9906 return rc; 9907 } 9908 9909 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 9910 { 9911 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 9912 int i = bnapi->index; 9913 9914 if (!txr) 9915 return; 9916 9917 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 9918 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 9919 txr->tx_cons); 9920 } 9921 9922 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 9923 { 9924 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 9925 int i = bnapi->index; 9926 9927 if (!rxr) 9928 return; 9929 9930 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 9931 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 9932 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 9933 rxr->rx_sw_agg_prod); 9934 } 9935 9936 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 9937 { 9938 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 9939 int i = bnapi->index; 9940 9941 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 9942 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 9943 } 9944 9945 static void bnxt_dbg_dump_states(struct bnxt *bp) 9946 { 9947 int i; 9948 struct bnxt_napi *bnapi; 9949 9950 for (i = 0; i < bp->cp_nr_rings; i++) { 9951 bnapi = bp->bnapi[i]; 9952 if (netif_msg_drv(bp)) { 9953 bnxt_dump_tx_sw_state(bnapi); 9954 bnxt_dump_rx_sw_state(bnapi); 9955 bnxt_dump_cp_sw_state(bnapi); 9956 } 9957 } 9958 } 9959 9960 static void bnxt_reset_task(struct bnxt *bp, bool silent) 9961 { 9962 if (!silent) 9963 bnxt_dbg_dump_states(bp); 9964 if (netif_running(bp->dev)) { 9965 int rc; 9966 9967 if (silent) { 9968 bnxt_close_nic(bp, false, false); 9969 bnxt_open_nic(bp, false, false); 9970 } else { 9971 bnxt_ulp_stop(bp); 9972 bnxt_close_nic(bp, true, false); 9973 rc = bnxt_open_nic(bp, true, false); 9974 bnxt_ulp_start(bp, rc); 9975 } 9976 } 9977 } 9978 9979 static void bnxt_tx_timeout(struct net_device *dev) 9980 { 9981 struct bnxt *bp = netdev_priv(dev); 9982 9983 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 9984 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 9985 bnxt_queue_sp_work(bp); 9986 } 9987 9988 static void bnxt_fw_health_check(struct bnxt *bp) 9989 { 9990 struct bnxt_fw_health *fw_health = bp->fw_health; 9991 u32 val; 9992 9993 if (!fw_health || !fw_health->enabled || 9994 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 9995 return; 9996 9997 if (fw_health->tmr_counter) { 9998 fw_health->tmr_counter--; 9999 return; 10000 } 10001 10002 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 10003 if (val == fw_health->last_fw_heartbeat) 10004 goto fw_reset; 10005 10006 fw_health->last_fw_heartbeat = val; 10007 10008 val = bnxt_fw_health_readl(bp, 
BNXT_FW_RESET_CNT_REG); 10009 if (val != fw_health->last_fw_reset_cnt) 10010 goto fw_reset; 10011 10012 fw_health->tmr_counter = fw_health->tmr_multiplier; 10013 return; 10014 10015 fw_reset: 10016 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event); 10017 bnxt_queue_sp_work(bp); 10018 } 10019 10020 static void bnxt_timer(struct timer_list *t) 10021 { 10022 struct bnxt *bp = from_timer(bp, t, timer); 10023 struct net_device *dev = bp->dev; 10024 10025 if (!netif_running(dev)) 10026 return; 10027 10028 if (atomic_read(&bp->intr_sem) != 0) 10029 goto bnxt_restart_timer; 10030 10031 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 10032 bnxt_fw_health_check(bp); 10033 10034 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 10035 bp->stats_coal_ticks) { 10036 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 10037 bnxt_queue_sp_work(bp); 10038 } 10039 10040 if (bnxt_tc_flower_enabled(bp)) { 10041 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); 10042 bnxt_queue_sp_work(bp); 10043 } 10044 10045 if (bp->link_info.phy_retry) { 10046 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 10047 bp->link_info.phy_retry = false; 10048 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 10049 } else { 10050 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); 10051 bnxt_queue_sp_work(bp); 10052 } 10053 } 10054 10055 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) { 10056 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); 10057 bnxt_queue_sp_work(bp); 10058 } 10059 bnxt_restart_timer: 10060 mod_timer(&bp->timer, jiffies + bp->current_interval); 10061 } 10062 10063 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 10064 { 10065 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 10066 * set. If the device is being closed, bnxt_close() may be holding 10067 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 10068 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 
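	 * Otherwise the two paths would deadlock: bnxt_close() waits for the
	 * bit to clear while this task waits for the rtnl lock it holds.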
10069 */ 10070 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10071 rtnl_lock(); 10072 } 10073 10074 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 10075 { 10076 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10077 rtnl_unlock(); 10078 } 10079 10080 /* Only called from bnxt_sp_task() */ 10081 static void bnxt_reset(struct bnxt *bp, bool silent) 10082 { 10083 bnxt_rtnl_lock_sp(bp); 10084 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 10085 bnxt_reset_task(bp, silent); 10086 bnxt_rtnl_unlock_sp(bp); 10087 } 10088 10089 static void bnxt_fw_reset_close(struct bnxt *bp) 10090 { 10091 bnxt_ulp_stop(bp); 10092 __bnxt_close_nic(bp, true, false); 10093 bnxt_clear_int_mode(bp); 10094 bnxt_hwrm_func_drv_unrgtr(bp); 10095 bnxt_free_ctx_mem(bp); 10096 kfree(bp->ctx); 10097 bp->ctx = NULL; 10098 } 10099 10100 static bool is_bnxt_fw_ok(struct bnxt *bp) 10101 { 10102 struct bnxt_fw_health *fw_health = bp->fw_health; 10103 bool no_heartbeat = false, has_reset = false; 10104 u32 val; 10105 10106 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 10107 if (val == fw_health->last_fw_heartbeat) 10108 no_heartbeat = true; 10109 10110 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 10111 if (val != fw_health->last_fw_reset_cnt) 10112 has_reset = true; 10113 10114 if (!no_heartbeat && has_reset) 10115 return true; 10116 10117 return false; 10118 } 10119 10120 /* rtnl_lock is acquired before calling this function */ 10121 static void bnxt_force_fw_reset(struct bnxt *bp) 10122 { 10123 struct bnxt_fw_health *fw_health = bp->fw_health; 10124 u32 wait_dsecs; 10125 10126 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 10127 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10128 return; 10129 10130 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10131 bnxt_fw_reset_close(bp); 10132 wait_dsecs = fw_health->master_func_wait_dsecs; 10133 if (fw_health->master) { 10134 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 10135 wait_dsecs = 0; 10136 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 10137 } else { 10138 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 10139 wait_dsecs = fw_health->normal_func_wait_dsecs; 10140 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10141 } 10142 10143 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 10144 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 10145 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 10146 } 10147 10148 void bnxt_fw_exception(struct bnxt *bp) 10149 { 10150 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 10151 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 10152 bnxt_rtnl_lock_sp(bp); 10153 bnxt_force_fw_reset(bp); 10154 bnxt_rtnl_unlock_sp(bp); 10155 } 10156 10157 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 10158 * < 0 on error. 
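 * Always returns 0 on a VF or when SR-IOV support is not compiled in.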
10159 */ 10160 static int bnxt_get_registered_vfs(struct bnxt *bp) 10161 { 10162 #ifdef CONFIG_BNXT_SRIOV 10163 int rc; 10164 10165 if (!BNXT_PF(bp)) 10166 return 0; 10167 10168 rc = bnxt_hwrm_func_qcfg(bp); 10169 if (rc) { 10170 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 10171 return rc; 10172 } 10173 if (bp->pf.registered_vfs) 10174 return bp->pf.registered_vfs; 10175 if (bp->sriov_cfg) 10176 return 1; 10177 #endif 10178 return 0; 10179 } 10180 10181 void bnxt_fw_reset(struct bnxt *bp) 10182 { 10183 bnxt_rtnl_lock_sp(bp); 10184 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 10185 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10186 int n = 0, tmo; 10187 10188 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10189 if (bp->pf.active_vfs && 10190 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 10191 n = bnxt_get_registered_vfs(bp); 10192 if (n < 0) { 10193 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 10194 n); 10195 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10196 dev_close(bp->dev); 10197 goto fw_reset_exit; 10198 } else if (n > 0) { 10199 u16 vf_tmo_dsecs = n * 10; 10200 10201 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 10202 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 10203 bp->fw_reset_state = 10204 BNXT_FW_RESET_STATE_POLL_VF; 10205 bnxt_queue_fw_reset_work(bp, HZ / 10); 10206 goto fw_reset_exit; 10207 } 10208 bnxt_fw_reset_close(bp); 10209 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10210 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 10211 tmo = HZ / 10; 10212 } else { 10213 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10214 tmo = bp->fw_reset_min_dsecs * HZ / 10; 10215 } 10216 bnxt_queue_fw_reset_work(bp, tmo); 10217 } 10218 fw_reset_exit: 10219 bnxt_rtnl_unlock_sp(bp); 10220 } 10221 10222 static void bnxt_chk_missed_irq(struct bnxt *bp) 10223 { 10224 int i; 10225 10226 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 10227 return; 10228 10229 for (i = 0; i < bp->cp_nr_rings; i++) { 10230 struct bnxt_napi *bnapi = bp->bnapi[i]; 10231 struct bnxt_cp_ring_info *cpr; 10232 u32 fw_ring_id; 10233 int j; 10234 10235 if (!bnapi) 10236 continue; 10237 10238 cpr = &bnapi->cp_ring; 10239 for (j = 0; j < 2; j++) { 10240 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 10241 u32 val[2]; 10242 10243 if (!cpr2 || cpr2->has_more_work || 10244 !bnxt_has_work(bp, cpr2)) 10245 continue; 10246 10247 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 10248 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 10249 continue; 10250 } 10251 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 10252 bnxt_dbg_hwrm_ring_info_get(bp, 10253 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 10254 fw_ring_id, &val[0], &val[1]); 10255 cpr->missed_irqs++; 10256 } 10257 } 10258 } 10259 10260 static void bnxt_cfg_ntp_filters(struct bnxt *); 10261 10262 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 10263 { 10264 struct bnxt_link_info *link_info = &bp->link_info; 10265 10266 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 10267 link_info->autoneg = BNXT_AUTONEG_SPEED; 10268 if (bp->hwrm_spec_code >= 0x10201) { 10269 if (link_info->auto_pause_setting & 10270 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 10271 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10272 } else { 10273 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10274 } 10275 link_info->advertising = link_info->auto_link_speeds; 10276 } else { 10277 link_info->req_link_speed = link_info->force_link_speed; 10278 link_info->req_duplex = link_info->duplex_setting; 10279 } 10280 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 10281 
link_info->req_flow_ctrl = 10282 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 10283 else 10284 link_info->req_flow_ctrl = link_info->force_pause_setting; 10285 } 10286 10287 static void bnxt_sp_task(struct work_struct *work) 10288 { 10289 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 10290 10291 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10292 smp_mb__after_atomic(); 10293 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 10294 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10295 return; 10296 } 10297 10298 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 10299 bnxt_cfg_rx_mode(bp); 10300 10301 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 10302 bnxt_cfg_ntp_filters(bp); 10303 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 10304 bnxt_hwrm_exec_fwd_req(bp); 10305 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 10306 bnxt_hwrm_tunnel_dst_port_alloc( 10307 bp, bp->vxlan_port, 10308 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10309 } 10310 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 10311 bnxt_hwrm_tunnel_dst_port_free( 10312 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10313 } 10314 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { 10315 bnxt_hwrm_tunnel_dst_port_alloc( 10316 bp, bp->nge_port, 10317 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10318 } 10319 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { 10320 bnxt_hwrm_tunnel_dst_port_free( 10321 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10322 } 10323 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 10324 bnxt_hwrm_port_qstats(bp); 10325 bnxt_hwrm_port_qstats_ext(bp); 10326 bnxt_hwrm_pcie_qstats(bp); 10327 } 10328 10329 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 10330 int rc; 10331 10332 mutex_lock(&bp->link_lock); 10333 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 10334 &bp->sp_event)) 10335 bnxt_hwrm_phy_qcaps(bp); 10336 10337 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 10338 &bp->sp_event)) 10339 bnxt_init_ethtool_link_settings(bp); 10340 10341 rc = bnxt_update_link(bp, true); 10342 mutex_unlock(&bp->link_lock); 10343 if (rc) 10344 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 10345 rc); 10346 } 10347 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 10348 int rc; 10349 10350 mutex_lock(&bp->link_lock); 10351 rc = bnxt_update_phy_setting(bp); 10352 mutex_unlock(&bp->link_lock); 10353 if (rc) { 10354 netdev_warn(bp->dev, "update phy settings retry failed\n"); 10355 } else { 10356 bp->link_info.phy_retry = false; 10357 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 10358 } 10359 } 10360 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 10361 mutex_lock(&bp->link_lock); 10362 bnxt_get_port_module_status(bp); 10363 mutex_unlock(&bp->link_lock); 10364 } 10365 10366 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 10367 bnxt_tc_flow_stats_work(bp); 10368 10369 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 10370 bnxt_chk_missed_irq(bp); 10371 10372 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 10373 * must be the last functions to be called before exiting. 
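	 * (bnxt_reset() clears the bit in bnxt_rtnl_lock_sp() before taking
	 * the rtnl lock and sets it again in bnxt_rtnl_unlock_sp().)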
10374 */ 10375 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 10376 bnxt_reset(bp, false); 10377 10378 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 10379 bnxt_reset(bp, true); 10380 10381 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) 10382 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); 10383 10384 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 10385 if (!is_bnxt_fw_ok(bp)) 10386 bnxt_devlink_health_report(bp, 10387 BNXT_FW_EXCEPTION_SP_EVENT); 10388 } 10389 10390 smp_mb__before_atomic(); 10391 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10392 } 10393 10394 /* Under rtnl_lock */ 10395 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 10396 int tx_xdp) 10397 { 10398 int max_rx, max_tx, tx_sets = 1; 10399 int tx_rings_needed, stats; 10400 int rx_rings = rx; 10401 int cp, vnics, rc; 10402 10403 if (tcs) 10404 tx_sets = tcs; 10405 10406 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 10407 if (rc) 10408 return rc; 10409 10410 if (max_rx < rx) 10411 return -ENOMEM; 10412 10413 tx_rings_needed = tx * tx_sets + tx_xdp; 10414 if (max_tx < tx_rings_needed) 10415 return -ENOMEM; 10416 10417 vnics = 1; 10418 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 10419 vnics += rx_rings; 10420 10421 if (bp->flags & BNXT_FLAG_AGG_RINGS) 10422 rx_rings <<= 1; 10423 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; 10424 stats = cp; 10425 if (BNXT_NEW_RM(bp)) { 10426 cp += bnxt_get_ulp_msix_num(bp); 10427 stats += bnxt_get_ulp_stat_ctxs(bp); 10428 } 10429 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 10430 stats, vnics); 10431 } 10432 10433 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 10434 { 10435 if (bp->bar2) { 10436 pci_iounmap(pdev, bp->bar2); 10437 bp->bar2 = NULL; 10438 } 10439 10440 if (bp->bar1) { 10441 pci_iounmap(pdev, bp->bar1); 10442 bp->bar1 = NULL; 10443 } 10444 10445 if (bp->bar0) { 10446 pci_iounmap(pdev, bp->bar0); 10447 bp->bar0 = NULL; 10448 } 10449 } 10450 10451 static void bnxt_cleanup_pci(struct bnxt *bp) 10452 { 10453 bnxt_unmap_bars(bp, bp->pdev); 10454 pci_release_regions(bp->pdev); 10455 if (pci_is_enabled(bp->pdev)) 10456 pci_disable_device(bp->pdev); 10457 } 10458 10459 static void bnxt_init_dflt_coal(struct bnxt *bp) 10460 { 10461 struct bnxt_coal *coal; 10462 10463 /* Tick values in micro seconds. 10464 * 1 coal_buf x bufs_per_record = 1 completion record. 
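	 * RX uses two coalescing buffers per completion record, TX uses one.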
10465 */ 10466 coal = &bp->rx_coal; 10467 coal->coal_ticks = 10; 10468 coal->coal_bufs = 30; 10469 coal->coal_ticks_irq = 1; 10470 coal->coal_bufs_irq = 2; 10471 coal->idle_thresh = 50; 10472 coal->bufs_per_record = 2; 10473 coal->budget = 64; /* NAPI budget */ 10474 10475 coal = &bp->tx_coal; 10476 coal->coal_ticks = 28; 10477 coal->coal_bufs = 30; 10478 coal->coal_ticks_irq = 2; 10479 coal->coal_bufs_irq = 2; 10480 coal->bufs_per_record = 1; 10481 10482 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 10483 } 10484 10485 static int bnxt_fw_init_one_p1(struct bnxt *bp) 10486 { 10487 int rc; 10488 10489 bp->fw_cap = 0; 10490 rc = bnxt_hwrm_ver_get(bp); 10491 if (rc) 10492 return rc; 10493 10494 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { 10495 rc = bnxt_alloc_kong_hwrm_resources(bp); 10496 if (rc) 10497 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; 10498 } 10499 10500 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 10501 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { 10502 rc = bnxt_alloc_hwrm_short_cmd_req(bp); 10503 if (rc) 10504 return rc; 10505 } 10506 rc = bnxt_hwrm_func_reset(bp); 10507 if (rc) 10508 return -ENODEV; 10509 10510 bnxt_hwrm_fw_set_time(bp); 10511 return 0; 10512 } 10513 10514 static int bnxt_fw_init_one_p2(struct bnxt *bp) 10515 { 10516 int rc; 10517 10518 /* Get the MAX capabilities for this function */ 10519 rc = bnxt_hwrm_func_qcaps(bp); 10520 if (rc) { 10521 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 10522 rc); 10523 return -ENODEV; 10524 } 10525 10526 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 10527 if (rc) 10528 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 10529 rc); 10530 10531 rc = bnxt_hwrm_error_recovery_qcfg(bp); 10532 if (rc) 10533 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 10534 rc); 10535 10536 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 10537 if (rc) 10538 return -ENODEV; 10539 10540 bnxt_hwrm_func_qcfg(bp); 10541 bnxt_hwrm_vnic_qcaps(bp); 10542 bnxt_hwrm_port_led_qcaps(bp); 10543 bnxt_ethtool_init(bp); 10544 bnxt_dcb_init(bp); 10545 return 0; 10546 } 10547 10548 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 10549 { 10550 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; 10551 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 10552 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 10553 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 10554 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 10555 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) { 10556 bp->flags |= BNXT_FLAG_UDP_RSS_CAP; 10557 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 10558 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 10559 } 10560 } 10561 10562 static void bnxt_set_dflt_rfs(struct bnxt *bp) 10563 { 10564 struct net_device *dev = bp->dev; 10565 10566 dev->hw_features &= ~NETIF_F_NTUPLE; 10567 dev->features &= ~NETIF_F_NTUPLE; 10568 bp->flags &= ~BNXT_FLAG_RFS; 10569 if (bnxt_rfs_supported(bp)) { 10570 dev->hw_features |= NETIF_F_NTUPLE; 10571 if (bnxt_rfs_capable(bp)) { 10572 bp->flags |= BNXT_FLAG_RFS; 10573 dev->features |= NETIF_F_NTUPLE; 10574 } 10575 } 10576 } 10577 10578 static void bnxt_fw_init_one_p3(struct bnxt *bp) 10579 { 10580 struct pci_dev *pdev = bp->pdev; 10581 10582 bnxt_set_dflt_rss_hash_type(bp); 10583 bnxt_set_dflt_rfs(bp); 10584 10585 bnxt_get_wol_settings(bp); 10586 if (bp->flags & BNXT_FLAG_WOL_CAP) 10587 device_set_wakeup_enable(&pdev->dev, bp->wol); 10588 else 10589 device_set_wakeup_capable(&pdev->dev, false); 10590 10591 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 10592 bnxt_hwrm_coal_params_qcaps(bp); 10593 } 
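/* One-time firmware init sequence: phase 1 establishes the HWRM channel
 * (version query, short command setup) and resets the function, phase 2
 * queries capabilities and registers the driver with the firmware, and
 * phase 3 applies default settings (RSS hash type, RFS, WoL, cache line
 * size and coalescing parameters) after the MAC address is approved.
 */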
10594 10595 static int bnxt_fw_init_one(struct bnxt *bp) 10596 { 10597 int rc; 10598 10599 rc = bnxt_fw_init_one_p1(bp); 10600 if (rc) { 10601 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 10602 return rc; 10603 } 10604 rc = bnxt_fw_init_one_p2(bp); 10605 if (rc) { 10606 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 10607 return rc; 10608 } 10609 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 10610 if (rc) 10611 return rc; 10612 bnxt_fw_init_one_p3(bp); 10613 return 0; 10614 } 10615 10616 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 10617 { 10618 struct bnxt_fw_health *fw_health = bp->fw_health; 10619 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 10620 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 10621 u32 reg_type, reg_off, delay_msecs; 10622 10623 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 10624 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 10625 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 10626 switch (reg_type) { 10627 case BNXT_FW_HEALTH_REG_TYPE_CFG: 10628 pci_write_config_dword(bp->pdev, reg_off, val); 10629 break; 10630 case BNXT_FW_HEALTH_REG_TYPE_GRC: 10631 writel(reg_off & BNXT_GRC_BASE_MASK, 10632 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 10633 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 10634 /* fall through */ 10635 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 10636 writel(val, bp->bar0 + reg_off); 10637 break; 10638 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 10639 writel(val, bp->bar1 + reg_off); 10640 break; 10641 } 10642 if (delay_msecs) { 10643 pci_read_config_dword(bp->pdev, 0, &val); 10644 msleep(delay_msecs); 10645 } 10646 } 10647 10648 static void bnxt_reset_all(struct bnxt *bp) 10649 { 10650 struct bnxt_fw_health *fw_health = bp->fw_health; 10651 int i, rc; 10652 10653 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10654 #ifdef CONFIG_TEE_BNXT_FW 10655 rc = tee_bnxt_fw_load(); 10656 if (rc) 10657 netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc); 10658 bp->fw_reset_timestamp = jiffies; 10659 #endif 10660 return; 10661 } 10662 10663 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 10664 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 10665 bnxt_fw_reset_writel(bp, i); 10666 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 10667 struct hwrm_fw_reset_input req = {0}; 10668 10669 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); 10670 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 10671 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 10672 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 10673 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 10674 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10675 if (rc) 10676 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 10677 } 10678 bp->fw_reset_timestamp = jiffies; 10679 } 10680 10681 static void bnxt_fw_reset_task(struct work_struct *work) 10682 { 10683 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 10684 int rc; 10685 10686 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10687 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 10688 return; 10689 } 10690 10691 switch (bp->fw_reset_state) { 10692 case BNXT_FW_RESET_STATE_POLL_VF: { 10693 int n = bnxt_get_registered_vfs(bp); 10694 int tmo; 10695 10696 if (n < 0) { 10697 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 10698 n, jiffies_to_msecs(jiffies - 10699 
bp->fw_reset_timestamp)); 10700 goto fw_reset_abort; 10701 } else if (n > 0) { 10702 if (time_after(jiffies, bp->fw_reset_timestamp + 10703 (bp->fw_reset_max_dsecs * HZ / 10))) { 10704 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10705 bp->fw_reset_state = 0; 10706 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 10707 n); 10708 return; 10709 } 10710 bnxt_queue_fw_reset_work(bp, HZ / 10); 10711 return; 10712 } 10713 bp->fw_reset_timestamp = jiffies; 10714 rtnl_lock(); 10715 bnxt_fw_reset_close(bp); 10716 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10717 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 10718 tmo = HZ / 10; 10719 } else { 10720 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10721 tmo = bp->fw_reset_min_dsecs * HZ / 10; 10722 } 10723 rtnl_unlock(); 10724 bnxt_queue_fw_reset_work(bp, tmo); 10725 return; 10726 } 10727 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 10728 u32 val; 10729 10730 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 10731 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 10732 !time_after(jiffies, bp->fw_reset_timestamp + 10733 (bp->fw_reset_max_dsecs * HZ / 10))) { 10734 bnxt_queue_fw_reset_work(bp, HZ / 5); 10735 return; 10736 } 10737 10738 if (!bp->fw_health->master) { 10739 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 10740 10741 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10742 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 10743 return; 10744 } 10745 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 10746 } 10747 /* fall through */ 10748 case BNXT_FW_RESET_STATE_RESET_FW: 10749 bnxt_reset_all(bp); 10750 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10751 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 10752 return; 10753 case BNXT_FW_RESET_STATE_ENABLE_DEV: 10754 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 10755 bp->fw_health) { 10756 u32 val; 10757 10758 val = bnxt_fw_health_readl(bp, 10759 BNXT_FW_RESET_INPROG_REG); 10760 if (val) 10761 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", 10762 val); 10763 } 10764 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 10765 if (pci_enable_device(bp->pdev)) { 10766 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 10767 goto fw_reset_abort; 10768 } 10769 pci_set_master(bp->pdev); 10770 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 10771 /* fall through */ 10772 case BNXT_FW_RESET_STATE_POLL_FW: 10773 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 10774 rc = __bnxt_hwrm_ver_get(bp, true); 10775 if (rc) { 10776 if (time_after(jiffies, bp->fw_reset_timestamp + 10777 (bp->fw_reset_max_dsecs * HZ / 10))) { 10778 netdev_err(bp->dev, "Firmware reset aborted\n"); 10779 goto fw_reset_abort; 10780 } 10781 bnxt_queue_fw_reset_work(bp, HZ / 5); 10782 return; 10783 } 10784 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 10785 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 10786 /* fall through */ 10787 case BNXT_FW_RESET_STATE_OPENING: 10788 while (!rtnl_trylock()) { 10789 bnxt_queue_fw_reset_work(bp, HZ / 10); 10790 return; 10791 } 10792 rc = bnxt_open(bp->dev); 10793 if (rc) { 10794 netdev_err(bp->dev, "bnxt_open_nic() failed\n"); 10795 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10796 dev_close(bp->dev); 10797 } 10798 10799 bp->fw_reset_state = 0; 10800 /* Make sure fw_reset_state is 0 before clearing the flag */ 10801 smp_mb__before_atomic(); 10802 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10803 bnxt_ulp_start(bp, rc); 10804 bnxt_dl_health_status_update(bp, true); 10805 
rtnl_unlock(); 10806 break; 10807 } 10808 return; 10809 10810 fw_reset_abort: 10811 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10812 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 10813 bnxt_dl_health_status_update(bp, false); 10814 bp->fw_reset_state = 0; 10815 rtnl_lock(); 10816 dev_close(bp->dev); 10817 rtnl_unlock(); 10818 } 10819 10820 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 10821 { 10822 int rc; 10823 struct bnxt *bp = netdev_priv(dev); 10824 10825 SET_NETDEV_DEV(dev, &pdev->dev); 10826 10827 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 10828 rc = pci_enable_device(pdev); 10829 if (rc) { 10830 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 10831 goto init_err; 10832 } 10833 10834 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 10835 dev_err(&pdev->dev, 10836 "Cannot find PCI device base address, aborting\n"); 10837 rc = -ENODEV; 10838 goto init_err_disable; 10839 } 10840 10841 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 10842 if (rc) { 10843 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 10844 goto init_err_disable; 10845 } 10846 10847 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 10848 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 10849 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 10850 goto init_err_disable; 10851 } 10852 10853 pci_set_master(pdev); 10854 10855 bp->dev = dev; 10856 bp->pdev = pdev; 10857 10858 bp->bar0 = pci_ioremap_bar(pdev, 0); 10859 if (!bp->bar0) { 10860 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 10861 rc = -ENOMEM; 10862 goto init_err_release; 10863 } 10864 10865 bp->bar1 = pci_ioremap_bar(pdev, 2); 10866 if (!bp->bar1) { 10867 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); 10868 rc = -ENOMEM; 10869 goto init_err_release; 10870 } 10871 10872 bp->bar2 = pci_ioremap_bar(pdev, 4); 10873 if (!bp->bar2) { 10874 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 10875 rc = -ENOMEM; 10876 goto init_err_release; 10877 } 10878 10879 pci_enable_pcie_error_reporting(pdev); 10880 10881 INIT_WORK(&bp->sp_task, bnxt_sp_task); 10882 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 10883 10884 spin_lock_init(&bp->ntp_fltr_lock); 10885 #if BITS_PER_LONG == 32 10886 spin_lock_init(&bp->db_lock); 10887 #endif 10888 10889 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 10890 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 10891 10892 bnxt_init_dflt_coal(bp); 10893 10894 timer_setup(&bp->timer, bnxt_timer, 0); 10895 bp->current_interval = BNXT_TIMER_INTERVAL; 10896 10897 clear_bit(BNXT_STATE_OPEN, &bp->state); 10898 return 0; 10899 10900 init_err_release: 10901 bnxt_unmap_bars(bp, pdev); 10902 pci_release_regions(pdev); 10903 10904 init_err_disable: 10905 pci_disable_device(pdev); 10906 10907 init_err: 10908 return rc; 10909 } 10910 10911 /* rtnl_lock held */ 10912 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 10913 { 10914 struct sockaddr *addr = p; 10915 struct bnxt *bp = netdev_priv(dev); 10916 int rc = 0; 10917 10918 if (!is_valid_ether_addr(addr->sa_data)) 10919 return -EADDRNOTAVAIL; 10920 10921 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 10922 return 0; 10923 10924 rc = bnxt_approve_mac(bp, addr->sa_data, true); 10925 if (rc) 10926 return rc; 10927 10928 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 10929 if (netif_running(dev)) { 10930 bnxt_close_nic(bp, false, false); 10931 rc = bnxt_open_nic(bp, false, false); 10932 } 
10933 10934 return rc; 10935 } 10936 10937 /* rtnl_lock held */ 10938 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 10939 { 10940 struct bnxt *bp = netdev_priv(dev); 10941 10942 if (netif_running(dev)) 10943 bnxt_close_nic(bp, false, false); 10944 10945 dev->mtu = new_mtu; 10946 bnxt_set_ring_params(bp); 10947 10948 if (netif_running(dev)) 10949 return bnxt_open_nic(bp, false, false); 10950 10951 return 0; 10952 } 10953 10954 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 10955 { 10956 struct bnxt *bp = netdev_priv(dev); 10957 bool sh = false; 10958 int rc; 10959 10960 if (tc > bp->max_tc) { 10961 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 10962 tc, bp->max_tc); 10963 return -EINVAL; 10964 } 10965 10966 if (netdev_get_num_tc(dev) == tc) 10967 return 0; 10968 10969 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 10970 sh = true; 10971 10972 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 10973 sh, tc, bp->tx_nr_rings_xdp); 10974 if (rc) 10975 return rc; 10976 10977 /* Needs to close the device and do hw resource re-allocations */ 10978 if (netif_running(bp->dev)) 10979 bnxt_close_nic(bp, true, false); 10980 10981 if (tc) { 10982 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 10983 netdev_set_num_tc(dev, tc); 10984 } else { 10985 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 10986 netdev_reset_tc(dev); 10987 } 10988 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 10989 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 10990 bp->tx_nr_rings + bp->rx_nr_rings; 10991 10992 if (netif_running(bp->dev)) 10993 return bnxt_open_nic(bp, true, false); 10994 10995 return 0; 10996 } 10997 10998 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 10999 void *cb_priv) 11000 { 11001 struct bnxt *bp = cb_priv; 11002 11003 if (!bnxt_tc_flower_enabled(bp) || 11004 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 11005 return -EOPNOTSUPP; 11006 11007 switch (type) { 11008 case TC_SETUP_CLSFLOWER: 11009 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 11010 default: 11011 return -EOPNOTSUPP; 11012 } 11013 } 11014 11015 LIST_HEAD(bnxt_block_cb_list); 11016 11017 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 11018 void *type_data) 11019 { 11020 struct bnxt *bp = netdev_priv(dev); 11021 11022 switch (type) { 11023 case TC_SETUP_BLOCK: 11024 return flow_block_cb_setup_simple(type_data, 11025 &bnxt_block_cb_list, 11026 bnxt_setup_tc_block_cb, 11027 bp, bp, true); 11028 case TC_SETUP_QDISC_MQPRIO: { 11029 struct tc_mqprio_qopt *mqprio = type_data; 11030 11031 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 11032 11033 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 11034 } 11035 default: 11036 return -EOPNOTSUPP; 11037 } 11038 } 11039 11040 #ifdef CONFIG_RFS_ACCEL 11041 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 11042 struct bnxt_ntuple_filter *f2) 11043 { 11044 struct flow_keys *keys1 = &f1->fkeys; 11045 struct flow_keys *keys2 = &f2->fkeys; 11046 11047 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && 11048 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && 11049 keys1->ports.ports == keys2->ports.ports && 11050 keys1->basic.ip_proto == keys2->basic.ip_proto && 11051 keys1->basic.n_proto == keys2->basic.n_proto && 11052 keys1->control.flags == keys2->control.flags && 11053 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 11054 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 11055 return true; 11056 11057 return false; 11058 } 11059 11060 
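/* aRFS .ndo_rx_flow_steer handler: dissect the flow keys from the skb,
 * add an ntuple filter entry for the flow if one does not already exist,
 * and defer the actual HWRM filter programming to bnxt_cfg_ntp_filters()
 * via the BNXT_RX_NTP_FLTR_SP_EVENT work item.  Returns the new filter's
 * sw_id, 0 if an identical filter already exists, or a negative errno.
 */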
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 11061 u16 rxq_index, u32 flow_id) 11062 { 11063 struct bnxt *bp = netdev_priv(dev); 11064 struct bnxt_ntuple_filter *fltr, *new_fltr; 11065 struct flow_keys *fkeys; 11066 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 11067 int rc = 0, idx, bit_id, l2_idx = 0; 11068 struct hlist_head *head; 11069 11070 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 11071 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11072 int off = 0, j; 11073 11074 netif_addr_lock_bh(dev); 11075 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 11076 if (ether_addr_equal(eth->h_dest, 11077 vnic->uc_list + off)) { 11078 l2_idx = j + 1; 11079 break; 11080 } 11081 } 11082 netif_addr_unlock_bh(dev); 11083 if (!l2_idx) 11084 return -EINVAL; 11085 } 11086 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 11087 if (!new_fltr) 11088 return -ENOMEM; 11089 11090 fkeys = &new_fltr->fkeys; 11091 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 11092 rc = -EPROTONOSUPPORT; 11093 goto err_free; 11094 } 11095 11096 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 11097 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 11098 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 11099 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 11100 rc = -EPROTONOSUPPORT; 11101 goto err_free; 11102 } 11103 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 11104 bp->hwrm_spec_code < 0x10601) { 11105 rc = -EPROTONOSUPPORT; 11106 goto err_free; 11107 } 11108 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && 11109 bp->hwrm_spec_code < 0x10601) { 11110 rc = -EPROTONOSUPPORT; 11111 goto err_free; 11112 } 11113 11114 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 11115 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 11116 11117 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 11118 head = &bp->ntp_fltr_hash_tbl[idx]; 11119 rcu_read_lock(); 11120 hlist_for_each_entry_rcu(fltr, head, hash) { 11121 if (bnxt_fltr_match(fltr, new_fltr)) { 11122 rcu_read_unlock(); 11123 rc = 0; 11124 goto err_free; 11125 } 11126 } 11127 rcu_read_unlock(); 11128 11129 spin_lock_bh(&bp->ntp_fltr_lock); 11130 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 11131 BNXT_NTP_FLTR_MAX_FLTR, 0); 11132 if (bit_id < 0) { 11133 spin_unlock_bh(&bp->ntp_fltr_lock); 11134 rc = -ENOMEM; 11135 goto err_free; 11136 } 11137 11138 new_fltr->sw_id = (u16)bit_id; 11139 new_fltr->flow_id = flow_id; 11140 new_fltr->l2_fltr_idx = l2_idx; 11141 new_fltr->rxq = rxq_index; 11142 hlist_add_head_rcu(&new_fltr->hash, head); 11143 bp->ntp_fltr_count++; 11144 spin_unlock_bh(&bp->ntp_fltr_lock); 11145 11146 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 11147 bnxt_queue_sp_work(bp); 11148 11149 return new_fltr->sw_id; 11150 11151 err_free: 11152 kfree(new_fltr); 11153 return rc; 11154 } 11155 11156 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11157 { 11158 int i; 11159 11160 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 11161 struct hlist_head *head; 11162 struct hlist_node *tmp; 11163 struct bnxt_ntuple_filter *fltr; 11164 int rc; 11165 11166 head = &bp->ntp_fltr_hash_tbl[i]; 11167 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 11168 bool del = false; 11169 11170 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 11171 if (rps_may_expire_flow(bp->dev, fltr->rxq, 11172 fltr->flow_id, 11173 fltr->sw_id)) { 11174 bnxt_hwrm_cfa_ntuple_filter_free(bp, 11175 fltr); 11176 del = true; 11177 } 11178 } else { 11179 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 11180 fltr); 11181 if (rc) 
11182 del = true; 11183 else 11184 set_bit(BNXT_FLTR_VALID, &fltr->state); 11185 } 11186 11187 if (del) { 11188 spin_lock_bh(&bp->ntp_fltr_lock); 11189 hlist_del_rcu(&fltr->hash); 11190 bp->ntp_fltr_count--; 11191 spin_unlock_bh(&bp->ntp_fltr_lock); 11192 synchronize_rcu(); 11193 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 11194 kfree(fltr); 11195 } 11196 } 11197 } 11198 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 11199 netdev_info(bp->dev, "Receive PF driver unload event!"); 11200 } 11201 11202 #else 11203 11204 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11205 { 11206 } 11207 11208 #endif /* CONFIG_RFS_ACCEL */ 11209 11210 static void bnxt_udp_tunnel_add(struct net_device *dev, 11211 struct udp_tunnel_info *ti) 11212 { 11213 struct bnxt *bp = netdev_priv(dev); 11214 11215 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 11216 return; 11217 11218 if (!netif_running(dev)) 11219 return; 11220 11221 switch (ti->type) { 11222 case UDP_TUNNEL_TYPE_VXLAN: 11223 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) 11224 return; 11225 11226 bp->vxlan_port_cnt++; 11227 if (bp->vxlan_port_cnt == 1) { 11228 bp->vxlan_port = ti->port; 11229 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 11230 bnxt_queue_sp_work(bp); 11231 } 11232 break; 11233 case UDP_TUNNEL_TYPE_GENEVE: 11234 if (bp->nge_port_cnt && bp->nge_port != ti->port) 11235 return; 11236 11237 bp->nge_port_cnt++; 11238 if (bp->nge_port_cnt == 1) { 11239 bp->nge_port = ti->port; 11240 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); 11241 } 11242 break; 11243 default: 11244 return; 11245 } 11246 11247 bnxt_queue_sp_work(bp); 11248 } 11249 11250 static void bnxt_udp_tunnel_del(struct net_device *dev, 11251 struct udp_tunnel_info *ti) 11252 { 11253 struct bnxt *bp = netdev_priv(dev); 11254 11255 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 11256 return; 11257 11258 if (!netif_running(dev)) 11259 return; 11260 11261 switch (ti->type) { 11262 case UDP_TUNNEL_TYPE_VXLAN: 11263 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) 11264 return; 11265 bp->vxlan_port_cnt--; 11266 11267 if (bp->vxlan_port_cnt != 0) 11268 return; 11269 11270 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 11271 break; 11272 case UDP_TUNNEL_TYPE_GENEVE: 11273 if (!bp->nge_port_cnt || bp->nge_port != ti->port) 11274 return; 11275 bp->nge_port_cnt--; 11276 11277 if (bp->nge_port_cnt != 0) 11278 return; 11279 11280 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); 11281 break; 11282 default: 11283 return; 11284 } 11285 11286 bnxt_queue_sp_work(bp); 11287 } 11288 11289 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 11290 struct net_device *dev, u32 filter_mask, 11291 int nlflags) 11292 { 11293 struct bnxt *bp = netdev_priv(dev); 11294 11295 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 11296 nlflags, filter_mask, NULL); 11297 } 11298 11299 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 11300 u16 flags, struct netlink_ext_ack *extack) 11301 { 11302 struct bnxt *bp = netdev_priv(dev); 11303 struct nlattr *attr, *br_spec; 11304 int rem, rc = 0; 11305 11306 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 11307 return -EOPNOTSUPP; 11308 11309 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 11310 if (!br_spec) 11311 return -EINVAL; 11312 11313 nla_for_each_nested(attr, br_spec, rem) { 11314 u16 mode; 11315 11316 if (nla_type(attr) != IFLA_BRIDGE_MODE) 11317 continue; 11318 11319 if 
(nla_len(attr) < sizeof(mode)) 11320 return -EINVAL; 11321 11322 mode = nla_get_u16(attr); 11323 if (mode == bp->br_mode) 11324 break; 11325 11326 rc = bnxt_hwrm_set_br_mode(bp, mode); 11327 if (!rc) 11328 bp->br_mode = mode; 11329 break; 11330 } 11331 return rc; 11332 } 11333 11334 int bnxt_get_port_parent_id(struct net_device *dev, 11335 struct netdev_phys_item_id *ppid) 11336 { 11337 struct bnxt *bp = netdev_priv(dev); 11338 11339 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 11340 return -EOPNOTSUPP; 11341 11342 /* The PF and it's VF-reps only support the switchdev framework */ 11343 if (!BNXT_PF(bp)) 11344 return -EOPNOTSUPP; 11345 11346 ppid->id_len = sizeof(bp->switch_id); 11347 memcpy(ppid->id, bp->switch_id, ppid->id_len); 11348 11349 return 0; 11350 } 11351 11352 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) 11353 { 11354 struct bnxt *bp = netdev_priv(dev); 11355 11356 return &bp->dl_port; 11357 } 11358 11359 static const struct net_device_ops bnxt_netdev_ops = { 11360 .ndo_open = bnxt_open, 11361 .ndo_start_xmit = bnxt_start_xmit, 11362 .ndo_stop = bnxt_close, 11363 .ndo_get_stats64 = bnxt_get_stats64, 11364 .ndo_set_rx_mode = bnxt_set_rx_mode, 11365 .ndo_do_ioctl = bnxt_ioctl, 11366 .ndo_validate_addr = eth_validate_addr, 11367 .ndo_set_mac_address = bnxt_change_mac_addr, 11368 .ndo_change_mtu = bnxt_change_mtu, 11369 .ndo_fix_features = bnxt_fix_features, 11370 .ndo_set_features = bnxt_set_features, 11371 .ndo_tx_timeout = bnxt_tx_timeout, 11372 #ifdef CONFIG_BNXT_SRIOV 11373 .ndo_get_vf_config = bnxt_get_vf_config, 11374 .ndo_set_vf_mac = bnxt_set_vf_mac, 11375 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 11376 .ndo_set_vf_rate = bnxt_set_vf_bw, 11377 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 11378 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 11379 .ndo_set_vf_trust = bnxt_set_vf_trust, 11380 #endif 11381 .ndo_setup_tc = bnxt_setup_tc, 11382 #ifdef CONFIG_RFS_ACCEL 11383 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 11384 #endif 11385 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 11386 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 11387 .ndo_bpf = bnxt_xdp, 11388 .ndo_xdp_xmit = bnxt_xdp_xmit, 11389 .ndo_bridge_getlink = bnxt_bridge_getlink, 11390 .ndo_bridge_setlink = bnxt_bridge_setlink, 11391 .ndo_get_devlink_port = bnxt_get_devlink_port, 11392 }; 11393 11394 static void bnxt_remove_one(struct pci_dev *pdev) 11395 { 11396 struct net_device *dev = pci_get_drvdata(pdev); 11397 struct bnxt *bp = netdev_priv(dev); 11398 11399 if (BNXT_PF(bp)) { 11400 bnxt_sriov_disable(bp); 11401 bnxt_dl_unregister(bp); 11402 } 11403 11404 pci_disable_pcie_error_reporting(pdev); 11405 unregister_netdev(dev); 11406 bnxt_shutdown_tc(bp); 11407 bnxt_cancel_sp_work(bp); 11408 bp->sp_event = 0; 11409 11410 bnxt_clear_int_mode(bp); 11411 bnxt_hwrm_func_drv_unrgtr(bp); 11412 bnxt_free_hwrm_resources(bp); 11413 bnxt_free_hwrm_short_cmd_req(bp); 11414 bnxt_ethtool_free(bp); 11415 bnxt_dcb_free(bp); 11416 kfree(bp->edev); 11417 bp->edev = NULL; 11418 bnxt_cleanup_pci(bp); 11419 bnxt_free_ctx_mem(bp); 11420 kfree(bp->ctx); 11421 bp->ctx = NULL; 11422 bnxt_free_port_stats(bp); 11423 free_netdev(dev); 11424 } 11425 11426 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 11427 { 11428 int rc = 0; 11429 struct bnxt_link_info *link_info = &bp->link_info; 11430 11431 rc = bnxt_hwrm_phy_qcaps(bp); 11432 if (rc) { 11433 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 11434 rc); 11435 return rc; 11436 } 11437 rc = bnxt_update_link(bp, false); 11438 if (rc) { 
11439 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 11440 rc); 11441 return rc; 11442 } 11443 11444 /* Older firmware does not have supported_auto_speeds, so assume 11445 * that all supported speeds can be autonegotiated. 11446 */ 11447 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 11448 link_info->support_auto_speeds = link_info->support_speeds; 11449 11450 if (!fw_dflt) 11451 return 0; 11452 11453 bnxt_init_ethtool_link_settings(bp); 11454 return 0; 11455 } 11456 11457 static int bnxt_get_max_irq(struct pci_dev *pdev) 11458 { 11459 u16 ctrl; 11460 11461 if (!pdev->msix_cap) 11462 return 1; 11463 11464 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 11465 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 11466 } 11467 11468 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 11469 int *max_cp) 11470 { 11471 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11472 int max_ring_grps = 0, max_irq; 11473 11474 *max_tx = hw_resc->max_tx_rings; 11475 *max_rx = hw_resc->max_rx_rings; 11476 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 11477 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 11478 bnxt_get_ulp_msix_num(bp), 11479 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); 11480 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 11481 *max_cp = min_t(int, *max_cp, max_irq); 11482 max_ring_grps = hw_resc->max_hw_ring_grps; 11483 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 11484 *max_cp -= 1; 11485 *max_rx -= 2; 11486 } 11487 if (bp->flags & BNXT_FLAG_AGG_RINGS) 11488 *max_rx >>= 1; 11489 if (bp->flags & BNXT_FLAG_CHIP_P5) { 11490 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 11491 /* On P5 chips, max_cp output param should be available NQs */ 11492 *max_cp = max_irq; 11493 } 11494 *max_rx = min_t(int, *max_rx, max_ring_grps); 11495 } 11496 11497 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 11498 { 11499 int rx, tx, cp; 11500 11501 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 11502 *max_rx = rx; 11503 *max_tx = tx; 11504 if (!rx || !tx || !cp) 11505 return -ENOMEM; 11506 11507 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 11508 } 11509 11510 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 11511 bool shared) 11512 { 11513 int rc; 11514 11515 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 11516 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 11517 /* Not enough rings, try disabling agg rings. 
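		 * Aggregation rings are required for LRO/GRO_HW, so those
		 * features are cleared below as well.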
*/ 11518 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 11519 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 11520 if (rc) { 11521 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 11522 bp->flags |= BNXT_FLAG_AGG_RINGS; 11523 return rc; 11524 } 11525 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 11526 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 11527 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 11528 bnxt_set_ring_params(bp); 11529 } 11530 11531 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 11532 int max_cp, max_stat, max_irq; 11533 11534 /* Reserve minimum resources for RoCE */ 11535 max_cp = bnxt_get_max_func_cp_rings(bp); 11536 max_stat = bnxt_get_max_func_stat_ctxs(bp); 11537 max_irq = bnxt_get_max_func_irqs(bp); 11538 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 11539 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 11540 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 11541 return 0; 11542 11543 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 11544 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 11545 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 11546 max_cp = min_t(int, max_cp, max_irq); 11547 max_cp = min_t(int, max_cp, max_stat); 11548 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 11549 if (rc) 11550 rc = 0; 11551 } 11552 return rc; 11553 } 11554 11555 /* In initial default shared ring setting, each shared ring must have a 11556 * RX/TX ring pair. 11557 */ 11558 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 11559 { 11560 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 11561 bp->rx_nr_rings = bp->cp_nr_rings; 11562 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 11563 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 11564 } 11565 11566 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 11567 { 11568 int dflt_rings, max_rx_rings, max_tx_rings, rc; 11569 11570 if (!bnxt_can_reserve_rings(bp)) 11571 return 0; 11572 11573 if (sh) 11574 bp->flags |= BNXT_FLAG_SHARED_RINGS; 11575 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 11576 /* Reduce default rings on multi-port cards so that total default 11577 * rings do not exceed CPU count. 11578 */ 11579 if (bp->port_count > 1) { 11580 int max_rings = 11581 max_t(int, num_online_cpus() / bp->port_count, 1); 11582 11583 dflt_rings = min_t(int, dflt_rings, max_rings); 11584 } 11585 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 11586 if (rc) 11587 return rc; 11588 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 11589 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 11590 if (sh) 11591 bnxt_trim_dflt_sh_rings(bp); 11592 else 11593 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 11594 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 11595 11596 rc = __bnxt_reserve_rings(bp); 11597 if (rc) 11598 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 11599 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 11600 if (sh) 11601 bnxt_trim_dflt_sh_rings(bp); 11602 11603 /* Rings may have been trimmed, re-reserve the trimmed rings. 
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
		bp->flags |= BNXT_FLAG_RFS;
		bp->dev->features |= NETIF_F_NTUPLE;
	}
init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
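			/* So do not insist on firmware approval of the
			 * admin-assigned MAC here; bnxt_approve_mac() is
			 * called below with strict checking turned off.
			 */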
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	u32 dw;

	if (!pos) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	/* DSN (two dw) is at an offset of 4 from the cap pos */
	pos += 4;
	pci_read_config_dword(pdev, pos, &dw);
	put_unaligned_le32(dw, &dsn[0]);
	pci_read_config_dword(pdev, pos + 4, &dw);
	put_unaligned_le32(dw, &dsn[4]);
	return 0;
}

static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_CHIP_P5(bp))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
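	/* Assumption (usage lives in bnxt_sriov.c, not shown here):
	 * sriov_cfg_wait is used to wait for an in-progress SR-IOV
	 * configuration change to complete, and sriov_lock serializes
	 * such changes.
	 */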
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	bp->ulp_probe = bnxt_ulp_probe;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
		if (rc)
			goto init_err_pci_clean;
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup_tc;

	if (BNXT_PF(bp))
		bnxt_dl_register(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
	pcie_print_link_status(pdev);

	return 0;

init_err_cleanup_tc:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);

	if (system_state == SYSTEM_POWER_OFF) {
		bnxt_clear_int_mode(bp);
		pci_disable_device(pdev);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
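/* Suspend/resume sketch: bnxt_suspend() below detaches and closes the
 * netdev, unregisters the driver from the firmware, frees firmware context
 * memory and disables the PCI device; bnxt_resume() redoes the minimal
 * firmware handshake (version query, function reset, queue and resource
 * discovery, driver registration) before reopening the netdev.  Both run
 * under the RTNL lock.
 */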
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	if (bnxt_hwrm_queue_qportcfg(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	if (bp->hwrm_spec_code >= 0x10803) {
		if (bnxt_alloc_ctx_mem(bp)) {
			rc = -ENODEV;
			goto resume_exit;
		}
	}
	if (BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
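
/* When bnxt_io_error_detected() returns PCI_ERS_RESULT_NEED_RESET, the PCI
 * error recovery core resets the slot and then calls bnxt_io_slot_reset()
 * below; bnxt_io_resume() runs last, once traffic is allowed to flow again.
 */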

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
		bnxt_ulp_start(bp, err);
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);