/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM57508_NPAR,
	BCM57504_NPAR,
	BCM57502_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

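	/* When the system PAGE_SIZE is larger than BNXT_RX_PAGE_SIZE, a
	 * single page is carved into multiple aggregation buffers and
	 * get_page() below takes an extra reference for each chunk that is
	 * still being handed out from the same page.
	 */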
	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	page_pool_release_page(rxr->page_pool, page);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}
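
/* Flush (PF) or cancel (VF) any outstanding slow-path work; the PF queues
 * its slow-path work on the dedicated bnxt_pf_wq workqueue while VFs use
 * the system workqueue.
 */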
static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 *data_ptr, agg_bufs;
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;
	void *data;
	bool gro;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_END_AGG_ID_P5(tpa_end);
		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
		tpa_info = &rxr->rx_tpa[agg_id];
		if (unlikely(agg_bufs != tpa_info->agg_count)) {
			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
				    agg_bufs, tpa_info->agg_count);
			agg_bufs = tpa_info->agg_count;
		}
		tpa_info->agg_count = 0;
		*event |= BNXT_AGG_EVENT;
		bnxt_free_agg_idx(rxr, agg_id);
		idx = agg_id;
		gro = !!(bp->flags & BNXT_FLAG_GRO);
	} else {
		agg_id = TPA_END_AGG_ID(tpa_end);
		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
		tpa_info = &rxr->rx_tpa[agg_id];
		idx = RING_CMP(*raw_cons);
		if (agg_bufs) {
			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
				return ERR_PTR(-EBUSY);

			*event |= BNXT_AGG_EVENT;
			idx = NEXT_CMP(idx);
		}
		gro = !!TPA_END_GRO(tpa_end);
	}
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (gro)
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			 struct rx_agg_cmp *rx_agg)
{
	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
	struct bnxt_tpa_info *tpa_info;

	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
	tpa_info = &rxr->rx_tpa[agg_id];
	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
}

static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
		goto next_rx_no_prod_no_len;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);

		netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
			    cons, rxr->rx_next_cons);
		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);

		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
					       false);

		rc = -EIO;
		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
			bnapi->cp_ring.rx_buf_errors++;
			if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
				netdev_warn(bp->dev, "RX buffer error %x\n",
					    rx_err);
				bnxt_sched_reset(bp, rxr);
			}
		}
		goto next_rx_no_len;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			if (agg_bufs)
				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
						       agg_bufs, false);
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

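	/* Report CHECKSUM_UNNECESSARY only when the hardware validated the
	 * L4 checksum; otherwise count any L4 checksum error flagged in the
	 * completion record.
	 */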
skb_checksum_none_assert(skb); 1848 if (RX_CMP_L4_CS_OK(rxcmp1)) { 1849 if (dev->features & NETIF_F_RXCSUM) { 1850 skb->ip_summed = CHECKSUM_UNNECESSARY; 1851 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 1852 } 1853 } else { 1854 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1855 if (dev->features & NETIF_F_RXCSUM) 1856 bnapi->cp_ring.rx_l4_csum_errors++; 1857 } 1858 } 1859 1860 bnxt_deliver_skb(bp, bnapi, skb); 1861 rc = 1; 1862 1863 next_rx: 1864 cpr->rx_packets += 1; 1865 cpr->rx_bytes += len; 1866 1867 next_rx_no_len: 1868 rxr->rx_prod = NEXT_RX(prod); 1869 rxr->rx_next_cons = NEXT_RX(cons); 1870 1871 next_rx_no_prod_no_len: 1872 *raw_cons = tmp_raw_cons; 1873 1874 return rc; 1875 } 1876 1877 /* In netpoll mode, if we are using a combined completion ring, we need to 1878 * discard the rx packets and recycle the buffers. 1879 */ 1880 static int bnxt_force_rx_discard(struct bnxt *bp, 1881 struct bnxt_cp_ring_info *cpr, 1882 u32 *raw_cons, u8 *event) 1883 { 1884 u32 tmp_raw_cons = *raw_cons; 1885 struct rx_cmp_ext *rxcmp1; 1886 struct rx_cmp *rxcmp; 1887 u16 cp_cons; 1888 u8 cmp_type; 1889 1890 cp_cons = RING_CMP(tmp_raw_cons); 1891 rxcmp = (struct rx_cmp *) 1892 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1893 1894 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1895 cp_cons = RING_CMP(tmp_raw_cons); 1896 rxcmp1 = (struct rx_cmp_ext *) 1897 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1898 1899 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1900 return -EBUSY; 1901 1902 cmp_type = RX_CMP_TYPE(rxcmp); 1903 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1904 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1905 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1906 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1907 struct rx_tpa_end_cmp_ext *tpa_end1; 1908 1909 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 1910 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 1911 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 1912 } 1913 return bnxt_rx_pkt(bp, cpr, raw_cons, event); 1914 } 1915 1916 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 1917 { 1918 struct bnxt_fw_health *fw_health = bp->fw_health; 1919 u32 reg = fw_health->regs[reg_idx]; 1920 u32 reg_type, reg_off, val = 0; 1921 1922 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 1923 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 1924 switch (reg_type) { 1925 case BNXT_FW_HEALTH_REG_TYPE_CFG: 1926 pci_read_config_dword(bp->pdev, reg_off, &val); 1927 break; 1928 case BNXT_FW_HEALTH_REG_TYPE_GRC: 1929 reg_off = fw_health->mapped_regs[reg_idx]; 1930 /* fall through */ 1931 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 1932 val = readl(bp->bar0 + reg_off); 1933 break; 1934 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 1935 val = readl(bp->bar1 + reg_off); 1936 break; 1937 } 1938 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 1939 val &= fw_health->fw_reset_inprog_reg_mask; 1940 return val; 1941 } 1942 1943 #define BNXT_GET_EVENT_PORT(data) \ 1944 ((data) & \ 1945 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 1946 1947 static int bnxt_async_event_process(struct bnxt *bp, 1948 struct hwrm_async_event_cmpl *cmpl) 1949 { 1950 u16 event_id = le16_to_cpu(cmpl->event_id); 1951 1952 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 1953 switch (event_id) { 1954 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 1955 u32 data1 = le32_to_cpu(cmpl->event_data1); 1956 struct bnxt_link_info *link_info = &bp->link_info; 1957 1958 if (BNXT_VF(bp)) 1959 goto async_event_process_exit; 1960 1961 /* print unsupported speed warning in forced speed mode only */ 1962 if 
(!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 1963 (data1 & 0x20000)) { 1964 u16 fw_speed = link_info->force_link_speed; 1965 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 1966 1967 if (speed != SPEED_UNKNOWN) 1968 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 1969 speed); 1970 } 1971 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 1972 } 1973 /* fall through */ 1974 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 1975 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 1976 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 1977 /* fall through */ 1978 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 1979 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 1980 break; 1981 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 1982 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 1983 break; 1984 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 1985 u32 data1 = le32_to_cpu(cmpl->event_data1); 1986 u16 port_id = BNXT_GET_EVENT_PORT(data1); 1987 1988 if (BNXT_VF(bp)) 1989 break; 1990 1991 if (bp->pf.port_id != port_id) 1992 break; 1993 1994 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 1995 break; 1996 } 1997 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 1998 if (BNXT_PF(bp)) 1999 goto async_event_process_exit; 2000 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2001 break; 2002 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2003 u32 data1 = le32_to_cpu(cmpl->event_data1); 2004 2005 if (!bp->fw_health) 2006 goto async_event_process_exit; 2007 2008 bp->fw_reset_timestamp = jiffies; 2009 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2010 if (!bp->fw_reset_min_dsecs) 2011 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2012 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2013 if (!bp->fw_reset_max_dsecs) 2014 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2015 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2016 netdev_warn(bp->dev, "Firmware fatal reset event received\n"); 2017 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2018 } else { 2019 netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n", 2020 bp->fw_reset_max_dsecs * 100); 2021 } 2022 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2023 break; 2024 } 2025 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2026 struct bnxt_fw_health *fw_health = bp->fw_health; 2027 u32 data1 = le32_to_cpu(cmpl->event_data1); 2028 2029 if (!fw_health) 2030 goto async_event_process_exit; 2031 2032 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1); 2033 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2034 if (!fw_health->enabled) 2035 break; 2036 2037 if (netif_msg_drv(bp)) 2038 netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n", 2039 fw_health->enabled, fw_health->master, 2040 bnxt_fw_health_readl(bp, 2041 BNXT_FW_RESET_CNT_REG), 2042 bnxt_fw_health_readl(bp, 2043 BNXT_FW_HEALTH_REG)); 2044 fw_health->tmr_multiplier = 2045 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2046 bp->current_interval * 10); 2047 fw_health->tmr_counter = fw_health->tmr_multiplier; 2048 fw_health->last_fw_heartbeat = 2049 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2050 fw_health->last_fw_reset_cnt = 2051 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2052 goto async_event_process_exit; 2053 } 2054 default: 2055 goto async_event_process_exit; 2056 } 2057 bnxt_queue_sp_work(bp); 2058 async_event_process_exit: 2059 bnxt_ulp_async_events(bp, cmpl); 2060 return 0; 2061 } 2062 2063 
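/* Handle a non-packet completion: acknowledge HWRM_DONE sequence ids,
 * queue forwarded VF requests for PF processing, and pass async event
 * completions to bnxt_async_event_process().
 */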
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2064 { 2065 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2066 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2067 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2068 (struct hwrm_fwd_req_cmpl *)txcmp; 2069 2070 switch (cmpl_type) { 2071 case CMPL_BASE_TYPE_HWRM_DONE: 2072 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2073 if (seq_id == bp->hwrm_intr_seq_id) 2074 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; 2075 else 2076 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); 2077 break; 2078 2079 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2080 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2081 2082 if ((vf_id < bp->pf.first_vf_id) || 2083 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2084 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2085 vf_id); 2086 return -EINVAL; 2087 } 2088 2089 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2090 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 2091 bnxt_queue_sp_work(bp); 2092 break; 2093 2094 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2095 bnxt_async_event_process(bp, 2096 (struct hwrm_async_event_cmpl *)txcmp); 2097 2098 default: 2099 break; 2100 } 2101 2102 return 0; 2103 } 2104 2105 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2106 { 2107 struct bnxt_napi *bnapi = dev_instance; 2108 struct bnxt *bp = bnapi->bp; 2109 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2110 u32 cons = RING_CMP(cpr->cp_raw_cons); 2111 2112 cpr->event_ctr++; 2113 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2114 napi_schedule(&bnapi->napi); 2115 return IRQ_HANDLED; 2116 } 2117 2118 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2119 { 2120 u32 raw_cons = cpr->cp_raw_cons; 2121 u16 cons = RING_CMP(raw_cons); 2122 struct tx_cmp *txcmp; 2123 2124 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2125 2126 return TX_CMP_VALID(txcmp, raw_cons); 2127 } 2128 2129 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 2130 { 2131 struct bnxt_napi *bnapi = dev_instance; 2132 struct bnxt *bp = bnapi->bp; 2133 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2134 u32 cons = RING_CMP(cpr->cp_raw_cons); 2135 u32 int_status; 2136 2137 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2138 2139 if (!bnxt_has_work(bp, cpr)) { 2140 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 2141 /* return if erroneous interrupt */ 2142 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 2143 return IRQ_NONE; 2144 } 2145 2146 /* disable ring IRQ */ 2147 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell); 2148 2149 /* Return here if interrupt is shared and is disabled. */ 2150 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 2151 return IRQ_HANDLED; 2152 2153 napi_schedule(&bnapi->napi); 2154 return IRQ_HANDLED; 2155 } 2156 2157 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2158 int budget) 2159 { 2160 struct bnxt_napi *bnapi = cpr->bnapi; 2161 u32 raw_cons = cpr->cp_raw_cons; 2162 u32 cons; 2163 int tx_pkts = 0; 2164 int rx_pkts = 0; 2165 u8 event = 0; 2166 struct tx_cmp *txcmp; 2167 2168 cpr->has_more_work = 0; 2169 while (1) { 2170 int rc; 2171 2172 cons = RING_CMP(raw_cons); 2173 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2174 2175 if (!TX_CMP_VALID(txcmp, raw_cons)) 2176 break; 2177 2178 /* The valid test of the entry must be done first before 2179 * reading any further. 
2180 */ 2181 dma_rmb(); 2182 cpr->had_work_done = 1; 2183 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 2184 tx_pkts++; 2185 /* return full budget so NAPI will complete. */ 2186 if (unlikely(tx_pkts > bp->tx_wake_thresh)) { 2187 rx_pkts = budget; 2188 raw_cons = NEXT_RAW_CMP(raw_cons); 2189 if (budget) 2190 cpr->has_more_work = 1; 2191 break; 2192 } 2193 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2194 if (likely(budget)) 2195 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2196 else 2197 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 2198 &event); 2199 if (likely(rc >= 0)) 2200 rx_pkts += rc; 2201 /* Increment rx_pkts when rc is -ENOMEM to count towards 2202 * the NAPI budget. Otherwise, we may potentially loop 2203 * here forever if we consistently cannot allocate 2204 * buffers. 2205 */ 2206 else if (rc == -ENOMEM && budget) 2207 rx_pkts++; 2208 else if (rc == -EBUSY) /* partial completion */ 2209 break; 2210 } else if (unlikely((TX_CMP_TYPE(txcmp) == 2211 CMPL_BASE_TYPE_HWRM_DONE) || 2212 (TX_CMP_TYPE(txcmp) == 2213 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 2214 (TX_CMP_TYPE(txcmp) == 2215 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 2216 bnxt_hwrm_handler(bp, txcmp); 2217 } 2218 raw_cons = NEXT_RAW_CMP(raw_cons); 2219 2220 if (rx_pkts && rx_pkts == budget) { 2221 cpr->has_more_work = 1; 2222 break; 2223 } 2224 } 2225 2226 if (event & BNXT_REDIRECT_EVENT) 2227 xdp_do_flush_map(); 2228 2229 if (event & BNXT_TX_EVENT) { 2230 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 2231 u16 prod = txr->tx_prod; 2232 2233 /* Sync BD data before updating doorbell */ 2234 wmb(); 2235 2236 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 2237 } 2238 2239 cpr->cp_raw_cons = raw_cons; 2240 bnapi->tx_pkts += tx_pkts; 2241 bnapi->events |= event; 2242 return rx_pkts; 2243 } 2244 2245 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) 2246 { 2247 if (bnapi->tx_pkts) { 2248 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); 2249 bnapi->tx_pkts = 0; 2250 } 2251 2252 if (bnapi->events & BNXT_RX_EVENT) { 2253 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2254 2255 if (bnapi->events & BNXT_AGG_EVENT) 2256 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2257 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2258 } 2259 bnapi->events = 0; 2260 } 2261 2262 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2263 int budget) 2264 { 2265 struct bnxt_napi *bnapi = cpr->bnapi; 2266 int rx_pkts; 2267 2268 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2269 2270 /* ACK completion ring before freeing tx ring and producing new 2271 * buffers in rx/agg rings to prevent overflowing the completion 2272 * ring. 
2273 */ 2274 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2275 2276 __bnxt_poll_work_done(bp, bnapi); 2277 return rx_pkts; 2278 } 2279 2280 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2281 { 2282 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2283 struct bnxt *bp = bnapi->bp; 2284 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2285 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2286 struct tx_cmp *txcmp; 2287 struct rx_cmp_ext *rxcmp1; 2288 u32 cp_cons, tmp_raw_cons; 2289 u32 raw_cons = cpr->cp_raw_cons; 2290 u32 rx_pkts = 0; 2291 u8 event = 0; 2292 2293 while (1) { 2294 int rc; 2295 2296 cp_cons = RING_CMP(raw_cons); 2297 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2298 2299 if (!TX_CMP_VALID(txcmp, raw_cons)) 2300 break; 2301 2302 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2303 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2304 cp_cons = RING_CMP(tmp_raw_cons); 2305 rxcmp1 = (struct rx_cmp_ext *) 2306 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2307 2308 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2309 break; 2310 2311 /* force an error to recycle the buffer */ 2312 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2313 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2314 2315 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2316 if (likely(rc == -EIO) && budget) 2317 rx_pkts++; 2318 else if (rc == -EBUSY) /* partial completion */ 2319 break; 2320 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2321 CMPL_BASE_TYPE_HWRM_DONE)) { 2322 bnxt_hwrm_handler(bp, txcmp); 2323 } else { 2324 netdev_err(bp->dev, 2325 "Invalid completion received on special ring\n"); 2326 } 2327 raw_cons = NEXT_RAW_CMP(raw_cons); 2328 2329 if (rx_pkts == budget) 2330 break; 2331 } 2332 2333 cpr->cp_raw_cons = raw_cons; 2334 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2335 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2336 2337 if (event & BNXT_AGG_EVENT) 2338 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2339 2340 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2341 napi_complete_done(napi, rx_pkts); 2342 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2343 } 2344 return rx_pkts; 2345 } 2346 2347 static int bnxt_poll(struct napi_struct *napi, int budget) 2348 { 2349 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2350 struct bnxt *bp = bnapi->bp; 2351 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2352 int work_done = 0; 2353 2354 while (1) { 2355 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 2356 2357 if (work_done >= budget) { 2358 if (!budget) 2359 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2360 break; 2361 } 2362 2363 if (!bnxt_has_work(bp, cpr)) { 2364 if (napi_complete_done(napi, work_done)) 2365 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2366 break; 2367 } 2368 } 2369 if (bp->flags & BNXT_FLAG_DIM) { 2370 struct dim_sample dim_sample = {}; 2371 2372 dim_update_sample(cpr->event_ctr, 2373 cpr->rx_packets, 2374 cpr->rx_bytes, 2375 &dim_sample); 2376 net_dim(&cpr->dim, dim_sample); 2377 } 2378 return work_done; 2379 } 2380 2381 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 2382 { 2383 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2384 int i, work_done = 0; 2385 2386 for (i = 0; i < 2; i++) { 2387 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2388 2389 if (cpr2) { 2390 work_done += __bnxt_poll_work(bp, cpr2, 2391 budget - work_done); 2392 cpr->has_more_work |= cpr2->has_more_work; 2393 } 2394 } 2395 return work_done; 2396 } 2397 2398 static void __bnxt_poll_cqs_done(struct bnxt 
*bp, struct bnxt_napi *bnapi, 2399 u64 dbr_type, bool all) 2400 { 2401 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2402 int i; 2403 2404 for (i = 0; i < 2; i++) { 2405 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2406 struct bnxt_db_info *db; 2407 2408 if (cpr2 && (all || cpr2->had_work_done)) { 2409 db = &cpr2->cp_db; 2410 writeq(db->db_key64 | dbr_type | 2411 RING_CMP(cpr2->cp_raw_cons), db->doorbell); 2412 cpr2->had_work_done = 0; 2413 } 2414 } 2415 __bnxt_poll_work_done(bp, bnapi); 2416 } 2417 2418 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 2419 { 2420 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2421 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2422 u32 raw_cons = cpr->cp_raw_cons; 2423 struct bnxt *bp = bnapi->bp; 2424 struct nqe_cn *nqcmp; 2425 int work_done = 0; 2426 u32 cons; 2427 2428 if (cpr->has_more_work) { 2429 cpr->has_more_work = 0; 2430 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 2431 if (cpr->has_more_work) { 2432 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false); 2433 return work_done; 2434 } 2435 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true); 2436 if (napi_complete_done(napi, work_done)) 2437 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); 2438 return work_done; 2439 } 2440 while (1) { 2441 cons = RING_CMP(raw_cons); 2442 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2443 2444 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 2445 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 2446 false); 2447 cpr->cp_raw_cons = raw_cons; 2448 if (napi_complete_done(napi, work_done)) 2449 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 2450 cpr->cp_raw_cons); 2451 return work_done; 2452 } 2453 2454 /* The valid test of the entry must be done first before 2455 * reading any further. 
2456 */ 2457 dma_rmb(); 2458 2459 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { 2460 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 2461 struct bnxt_cp_ring_info *cpr2; 2462 2463 cpr2 = cpr->cp_ring_arr[idx]; 2464 work_done += __bnxt_poll_work(bp, cpr2, 2465 budget - work_done); 2466 cpr->has_more_work = cpr2->has_more_work; 2467 } else { 2468 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 2469 } 2470 raw_cons = NEXT_RAW_CMP(raw_cons); 2471 if (cpr->has_more_work) 2472 break; 2473 } 2474 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true); 2475 cpr->cp_raw_cons = raw_cons; 2476 return work_done; 2477 } 2478 2479 static void bnxt_free_tx_skbs(struct bnxt *bp) 2480 { 2481 int i, max_idx; 2482 struct pci_dev *pdev = bp->pdev; 2483 2484 if (!bp->tx_ring) 2485 return; 2486 2487 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2488 for (i = 0; i < bp->tx_nr_rings; i++) { 2489 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2490 int j; 2491 2492 for (j = 0; j < max_idx;) { 2493 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2494 struct sk_buff *skb; 2495 int k, last; 2496 2497 if (i < bp->tx_nr_rings_xdp && 2498 tx_buf->action == XDP_REDIRECT) { 2499 dma_unmap_single(&pdev->dev, 2500 dma_unmap_addr(tx_buf, mapping), 2501 dma_unmap_len(tx_buf, len), 2502 PCI_DMA_TODEVICE); 2503 xdp_return_frame(tx_buf->xdpf); 2504 tx_buf->action = 0; 2505 tx_buf->xdpf = NULL; 2506 j++; 2507 continue; 2508 } 2509 2510 skb = tx_buf->skb; 2511 if (!skb) { 2512 j++; 2513 continue; 2514 } 2515 2516 tx_buf->skb = NULL; 2517 2518 if (tx_buf->is_push) { 2519 dev_kfree_skb(skb); 2520 j += 2; 2521 continue; 2522 } 2523 2524 dma_unmap_single(&pdev->dev, 2525 dma_unmap_addr(tx_buf, mapping), 2526 skb_headlen(skb), 2527 PCI_DMA_TODEVICE); 2528 2529 last = tx_buf->nr_frags; 2530 j += 2; 2531 for (k = 0; k < last; k++, j++) { 2532 int ring_idx = j & bp->tx_ring_mask; 2533 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2534 2535 tx_buf = &txr->tx_buf_ring[ring_idx]; 2536 dma_unmap_page( 2537 &pdev->dev, 2538 dma_unmap_addr(tx_buf, mapping), 2539 skb_frag_size(frag), PCI_DMA_TODEVICE); 2540 } 2541 dev_kfree_skb(skb); 2542 } 2543 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 2544 } 2545 } 2546 2547 static void bnxt_free_rx_skbs(struct bnxt *bp) 2548 { 2549 int i, max_idx, max_agg_idx; 2550 struct pci_dev *pdev = bp->pdev; 2551 2552 if (!bp->rx_ring) 2553 return; 2554 2555 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 2556 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 2557 for (i = 0; i < bp->rx_nr_rings; i++) { 2558 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2559 struct bnxt_tpa_idx_map *map; 2560 int j; 2561 2562 if (rxr->rx_tpa) { 2563 for (j = 0; j < bp->max_tpa; j++) { 2564 struct bnxt_tpa_info *tpa_info = 2565 &rxr->rx_tpa[j]; 2566 u8 *data = tpa_info->data; 2567 2568 if (!data) 2569 continue; 2570 2571 dma_unmap_single_attrs(&pdev->dev, 2572 tpa_info->mapping, 2573 bp->rx_buf_use_size, 2574 bp->rx_dir, 2575 DMA_ATTR_WEAK_ORDERING); 2576 2577 tpa_info->data = NULL; 2578 2579 kfree(data); 2580 } 2581 } 2582 2583 for (j = 0; j < max_idx; j++) { 2584 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 2585 dma_addr_t mapping = rx_buf->mapping; 2586 void *data = rx_buf->data; 2587 2588 if (!data) 2589 continue; 2590 2591 rx_buf->data = NULL; 2592 2593 if (BNXT_RX_PAGE_MODE(bp)) { 2594 mapping -= bp->rx_dma_offset; 2595 dma_unmap_page_attrs(&pdev->dev, mapping, 2596 PAGE_SIZE, bp->rx_dir, 2597 DMA_ATTR_WEAK_ORDERING); 2598 page_pool_recycle_direct(rxr->page_pool, data); 2599 } else { 2600 
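			/* Non page-mode rx buffers are kmalloc'ed data
			 * buffers; unmap and free them directly.
			 */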
dma_unmap_single_attrs(&pdev->dev, mapping, 2601 bp->rx_buf_use_size, 2602 bp->rx_dir, 2603 DMA_ATTR_WEAK_ORDERING); 2604 kfree(data); 2605 } 2606 } 2607 2608 for (j = 0; j < max_agg_idx; j++) { 2609 struct bnxt_sw_rx_agg_bd *rx_agg_buf = 2610 &rxr->rx_agg_ring[j]; 2611 struct page *page = rx_agg_buf->page; 2612 2613 if (!page) 2614 continue; 2615 2616 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, 2617 BNXT_RX_PAGE_SIZE, 2618 PCI_DMA_FROMDEVICE, 2619 DMA_ATTR_WEAK_ORDERING); 2620 2621 rx_agg_buf->page = NULL; 2622 __clear_bit(j, rxr->rx_agg_bmap); 2623 2624 __free_page(page); 2625 } 2626 if (rxr->rx_page) { 2627 __free_page(rxr->rx_page); 2628 rxr->rx_page = NULL; 2629 } 2630 map = rxr->rx_tpa_idx_map; 2631 if (map) 2632 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 2633 } 2634 } 2635 2636 static void bnxt_free_skbs(struct bnxt *bp) 2637 { 2638 bnxt_free_tx_skbs(bp); 2639 bnxt_free_rx_skbs(bp); 2640 } 2641 2642 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2643 { 2644 struct pci_dev *pdev = bp->pdev; 2645 int i; 2646 2647 for (i = 0; i < rmem->nr_pages; i++) { 2648 if (!rmem->pg_arr[i]) 2649 continue; 2650 2651 dma_free_coherent(&pdev->dev, rmem->page_size, 2652 rmem->pg_arr[i], rmem->dma_arr[i]); 2653 2654 rmem->pg_arr[i] = NULL; 2655 } 2656 if (rmem->pg_tbl) { 2657 size_t pg_tbl_size = rmem->nr_pages * 8; 2658 2659 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2660 pg_tbl_size = rmem->page_size; 2661 dma_free_coherent(&pdev->dev, pg_tbl_size, 2662 rmem->pg_tbl, rmem->pg_tbl_map); 2663 rmem->pg_tbl = NULL; 2664 } 2665 if (rmem->vmem_size && *rmem->vmem) { 2666 vfree(*rmem->vmem); 2667 *rmem->vmem = NULL; 2668 } 2669 } 2670 2671 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2672 { 2673 struct pci_dev *pdev = bp->pdev; 2674 u64 valid_bit = 0; 2675 int i; 2676 2677 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 2678 valid_bit = PTU_PTE_VALID; 2679 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 2680 size_t pg_tbl_size = rmem->nr_pages * 8; 2681 2682 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 2683 pg_tbl_size = rmem->page_size; 2684 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 2685 &rmem->pg_tbl_map, 2686 GFP_KERNEL); 2687 if (!rmem->pg_tbl) 2688 return -ENOMEM; 2689 } 2690 2691 for (i = 0; i < rmem->nr_pages; i++) { 2692 u64 extra_bits = valid_bit; 2693 2694 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2695 rmem->page_size, 2696 &rmem->dma_arr[i], 2697 GFP_KERNEL); 2698 if (!rmem->pg_arr[i]) 2699 return -ENOMEM; 2700 2701 if (rmem->init_val) 2702 memset(rmem->pg_arr[i], rmem->init_val, 2703 rmem->page_size); 2704 if (rmem->nr_pages > 1 || rmem->depth > 0) { 2705 if (i == rmem->nr_pages - 2 && 2706 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2707 extra_bits |= PTU_PTE_NEXT_TO_LAST; 2708 else if (i == rmem->nr_pages - 1 && 2709 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2710 extra_bits |= PTU_PTE_LAST; 2711 rmem->pg_tbl[i] = 2712 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 2713 } 2714 } 2715 2716 if (rmem->vmem_size) { 2717 *rmem->vmem = vzalloc(rmem->vmem_size); 2718 if (!(*rmem->vmem)) 2719 return -ENOMEM; 2720 } 2721 return 0; 2722 } 2723 2724 static void bnxt_free_tpa_info(struct bnxt *bp) 2725 { 2726 int i; 2727 2728 for (i = 0; i < bp->rx_nr_rings; i++) { 2729 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2730 2731 kfree(rxr->rx_tpa_idx_map); 2732 rxr->rx_tpa_idx_map = NULL; 2733 if (rxr->rx_tpa) { 2734 kfree(rxr->rx_tpa[0].agg_arr); 2735 
rxr->rx_tpa[0].agg_arr = NULL; 2736 } 2737 kfree(rxr->rx_tpa); 2738 rxr->rx_tpa = NULL; 2739 } 2740 } 2741 2742 static int bnxt_alloc_tpa_info(struct bnxt *bp) 2743 { 2744 int i, j, total_aggs = 0; 2745 2746 bp->max_tpa = MAX_TPA; 2747 if (bp->flags & BNXT_FLAG_CHIP_P5) { 2748 if (!bp->max_tpa_v2) 2749 return 0; 2750 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 2751 total_aggs = bp->max_tpa * MAX_SKB_FRAGS; 2752 } 2753 2754 for (i = 0; i < bp->rx_nr_rings; i++) { 2755 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2756 struct rx_agg_cmp *agg; 2757 2758 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 2759 GFP_KERNEL); 2760 if (!rxr->rx_tpa) 2761 return -ENOMEM; 2762 2763 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 2764 continue; 2765 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); 2766 rxr->rx_tpa[0].agg_arr = agg; 2767 if (!agg) 2768 return -ENOMEM; 2769 for (j = 1; j < bp->max_tpa; j++) 2770 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; 2771 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 2772 GFP_KERNEL); 2773 if (!rxr->rx_tpa_idx_map) 2774 return -ENOMEM; 2775 } 2776 return 0; 2777 } 2778 2779 static void bnxt_free_rx_rings(struct bnxt *bp) 2780 { 2781 int i; 2782 2783 if (!bp->rx_ring) 2784 return; 2785 2786 bnxt_free_tpa_info(bp); 2787 for (i = 0; i < bp->rx_nr_rings; i++) { 2788 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2789 struct bnxt_ring_struct *ring; 2790 2791 if (rxr->xdp_prog) 2792 bpf_prog_put(rxr->xdp_prog); 2793 2794 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 2795 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2796 2797 page_pool_destroy(rxr->page_pool); 2798 rxr->page_pool = NULL; 2799 2800 kfree(rxr->rx_agg_bmap); 2801 rxr->rx_agg_bmap = NULL; 2802 2803 ring = &rxr->rx_ring_struct; 2804 bnxt_free_ring(bp, &ring->ring_mem); 2805 2806 ring = &rxr->rx_agg_ring_struct; 2807 bnxt_free_ring(bp, &ring->ring_mem); 2808 } 2809 } 2810 2811 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 2812 struct bnxt_rx_ring_info *rxr) 2813 { 2814 struct page_pool_params pp = { 0 }; 2815 2816 pp.pool_size = bp->rx_ring_size; 2817 pp.nid = dev_to_node(&bp->pdev->dev); 2818 pp.dev = &bp->pdev->dev; 2819 pp.dma_dir = DMA_BIDIRECTIONAL; 2820 2821 rxr->page_pool = page_pool_create(&pp); 2822 if (IS_ERR(rxr->page_pool)) { 2823 int err = PTR_ERR(rxr->page_pool); 2824 2825 rxr->page_pool = NULL; 2826 return err; 2827 } 2828 return 0; 2829 } 2830 2831 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2832 { 2833 int i, rc = 0, agg_rings = 0; 2834 2835 if (!bp->rx_ring) 2836 return -ENOMEM; 2837 2838 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2839 agg_rings = 1; 2840 2841 for (i = 0; i < bp->rx_nr_rings; i++) { 2842 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2843 struct bnxt_ring_struct *ring; 2844 2845 ring = &rxr->rx_ring_struct; 2846 2847 rc = bnxt_alloc_rx_page_pool(bp, rxr); 2848 if (rc) 2849 return rc; 2850 2851 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); 2852 if (rc < 0) 2853 return rc; 2854 2855 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 2856 MEM_TYPE_PAGE_POOL, 2857 rxr->page_pool); 2858 if (rc) { 2859 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2860 return rc; 2861 } 2862 2863 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2864 if (rc) 2865 return rc; 2866 2867 ring->grp_idx = i; 2868 if (agg_rings) { 2869 u16 mem_size; 2870 2871 ring = &rxr->rx_agg_ring_struct; 2872 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2873 if (rc) 2874 return rc; 2875 2876 ring->grp_idx = i; 2877 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2878 mem_size = rxr->rx_agg_bmap_size / 8; 
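			/* one bit per aggregation ring entry */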
2879 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2880 if (!rxr->rx_agg_bmap) 2881 return -ENOMEM; 2882 } 2883 } 2884 if (bp->flags & BNXT_FLAG_TPA) 2885 rc = bnxt_alloc_tpa_info(bp); 2886 return rc; 2887 } 2888 2889 static void bnxt_free_tx_rings(struct bnxt *bp) 2890 { 2891 int i; 2892 struct pci_dev *pdev = bp->pdev; 2893 2894 if (!bp->tx_ring) 2895 return; 2896 2897 for (i = 0; i < bp->tx_nr_rings; i++) { 2898 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2899 struct bnxt_ring_struct *ring; 2900 2901 if (txr->tx_push) { 2902 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2903 txr->tx_push, txr->tx_push_mapping); 2904 txr->tx_push = NULL; 2905 } 2906 2907 ring = &txr->tx_ring_struct; 2908 2909 bnxt_free_ring(bp, &ring->ring_mem); 2910 } 2911 } 2912 2913 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2914 { 2915 int i, j, rc; 2916 struct pci_dev *pdev = bp->pdev; 2917 2918 bp->tx_push_size = 0; 2919 if (bp->tx_push_thresh) { 2920 int push_size; 2921 2922 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2923 bp->tx_push_thresh); 2924 2925 if (push_size > 256) { 2926 push_size = 0; 2927 bp->tx_push_thresh = 0; 2928 } 2929 2930 bp->tx_push_size = push_size; 2931 } 2932 2933 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2934 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2935 struct bnxt_ring_struct *ring; 2936 u8 qidx; 2937 2938 ring = &txr->tx_ring_struct; 2939 2940 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2941 if (rc) 2942 return rc; 2943 2944 ring->grp_idx = txr->bnapi->index; 2945 if (bp->tx_push_size) { 2946 dma_addr_t mapping; 2947 2948 /* One pre-allocated DMA buffer to backup 2949 * TX push operation 2950 */ 2951 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2952 bp->tx_push_size, 2953 &txr->tx_push_mapping, 2954 GFP_KERNEL); 2955 2956 if (!txr->tx_push) 2957 return -ENOMEM; 2958 2959 mapping = txr->tx_push_mapping + 2960 sizeof(struct tx_push_bd); 2961 txr->data_mapping = cpu_to_le64(mapping); 2962 } 2963 qidx = bp->tc_to_qidx[j]; 2964 ring->queue_id = bp->q_info[qidx].queue_id; 2965 if (i < bp->tx_nr_rings_xdp) 2966 continue; 2967 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2968 j++; 2969 } 2970 return 0; 2971 } 2972 2973 static void bnxt_free_cp_rings(struct bnxt *bp) 2974 { 2975 int i; 2976 2977 if (!bp->bnapi) 2978 return; 2979 2980 for (i = 0; i < bp->cp_nr_rings; i++) { 2981 struct bnxt_napi *bnapi = bp->bnapi[i]; 2982 struct bnxt_cp_ring_info *cpr; 2983 struct bnxt_ring_struct *ring; 2984 int j; 2985 2986 if (!bnapi) 2987 continue; 2988 2989 cpr = &bnapi->cp_ring; 2990 ring = &cpr->cp_ring_struct; 2991 2992 bnxt_free_ring(bp, &ring->ring_mem); 2993 2994 for (j = 0; j < 2; j++) { 2995 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 2996 2997 if (cpr2) { 2998 ring = &cpr2->cp_ring_struct; 2999 bnxt_free_ring(bp, &ring->ring_mem); 3000 kfree(cpr2); 3001 cpr->cp_ring_arr[j] = NULL; 3002 } 3003 } 3004 } 3005 } 3006 3007 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) 3008 { 3009 struct bnxt_ring_mem_info *rmem; 3010 struct bnxt_ring_struct *ring; 3011 struct bnxt_cp_ring_info *cpr; 3012 int rc; 3013 3014 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); 3015 if (!cpr) 3016 return NULL; 3017 3018 ring = &cpr->cp_ring_struct; 3019 rmem = &ring->ring_mem; 3020 rmem->nr_pages = bp->cp_nr_pages; 3021 rmem->page_size = HW_CMPD_RING_SIZE; 3022 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3023 rmem->dma_arr = cpr->cp_desc_mapping; 3024 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 3025 rc = bnxt_alloc_ring(bp, rmem); 3026 if (rc) { 
3027 bnxt_free_ring(bp, rmem); 3028 kfree(cpr); 3029 cpr = NULL; 3030 } 3031 return cpr; 3032 } 3033 3034 static int bnxt_alloc_cp_rings(struct bnxt *bp) 3035 { 3036 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3037 int i, rc, ulp_base_vec, ulp_msix; 3038 3039 ulp_msix = bnxt_get_ulp_msix_num(bp); 3040 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 3041 for (i = 0; i < bp->cp_nr_rings; i++) { 3042 struct bnxt_napi *bnapi = bp->bnapi[i]; 3043 struct bnxt_cp_ring_info *cpr; 3044 struct bnxt_ring_struct *ring; 3045 3046 if (!bnapi) 3047 continue; 3048 3049 cpr = &bnapi->cp_ring; 3050 cpr->bnapi = bnapi; 3051 ring = &cpr->cp_ring_struct; 3052 3053 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3054 if (rc) 3055 return rc; 3056 3057 if (ulp_msix && i >= ulp_base_vec) 3058 ring->map_idx = i + ulp_msix; 3059 else 3060 ring->map_idx = i; 3061 3062 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 3063 continue; 3064 3065 if (i < bp->rx_nr_rings) { 3066 struct bnxt_cp_ring_info *cpr2 = 3067 bnxt_alloc_cp_sub_ring(bp); 3068 3069 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; 3070 if (!cpr2) 3071 return -ENOMEM; 3072 cpr2->bnapi = bnapi; 3073 } 3074 if ((sh && i < bp->tx_nr_rings) || 3075 (!sh && i >= bp->rx_nr_rings)) { 3076 struct bnxt_cp_ring_info *cpr2 = 3077 bnxt_alloc_cp_sub_ring(bp); 3078 3079 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; 3080 if (!cpr2) 3081 return -ENOMEM; 3082 cpr2->bnapi = bnapi; 3083 } 3084 } 3085 return 0; 3086 } 3087 3088 static void bnxt_init_ring_struct(struct bnxt *bp) 3089 { 3090 int i; 3091 3092 for (i = 0; i < bp->cp_nr_rings; i++) { 3093 struct bnxt_napi *bnapi = bp->bnapi[i]; 3094 struct bnxt_ring_mem_info *rmem; 3095 struct bnxt_cp_ring_info *cpr; 3096 struct bnxt_rx_ring_info *rxr; 3097 struct bnxt_tx_ring_info *txr; 3098 struct bnxt_ring_struct *ring; 3099 3100 if (!bnapi) 3101 continue; 3102 3103 cpr = &bnapi->cp_ring; 3104 ring = &cpr->cp_ring_struct; 3105 rmem = &ring->ring_mem; 3106 rmem->nr_pages = bp->cp_nr_pages; 3107 rmem->page_size = HW_CMPD_RING_SIZE; 3108 rmem->pg_arr = (void **)cpr->cp_desc_ring; 3109 rmem->dma_arr = cpr->cp_desc_mapping; 3110 rmem->vmem_size = 0; 3111 3112 rxr = bnapi->rx_ring; 3113 if (!rxr) 3114 goto skip_rx; 3115 3116 ring = &rxr->rx_ring_struct; 3117 rmem = &ring->ring_mem; 3118 rmem->nr_pages = bp->rx_nr_pages; 3119 rmem->page_size = HW_RXBD_RING_SIZE; 3120 rmem->pg_arr = (void **)rxr->rx_desc_ring; 3121 rmem->dma_arr = rxr->rx_desc_mapping; 3122 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 3123 rmem->vmem = (void **)&rxr->rx_buf_ring; 3124 3125 ring = &rxr->rx_agg_ring_struct; 3126 rmem = &ring->ring_mem; 3127 rmem->nr_pages = bp->rx_agg_nr_pages; 3128 rmem->page_size = HW_RXBD_RING_SIZE; 3129 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 3130 rmem->dma_arr = rxr->rx_agg_desc_mapping; 3131 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 3132 rmem->vmem = (void **)&rxr->rx_agg_ring; 3133 3134 skip_rx: 3135 txr = bnapi->tx_ring; 3136 if (!txr) 3137 continue; 3138 3139 ring = &txr->tx_ring_struct; 3140 rmem = &ring->ring_mem; 3141 rmem->nr_pages = bp->tx_nr_pages; 3142 rmem->page_size = HW_RXBD_RING_SIZE; 3143 rmem->pg_arr = (void **)txr->tx_desc_ring; 3144 rmem->dma_arr = txr->tx_desc_mapping; 3145 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 3146 rmem->vmem = (void **)&txr->tx_buf_ring; 3147 } 3148 } 3149 3150 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 3151 { 3152 int i; 3153 u32 prod; 3154 struct rx_bd **rx_buf_ring; 3155 3156 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 3157 for 
(i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 3158 int j; 3159 struct rx_bd *rxbd; 3160 3161 rxbd = rx_buf_ring[i]; 3162 if (!rxbd) 3163 continue; 3164 3165 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 3166 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 3167 rxbd->rx_bd_opaque = prod; 3168 } 3169 } 3170 } 3171 3172 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 3173 { 3174 struct net_device *dev = bp->dev; 3175 struct bnxt_rx_ring_info *rxr; 3176 struct bnxt_ring_struct *ring; 3177 u32 prod, type; 3178 int i; 3179 3180 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 3181 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 3182 3183 if (NET_IP_ALIGN == 2) 3184 type |= RX_BD_FLAGS_SOP; 3185 3186 rxr = &bp->rx_ring[ring_nr]; 3187 ring = &rxr->rx_ring_struct; 3188 bnxt_init_rxbd_pages(ring, type); 3189 3190 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 3191 bpf_prog_add(bp->xdp_prog, 1); 3192 rxr->xdp_prog = bp->xdp_prog; 3193 } 3194 prod = rxr->rx_prod; 3195 for (i = 0; i < bp->rx_ring_size; i++) { 3196 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { 3197 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 3198 ring_nr, i, bp->rx_ring_size); 3199 break; 3200 } 3201 prod = NEXT_RX(prod); 3202 } 3203 rxr->rx_prod = prod; 3204 ring->fw_ring_id = INVALID_HW_RING_ID; 3205 3206 ring = &rxr->rx_agg_ring_struct; 3207 ring->fw_ring_id = INVALID_HW_RING_ID; 3208 3209 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 3210 return 0; 3211 3212 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 3213 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 3214 3215 bnxt_init_rxbd_pages(ring, type); 3216 3217 prod = rxr->rx_agg_prod; 3218 for (i = 0; i < bp->rx_agg_ring_size; i++) { 3219 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { 3220 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 3221 ring_nr, i, bp->rx_ring_size); 3222 break; 3223 } 3224 prod = NEXT_RX_AGG(prod); 3225 } 3226 rxr->rx_agg_prod = prod; 3227 3228 if (bp->flags & BNXT_FLAG_TPA) { 3229 if (rxr->rx_tpa) { 3230 u8 *data; 3231 dma_addr_t mapping; 3232 3233 for (i = 0; i < bp->max_tpa; i++) { 3234 data = __bnxt_alloc_rx_data(bp, &mapping, 3235 GFP_KERNEL); 3236 if (!data) 3237 return -ENOMEM; 3238 3239 rxr->rx_tpa[i].data = data; 3240 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 3241 rxr->rx_tpa[i].mapping = mapping; 3242 } 3243 } else { 3244 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); 3245 return -ENOMEM; 3246 } 3247 } 3248 3249 return 0; 3250 } 3251 3252 static void bnxt_init_cp_rings(struct bnxt *bp) 3253 { 3254 int i, j; 3255 3256 for (i = 0; i < bp->cp_nr_rings; i++) { 3257 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 3258 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3259 3260 ring->fw_ring_id = INVALID_HW_RING_ID; 3261 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3262 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3263 for (j = 0; j < 2; j++) { 3264 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 3265 3266 if (!cpr2) 3267 continue; 3268 3269 ring = &cpr2->cp_ring_struct; 3270 ring->fw_ring_id = INVALID_HW_RING_ID; 3271 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 3272 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 3273 } 3274 } 3275 } 3276 3277 static int bnxt_init_rx_rings(struct bnxt *bp) 3278 { 3279 int i, rc = 0; 3280 3281 if (BNXT_RX_PAGE_MODE(bp)) { 3282 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 3283 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 3284 } else { 3285 bp->rx_offset = BNXT_RX_OFFSET; 3286 
bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 3287 } 3288 3289 for (i = 0; i < bp->rx_nr_rings; i++) { 3290 rc = bnxt_init_one_rx_ring(bp, i); 3291 if (rc) 3292 break; 3293 } 3294 3295 return rc; 3296 } 3297 3298 static int bnxt_init_tx_rings(struct bnxt *bp) 3299 { 3300 u16 i; 3301 3302 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 3303 MAX_SKB_FRAGS + 1); 3304 3305 for (i = 0; i < bp->tx_nr_rings; i++) { 3306 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3307 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 3308 3309 ring->fw_ring_id = INVALID_HW_RING_ID; 3310 } 3311 3312 return 0; 3313 } 3314 3315 static void bnxt_free_ring_grps(struct bnxt *bp) 3316 { 3317 kfree(bp->grp_info); 3318 bp->grp_info = NULL; 3319 } 3320 3321 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 3322 { 3323 int i; 3324 3325 if (irq_re_init) { 3326 bp->grp_info = kcalloc(bp->cp_nr_rings, 3327 sizeof(struct bnxt_ring_grp_info), 3328 GFP_KERNEL); 3329 if (!bp->grp_info) 3330 return -ENOMEM; 3331 } 3332 for (i = 0; i < bp->cp_nr_rings; i++) { 3333 if (irq_re_init) 3334 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 3335 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 3336 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 3337 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 3338 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 3339 } 3340 return 0; 3341 } 3342 3343 static void bnxt_free_vnics(struct bnxt *bp) 3344 { 3345 kfree(bp->vnic_info); 3346 bp->vnic_info = NULL; 3347 bp->nr_vnics = 0; 3348 } 3349 3350 static int bnxt_alloc_vnics(struct bnxt *bp) 3351 { 3352 int num_vnics = 1; 3353 3354 #ifdef CONFIG_RFS_ACCEL 3355 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 3356 num_vnics += bp->rx_nr_rings; 3357 #endif 3358 3359 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3360 num_vnics++; 3361 3362 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 3363 GFP_KERNEL); 3364 if (!bp->vnic_info) 3365 return -ENOMEM; 3366 3367 bp->nr_vnics = num_vnics; 3368 return 0; 3369 } 3370 3371 static void bnxt_init_vnics(struct bnxt *bp) 3372 { 3373 int i; 3374 3375 for (i = 0; i < bp->nr_vnics; i++) { 3376 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3377 int j; 3378 3379 vnic->fw_vnic_id = INVALID_HW_RING_ID; 3380 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 3381 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 3382 3383 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 3384 3385 if (bp->vnic_info[i].rss_hash_key) { 3386 if (i == 0) 3387 prandom_bytes(vnic->rss_hash_key, 3388 HW_HASH_KEY_SIZE); 3389 else 3390 memcpy(vnic->rss_hash_key, 3391 bp->vnic_info[0].rss_hash_key, 3392 HW_HASH_KEY_SIZE); 3393 } 3394 } 3395 } 3396 3397 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 3398 { 3399 int pages; 3400 3401 pages = ring_size / desc_per_pg; 3402 3403 if (!pages) 3404 return 1; 3405 3406 pages++; 3407 3408 while (pages & (pages - 1)) 3409 pages++; 3410 3411 return pages; 3412 } 3413 3414 void bnxt_set_tpa_flags(struct bnxt *bp) 3415 { 3416 bp->flags &= ~BNXT_FLAG_TPA; 3417 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 3418 return; 3419 if (bp->dev->features & NETIF_F_LRO) 3420 bp->flags |= BNXT_FLAG_LRO; 3421 else if (bp->dev->features & NETIF_F_GRO_HW) 3422 bp->flags |= BNXT_FLAG_GRO; 3423 } 3424 3425 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 3426 * be set on entry. 
3427 */ 3428 void bnxt_set_ring_params(struct bnxt *bp) 3429 { 3430 u32 ring_size, rx_size, rx_space; 3431 u32 agg_factor = 0, agg_ring_size = 0; 3432 3433 /* 8 for CRC and VLAN */ 3434 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 3435 3436 rx_space = rx_size + NET_SKB_PAD + 3437 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3438 3439 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 3440 ring_size = bp->rx_ring_size; 3441 bp->rx_agg_ring_size = 0; 3442 bp->rx_agg_nr_pages = 0; 3443 3444 if (bp->flags & BNXT_FLAG_TPA) 3445 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 3446 3447 bp->flags &= ~BNXT_FLAG_JUMBO; 3448 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 3449 u32 jumbo_factor; 3450 3451 bp->flags |= BNXT_FLAG_JUMBO; 3452 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 3453 if (jumbo_factor > agg_factor) 3454 agg_factor = jumbo_factor; 3455 } 3456 agg_ring_size = ring_size * agg_factor; 3457 3458 if (agg_ring_size) { 3459 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 3460 RX_DESC_CNT); 3461 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 3462 u32 tmp = agg_ring_size; 3463 3464 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 3465 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 3466 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 3467 tmp, agg_ring_size); 3468 } 3469 bp->rx_agg_ring_size = agg_ring_size; 3470 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 3471 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 3472 rx_space = rx_size + NET_SKB_PAD + 3473 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3474 } 3475 3476 bp->rx_buf_use_size = rx_size; 3477 bp->rx_buf_size = rx_space; 3478 3479 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 3480 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 3481 3482 ring_size = bp->tx_ring_size; 3483 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 3484 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 3485 3486 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 3487 bp->cp_ring_size = ring_size; 3488 3489 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 3490 if (bp->cp_nr_pages > MAX_CP_PAGES) { 3491 bp->cp_nr_pages = MAX_CP_PAGES; 3492 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 3493 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 3494 ring_size, bp->cp_ring_size); 3495 } 3496 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 3497 bp->cp_ring_mask = bp->cp_bit - 1; 3498 } 3499 3500 /* Changing allocation mode of RX rings. 3501 * TODO: Update when extending xdp_rxq_info to support allocation modes. 
3502 */ 3503 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 3504 { 3505 if (page_mode) { 3506 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 3507 return -EOPNOTSUPP; 3508 bp->dev->max_mtu = 3509 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 3510 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 3511 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 3512 bp->rx_dir = DMA_BIDIRECTIONAL; 3513 bp->rx_skb_func = bnxt_rx_page_skb; 3514 /* Disable LRO or GRO_HW */ 3515 netdev_update_features(bp->dev); 3516 } else { 3517 bp->dev->max_mtu = bp->max_mtu; 3518 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 3519 bp->rx_dir = DMA_FROM_DEVICE; 3520 bp->rx_skb_func = bnxt_rx_skb; 3521 } 3522 return 0; 3523 } 3524 3525 static void bnxt_free_vnic_attributes(struct bnxt *bp) 3526 { 3527 int i; 3528 struct bnxt_vnic_info *vnic; 3529 struct pci_dev *pdev = bp->pdev; 3530 3531 if (!bp->vnic_info) 3532 return; 3533 3534 for (i = 0; i < bp->nr_vnics; i++) { 3535 vnic = &bp->vnic_info[i]; 3536 3537 kfree(vnic->fw_grp_ids); 3538 vnic->fw_grp_ids = NULL; 3539 3540 kfree(vnic->uc_list); 3541 vnic->uc_list = NULL; 3542 3543 if (vnic->mc_list) { 3544 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 3545 vnic->mc_list, vnic->mc_list_mapping); 3546 vnic->mc_list = NULL; 3547 } 3548 3549 if (vnic->rss_table) { 3550 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3551 vnic->rss_table, 3552 vnic->rss_table_dma_addr); 3553 vnic->rss_table = NULL; 3554 } 3555 3556 vnic->rss_hash_key = NULL; 3557 vnic->flags = 0; 3558 } 3559 } 3560 3561 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 3562 { 3563 int i, rc = 0, size; 3564 struct bnxt_vnic_info *vnic; 3565 struct pci_dev *pdev = bp->pdev; 3566 int max_rings; 3567 3568 for (i = 0; i < bp->nr_vnics; i++) { 3569 vnic = &bp->vnic_info[i]; 3570 3571 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 3572 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 3573 3574 if (mem_size > 0) { 3575 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 3576 if (!vnic->uc_list) { 3577 rc = -ENOMEM; 3578 goto out; 3579 } 3580 } 3581 } 3582 3583 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 3584 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 3585 vnic->mc_list = 3586 dma_alloc_coherent(&pdev->dev, 3587 vnic->mc_list_size, 3588 &vnic->mc_list_mapping, 3589 GFP_KERNEL); 3590 if (!vnic->mc_list) { 3591 rc = -ENOMEM; 3592 goto out; 3593 } 3594 } 3595 3596 if (bp->flags & BNXT_FLAG_CHIP_P5) 3597 goto vnic_skip_grps; 3598 3599 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3600 max_rings = bp->rx_nr_rings; 3601 else 3602 max_rings = 1; 3603 3604 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 3605 if (!vnic->fw_grp_ids) { 3606 rc = -ENOMEM; 3607 goto out; 3608 } 3609 vnic_skip_grps: 3610 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 3611 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 3612 continue; 3613 3614 /* Allocate rss table and hash key */ 3615 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3616 &vnic->rss_table_dma_addr, 3617 GFP_KERNEL); 3618 if (!vnic->rss_table) { 3619 rc = -ENOMEM; 3620 goto out; 3621 } 3622 3623 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 3624 3625 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 3626 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 3627 } 3628 return 0; 3629 3630 out: 3631 return rc; 3632 } 3633 3634 static void bnxt_free_hwrm_resources(struct bnxt *bp) 3635 { 3636 struct pci_dev *pdev = bp->pdev; 3637 3638 if (bp->hwrm_cmd_resp_addr) { 3639 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3640 
bp->hwrm_cmd_resp_dma_addr); 3641 bp->hwrm_cmd_resp_addr = NULL; 3642 } 3643 3644 if (bp->hwrm_cmd_kong_resp_addr) { 3645 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3646 bp->hwrm_cmd_kong_resp_addr, 3647 bp->hwrm_cmd_kong_resp_dma_addr); 3648 bp->hwrm_cmd_kong_resp_addr = NULL; 3649 } 3650 } 3651 3652 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) 3653 { 3654 struct pci_dev *pdev = bp->pdev; 3655 3656 if (bp->hwrm_cmd_kong_resp_addr) 3657 return 0; 3658 3659 bp->hwrm_cmd_kong_resp_addr = 3660 dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3661 &bp->hwrm_cmd_kong_resp_dma_addr, 3662 GFP_KERNEL); 3663 if (!bp->hwrm_cmd_kong_resp_addr) 3664 return -ENOMEM; 3665 3666 return 0; 3667 } 3668 3669 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 3670 { 3671 struct pci_dev *pdev = bp->pdev; 3672 3673 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3674 &bp->hwrm_cmd_resp_dma_addr, 3675 GFP_KERNEL); 3676 if (!bp->hwrm_cmd_resp_addr) 3677 return -ENOMEM; 3678 3679 return 0; 3680 } 3681 3682 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) 3683 { 3684 if (bp->hwrm_short_cmd_req_addr) { 3685 struct pci_dev *pdev = bp->pdev; 3686 3687 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3688 bp->hwrm_short_cmd_req_addr, 3689 bp->hwrm_short_cmd_req_dma_addr); 3690 bp->hwrm_short_cmd_req_addr = NULL; 3691 } 3692 } 3693 3694 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) 3695 { 3696 struct pci_dev *pdev = bp->pdev; 3697 3698 if (bp->hwrm_short_cmd_req_addr) 3699 return 0; 3700 3701 bp->hwrm_short_cmd_req_addr = 3702 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3703 &bp->hwrm_short_cmd_req_dma_addr, 3704 GFP_KERNEL); 3705 if (!bp->hwrm_short_cmd_req_addr) 3706 return -ENOMEM; 3707 3708 return 0; 3709 } 3710 3711 static void bnxt_free_port_stats(struct bnxt *bp) 3712 { 3713 struct pci_dev *pdev = bp->pdev; 3714 3715 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3716 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 3717 3718 if (bp->hw_rx_port_stats) { 3719 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 3720 bp->hw_rx_port_stats, 3721 bp->hw_rx_port_stats_map); 3722 bp->hw_rx_port_stats = NULL; 3723 } 3724 3725 if (bp->hw_tx_port_stats_ext) { 3726 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), 3727 bp->hw_tx_port_stats_ext, 3728 bp->hw_tx_port_stats_ext_map); 3729 bp->hw_tx_port_stats_ext = NULL; 3730 } 3731 3732 if (bp->hw_rx_port_stats_ext) { 3733 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3734 bp->hw_rx_port_stats_ext, 3735 bp->hw_rx_port_stats_ext_map); 3736 bp->hw_rx_port_stats_ext = NULL; 3737 } 3738 3739 if (bp->hw_pcie_stats) { 3740 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), 3741 bp->hw_pcie_stats, bp->hw_pcie_stats_map); 3742 bp->hw_pcie_stats = NULL; 3743 } 3744 } 3745 3746 static void bnxt_free_ring_stats(struct bnxt *bp) 3747 { 3748 struct pci_dev *pdev = bp->pdev; 3749 int size, i; 3750 3751 if (!bp->bnapi) 3752 return; 3753 3754 size = bp->hw_ring_stats_size; 3755 3756 for (i = 0; i < bp->cp_nr_rings; i++) { 3757 struct bnxt_napi *bnapi = bp->bnapi[i]; 3758 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3759 3760 if (cpr->hw_stats) { 3761 dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 3762 cpr->hw_stats_map); 3763 cpr->hw_stats = NULL; 3764 } 3765 } 3766 } 3767 3768 static int bnxt_alloc_stats(struct bnxt *bp) 3769 { 3770 u32 size, i; 3771 struct pci_dev *pdev = bp->pdev; 3772 3773 size = bp->hw_ring_stats_size; 3774 3775 for (i = 0; i < bp->cp_nr_rings; i++) { 3776 struct 
bnxt_napi *bnapi = bp->bnapi[i]; 3777 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3778 3779 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 3780 &cpr->hw_stats_map, 3781 GFP_KERNEL); 3782 if (!cpr->hw_stats) 3783 return -ENOMEM; 3784 3785 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 3786 } 3787 3788 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 3789 return 0; 3790 3791 if (bp->hw_rx_port_stats) 3792 goto alloc_ext_stats; 3793 3794 bp->hw_port_stats_size = sizeof(struct rx_port_stats) + 3795 sizeof(struct tx_port_stats) + 1024; 3796 3797 bp->hw_rx_port_stats = 3798 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, 3799 &bp->hw_rx_port_stats_map, 3800 GFP_KERNEL); 3801 if (!bp->hw_rx_port_stats) 3802 return -ENOMEM; 3803 3804 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512; 3805 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + 3806 sizeof(struct rx_port_stats) + 512; 3807 bp->flags |= BNXT_FLAG_PORT_STATS; 3808 3809 alloc_ext_stats: 3810 /* Display extended statistics only if FW supports it */ 3811 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 3812 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 3813 return 0; 3814 3815 if (bp->hw_rx_port_stats_ext) 3816 goto alloc_tx_ext_stats; 3817 3818 bp->hw_rx_port_stats_ext = 3819 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3820 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL); 3821 if (!bp->hw_rx_port_stats_ext) 3822 return 0; 3823 3824 alloc_tx_ext_stats: 3825 if (bp->hw_tx_port_stats_ext) 3826 goto alloc_pcie_stats; 3827 3828 if (bp->hwrm_spec_code >= 0x10902 || 3829 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 3830 bp->hw_tx_port_stats_ext = 3831 dma_alloc_coherent(&pdev->dev, 3832 sizeof(struct tx_port_stats_ext), 3833 &bp->hw_tx_port_stats_ext_map, 3834 GFP_KERNEL); 3835 } 3836 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3837 3838 alloc_pcie_stats: 3839 if (bp->hw_pcie_stats || 3840 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 3841 return 0; 3842 3843 bp->hw_pcie_stats = 3844 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), 3845 &bp->hw_pcie_stats_map, GFP_KERNEL); 3846 if (!bp->hw_pcie_stats) 3847 return 0; 3848 3849 bp->flags |= BNXT_FLAG_PCIE_STATS; 3850 return 0; 3851 } 3852 3853 static void bnxt_clear_ring_indices(struct bnxt *bp) 3854 { 3855 int i; 3856 3857 if (!bp->bnapi) 3858 return; 3859 3860 for (i = 0; i < bp->cp_nr_rings; i++) { 3861 struct bnxt_napi *bnapi = bp->bnapi[i]; 3862 struct bnxt_cp_ring_info *cpr; 3863 struct bnxt_rx_ring_info *rxr; 3864 struct bnxt_tx_ring_info *txr; 3865 3866 if (!bnapi) 3867 continue; 3868 3869 cpr = &bnapi->cp_ring; 3870 cpr->cp_raw_cons = 0; 3871 3872 txr = bnapi->tx_ring; 3873 if (txr) { 3874 txr->tx_prod = 0; 3875 txr->tx_cons = 0; 3876 } 3877 3878 rxr = bnapi->rx_ring; 3879 if (rxr) { 3880 rxr->rx_prod = 0; 3881 rxr->rx_agg_prod = 0; 3882 rxr->rx_sw_agg_prod = 0; 3883 rxr->rx_next_cons = 0; 3884 } 3885 } 3886 } 3887 3888 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 3889 { 3890 #ifdef CONFIG_RFS_ACCEL 3891 int i; 3892 3893 /* Under rtnl_lock and all our NAPIs have been disabled. It's 3894 * safe to delete the hash table. 
3895 */ 3896 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 3897 struct hlist_head *head; 3898 struct hlist_node *tmp; 3899 struct bnxt_ntuple_filter *fltr; 3900 3901 head = &bp->ntp_fltr_hash_tbl[i]; 3902 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 3903 hlist_del(&fltr->hash); 3904 kfree(fltr); 3905 } 3906 } 3907 if (irq_reinit) { 3908 kfree(bp->ntp_fltr_bmap); 3909 bp->ntp_fltr_bmap = NULL; 3910 } 3911 bp->ntp_fltr_count = 0; 3912 #endif 3913 } 3914 3915 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 3916 { 3917 #ifdef CONFIG_RFS_ACCEL 3918 int i, rc = 0; 3919 3920 if (!(bp->flags & BNXT_FLAG_RFS)) 3921 return 0; 3922 3923 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 3924 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 3925 3926 bp->ntp_fltr_count = 0; 3927 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 3928 sizeof(long), 3929 GFP_KERNEL); 3930 3931 if (!bp->ntp_fltr_bmap) 3932 rc = -ENOMEM; 3933 3934 return rc; 3935 #else 3936 return 0; 3937 #endif 3938 } 3939 3940 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 3941 { 3942 bnxt_free_vnic_attributes(bp); 3943 bnxt_free_tx_rings(bp); 3944 bnxt_free_rx_rings(bp); 3945 bnxt_free_cp_rings(bp); 3946 bnxt_free_ntp_fltrs(bp, irq_re_init); 3947 if (irq_re_init) { 3948 bnxt_free_ring_stats(bp); 3949 bnxt_free_ring_grps(bp); 3950 bnxt_free_vnics(bp); 3951 kfree(bp->tx_ring_map); 3952 bp->tx_ring_map = NULL; 3953 kfree(bp->tx_ring); 3954 bp->tx_ring = NULL; 3955 kfree(bp->rx_ring); 3956 bp->rx_ring = NULL; 3957 kfree(bp->bnapi); 3958 bp->bnapi = NULL; 3959 } else { 3960 bnxt_clear_ring_indices(bp); 3961 } 3962 } 3963 3964 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 3965 { 3966 int i, j, rc, size, arr_size; 3967 void *bnapi; 3968 3969 if (irq_re_init) { 3970 /* Allocate bnapi mem pointer array and mem block for 3971 * all queues 3972 */ 3973 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 3974 bp->cp_nr_rings); 3975 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 3976 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 3977 if (!bnapi) 3978 return -ENOMEM; 3979 3980 bp->bnapi = bnapi; 3981 bnapi += arr_size; 3982 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 3983 bp->bnapi[i] = bnapi; 3984 bp->bnapi[i]->index = i; 3985 bp->bnapi[i]->bp = bp; 3986 if (bp->flags & BNXT_FLAG_CHIP_P5) { 3987 struct bnxt_cp_ring_info *cpr = 3988 &bp->bnapi[i]->cp_ring; 3989 3990 cpr->cp_ring_struct.ring_mem.flags = 3991 BNXT_RMEM_RING_PTE_FLAG; 3992 } 3993 } 3994 3995 bp->rx_ring = kcalloc(bp->rx_nr_rings, 3996 sizeof(struct bnxt_rx_ring_info), 3997 GFP_KERNEL); 3998 if (!bp->rx_ring) 3999 return -ENOMEM; 4000 4001 for (i = 0; i < bp->rx_nr_rings; i++) { 4002 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4003 4004 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4005 rxr->rx_ring_struct.ring_mem.flags = 4006 BNXT_RMEM_RING_PTE_FLAG; 4007 rxr->rx_agg_ring_struct.ring_mem.flags = 4008 BNXT_RMEM_RING_PTE_FLAG; 4009 } 4010 rxr->bnapi = bp->bnapi[i]; 4011 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 4012 } 4013 4014 bp->tx_ring = kcalloc(bp->tx_nr_rings, 4015 sizeof(struct bnxt_tx_ring_info), 4016 GFP_KERNEL); 4017 if (!bp->tx_ring) 4018 return -ENOMEM; 4019 4020 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 4021 GFP_KERNEL); 4022 4023 if (!bp->tx_ring_map) 4024 return -ENOMEM; 4025 4026 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4027 j = 0; 4028 else 4029 j = bp->rx_nr_rings; 4030 4031 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 4032 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4033 4034 if 
(bp->flags & BNXT_FLAG_CHIP_P5) 4035 txr->tx_ring_struct.ring_mem.flags = 4036 BNXT_RMEM_RING_PTE_FLAG; 4037 txr->bnapi = bp->bnapi[j]; 4038 bp->bnapi[j]->tx_ring = txr; 4039 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 4040 if (i >= bp->tx_nr_rings_xdp) { 4041 txr->txq_index = i - bp->tx_nr_rings_xdp; 4042 bp->bnapi[j]->tx_int = bnxt_tx_int; 4043 } else { 4044 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 4045 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 4046 } 4047 } 4048 4049 rc = bnxt_alloc_stats(bp); 4050 if (rc) 4051 goto alloc_mem_err; 4052 4053 rc = bnxt_alloc_ntp_fltrs(bp); 4054 if (rc) 4055 goto alloc_mem_err; 4056 4057 rc = bnxt_alloc_vnics(bp); 4058 if (rc) 4059 goto alloc_mem_err; 4060 } 4061 4062 bnxt_init_ring_struct(bp); 4063 4064 rc = bnxt_alloc_rx_rings(bp); 4065 if (rc) 4066 goto alloc_mem_err; 4067 4068 rc = bnxt_alloc_tx_rings(bp); 4069 if (rc) 4070 goto alloc_mem_err; 4071 4072 rc = bnxt_alloc_cp_rings(bp); 4073 if (rc) 4074 goto alloc_mem_err; 4075 4076 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 4077 BNXT_VNIC_UCAST_FLAG; 4078 rc = bnxt_alloc_vnic_attributes(bp); 4079 if (rc) 4080 goto alloc_mem_err; 4081 return 0; 4082 4083 alloc_mem_err: 4084 bnxt_free_mem(bp, true); 4085 return rc; 4086 } 4087 4088 static void bnxt_disable_int(struct bnxt *bp) 4089 { 4090 int i; 4091 4092 if (!bp->bnapi) 4093 return; 4094 4095 for (i = 0; i < bp->cp_nr_rings; i++) { 4096 struct bnxt_napi *bnapi = bp->bnapi[i]; 4097 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4098 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4099 4100 if (ring->fw_ring_id != INVALID_HW_RING_ID) 4101 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4102 } 4103 } 4104 4105 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 4106 { 4107 struct bnxt_napi *bnapi = bp->bnapi[n]; 4108 struct bnxt_cp_ring_info *cpr; 4109 4110 cpr = &bnapi->cp_ring; 4111 return cpr->cp_ring_struct.map_idx; 4112 } 4113 4114 static void bnxt_disable_int_sync(struct bnxt *bp) 4115 { 4116 int i; 4117 4118 atomic_inc(&bp->intr_sem); 4119 4120 bnxt_disable_int(bp); 4121 for (i = 0; i < bp->cp_nr_rings; i++) { 4122 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 4123 4124 synchronize_irq(bp->irq_tbl[map_idx].vector); 4125 } 4126 } 4127 4128 static void bnxt_enable_int(struct bnxt *bp) 4129 { 4130 int i; 4131 4132 atomic_set(&bp->intr_sem, 0); 4133 for (i = 0; i < bp->cp_nr_rings; i++) { 4134 struct bnxt_napi *bnapi = bp->bnapi[i]; 4135 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4136 4137 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 4138 } 4139 } 4140 4141 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 4142 u16 cmpl_ring, u16 target_id) 4143 { 4144 struct input *req = request; 4145 4146 req->req_type = cpu_to_le16(req_type); 4147 req->cmpl_ring = cpu_to_le16(cmpl_ring); 4148 req->target_id = cpu_to_le16(target_id); 4149 if (bnxt_kong_hwrm_message(bp, req)) 4150 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 4151 else 4152 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 4153 } 4154 4155 static int bnxt_hwrm_to_stderr(u32 hwrm_err) 4156 { 4157 switch (hwrm_err) { 4158 case HWRM_ERR_CODE_SUCCESS: 4159 return 0; 4160 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: 4161 return -EACCES; 4162 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: 4163 return -ENOSPC; 4164 case HWRM_ERR_CODE_INVALID_PARAMS: 4165 case HWRM_ERR_CODE_INVALID_FLAGS: 4166 case HWRM_ERR_CODE_INVALID_ENABLES: 4167 case HWRM_ERR_CODE_UNSUPPORTED_TLV: 4168 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: 
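/* All invalid-parameter, invalid-flag and unsupported-option firmware
 * errors collapse to -EINVAL.
 */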
4169 return -EINVAL; 4170 case HWRM_ERR_CODE_NO_BUFFER: 4171 return -ENOMEM; 4172 case HWRM_ERR_CODE_HOT_RESET_PROGRESS: 4173 return -EAGAIN; 4174 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: 4175 return -EOPNOTSUPP; 4176 default: 4177 return -EIO; 4178 } 4179 } 4180 4181 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 4182 int timeout, bool silent) 4183 { 4184 int i, intr_process, rc, tmo_count; 4185 struct input *req = msg; 4186 u32 *data = msg; 4187 __le32 *resp_len; 4188 u8 *valid; 4189 u16 cp_ring_id, len = 0; 4190 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4191 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4192 struct hwrm_short_input short_input = {0}; 4193 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4194 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; 4195 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4196 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4197 4198 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4199 return -EBUSY; 4200 4201 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4202 if (msg_len > bp->hwrm_max_ext_req_len || 4203 !bp->hwrm_short_cmd_req_addr) 4204 return -EINVAL; 4205 } 4206 4207 if (bnxt_hwrm_kong_chnl(bp, req)) { 4208 dst = BNXT_HWRM_CHNL_KONG; 4209 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4210 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4211 resp = bp->hwrm_cmd_kong_resp_addr; 4212 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; 4213 } 4214 4215 memset(resp, 0, PAGE_SIZE); 4216 cp_ring_id = le16_to_cpu(req->cmpl_ring); 4217 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; 4218 4219 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); 4220 /* currently supports only one outstanding message */ 4221 if (intr_process) 4222 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 4223 4224 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 4225 msg_len > BNXT_HWRM_MAX_REQ_LEN) { 4226 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 4227 u16 max_msg_len; 4228 4229 /* Set boundary for maximum extended request length for short 4230 * cmd format. If passed up from device use the max supported 4231 * internal req length. 4232 */ 4233 max_msg_len = bp->hwrm_max_ext_req_len; 4234 4235 memcpy(short_cmd_req, req, msg_len); 4236 if (msg_len < max_msg_len) 4237 memset(short_cmd_req + msg_len, 0, 4238 max_msg_len - msg_len); 4239 4240 short_input.req_type = req->req_type; 4241 short_input.signature = 4242 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); 4243 short_input.size = cpu_to_le16(msg_len); 4244 short_input.req_addr = 4245 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); 4246 4247 data = (u32 *)&short_input; 4248 msg_len = sizeof(short_input); 4249 4250 /* Sync memory write before updating doorbell */ 4251 wmb(); 4252 4253 max_req_len = BNXT_HWRM_SHORT_REQ_LEN; 4254 } 4255 4256 /* Write request msg to hwrm channel */ 4257 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); 4258 4259 for (i = msg_len; i < max_req_len; i += 4) 4260 writel(0, bp->bar0 + bar_offset + i); 4261 4262 /* Ring channel doorbell */ 4263 writel(1, bp->bar0 + doorbell_offset); 4264 4265 if (!pci_is_enabled(bp->pdev)) 4266 return 0; 4267 4268 if (!timeout) 4269 timeout = DFLT_HWRM_CMD_TIMEOUT; 4270 /* convert timeout to usec */ 4271 timeout *= 1000; 4272 4273 i = 0; 4274 /* Short timeout for the first few iterations: 4275 * number of loops = number of loops for short timeout + 4276 * number of loops for standard timeout. 
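 *
 * In other words, the poll loop below makes HWRM_SHORT_TIMEOUT_COUNTER
 * passes sleeping HWRM_SHORT_MIN_TIMEOUT..HWRM_SHORT_MAX_TIMEOUT usec
 * each, then covers whatever remains of the requested timeout with
 * passes sleeping HWRM_MIN_TIMEOUT..HWRM_MAX_TIMEOUT usec, so tmo_count
 * is an upper bound on the total number of iterations.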
4277 */ 4278 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4279 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4280 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4281 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); 4282 4283 if (intr_process) { 4284 u16 seq_id = bp->hwrm_intr_seq_id; 4285 4286 /* Wait until hwrm response cmpl interrupt is processed */ 4287 while (bp->hwrm_intr_seq_id != (u16)~seq_id && 4288 i++ < tmo_count) { 4289 /* Abort the wait for completion if the FW health 4290 * check has failed. 4291 */ 4292 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4293 return -EBUSY; 4294 /* on first few passes, just barely sleep */ 4295 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4296 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4297 HWRM_SHORT_MAX_TIMEOUT); 4298 else 4299 usleep_range(HWRM_MIN_TIMEOUT, 4300 HWRM_MAX_TIMEOUT); 4301 } 4302 4303 if (bp->hwrm_intr_seq_id != (u16)~seq_id) { 4304 if (!silent) 4305 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 4306 le16_to_cpu(req->req_type)); 4307 return -EBUSY; 4308 } 4309 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4310 HWRM_RESP_LEN_SFT; 4311 valid = resp_addr + len - 1; 4312 } else { 4313 int j; 4314 4315 /* Check if response len is updated */ 4316 for (i = 0; i < tmo_count; i++) { 4317 /* Abort the wait for completion if the FW health 4318 * check has failed. 4319 */ 4320 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4321 return -EBUSY; 4322 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4323 HWRM_RESP_LEN_SFT; 4324 if (len) 4325 break; 4326 /* on first few passes, just barely sleep */ 4327 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 4328 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 4329 HWRM_SHORT_MAX_TIMEOUT); 4330 else 4331 usleep_range(HWRM_MIN_TIMEOUT, 4332 HWRM_MAX_TIMEOUT); 4333 } 4334 4335 if (i >= tmo_count) { 4336 if (!silent) 4337 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 4338 HWRM_TOTAL_TIMEOUT(i), 4339 le16_to_cpu(req->req_type), 4340 le16_to_cpu(req->seq_id), len); 4341 return -EBUSY; 4342 } 4343 4344 /* Last byte of resp contains valid bit */ 4345 valid = resp_addr + len - 1; 4346 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4347 /* make sure we read from updated DMA memory */ 4348 dma_rmb(); 4349 if (*valid) 4350 break; 4351 usleep_range(1, 5); 4352 } 4353 4354 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 4355 if (!silent) 4356 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 4357 HWRM_TOTAL_TIMEOUT(i), 4358 le16_to_cpu(req->req_type), 4359 le16_to_cpu(req->seq_id), len, 4360 *valid); 4361 return -EBUSY; 4362 } 4363 } 4364 4365 /* Zero valid bit for compatibility. Valid bit in an older spec 4366 * may become a new field in a newer spec. We must make sure that 4367 * a new field not implemented by old spec will read zero. 
4368 */ 4369 *valid = 0; 4370 rc = le16_to_cpu(resp->error_code); 4371 if (rc && !silent) 4372 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 4373 le16_to_cpu(resp->req_type), 4374 le16_to_cpu(resp->seq_id), rc); 4375 return bnxt_hwrm_to_stderr(rc); 4376 } 4377 4378 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4379 { 4380 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 4381 } 4382 4383 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4384 int timeout) 4385 { 4386 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4387 } 4388 4389 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 4390 { 4391 int rc; 4392 4393 mutex_lock(&bp->hwrm_cmd_lock); 4394 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 4395 mutex_unlock(&bp->hwrm_cmd_lock); 4396 return rc; 4397 } 4398 4399 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 4400 int timeout) 4401 { 4402 int rc; 4403 4404 mutex_lock(&bp->hwrm_cmd_lock); 4405 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 4406 mutex_unlock(&bp->hwrm_cmd_lock); 4407 return rc; 4408 } 4409 4410 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 4411 bool async_only) 4412 { 4413 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; 4414 struct hwrm_func_drv_rgtr_input req = {0}; 4415 DECLARE_BITMAP(async_events_bmap, 256); 4416 u32 *events = (u32 *)async_events_bmap; 4417 u32 flags; 4418 int rc, i; 4419 4420 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 4421 4422 req.enables = 4423 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 4424 FUNC_DRV_RGTR_REQ_ENABLES_VER | 4425 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4426 4427 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 4428 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 4429 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 4430 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 4431 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 4432 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 4433 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 4434 req.flags = cpu_to_le32(flags); 4435 req.ver_maj_8b = DRV_VER_MAJ; 4436 req.ver_min_8b = DRV_VER_MIN; 4437 req.ver_upd_8b = DRV_VER_UPD; 4438 req.ver_maj = cpu_to_le16(DRV_VER_MAJ); 4439 req.ver_min = cpu_to_le16(DRV_VER_MIN); 4440 req.ver_upd = cpu_to_le16(DRV_VER_UPD); 4441 4442 if (BNXT_PF(bp)) { 4443 u32 data[8]; 4444 int i; 4445 4446 memset(data, 0, sizeof(data)); 4447 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 4448 u16 cmd = bnxt_vf_req_snif[i]; 4449 unsigned int bit, idx; 4450 4451 idx = cmd / 32; 4452 bit = cmd % 32; 4453 data[idx] |= 1 << bit; 4454 } 4455 4456 for (i = 0; i < 8; i++) 4457 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 4458 4459 req.enables |= 4460 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 4461 } 4462 4463 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 4464 req.flags |= cpu_to_le32( 4465 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 4466 4467 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 4468 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 4469 u16 event_id = bnxt_async_events_arr[i]; 4470 4471 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 4472 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4473 continue; 4474 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 4475 } 4476 if (bmap && bmap_size) { 4477 for (i = 0; i < bmap_size; i++) { 4478 if (test_bit(i, bmap)) 4479 __set_bit(i, 
async_events_bmap); 4480 } 4481 } 4482 for (i = 0; i < 8; i++) 4483 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 4484 4485 if (async_only) 4486 req.enables = 4487 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 4488 4489 mutex_lock(&bp->hwrm_cmd_lock); 4490 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4491 if (!rc) { 4492 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 4493 if (resp->flags & 4494 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 4495 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 4496 } 4497 mutex_unlock(&bp->hwrm_cmd_lock); 4498 return rc; 4499 } 4500 4501 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 4502 { 4503 struct hwrm_func_drv_unrgtr_input req = {0}; 4504 4505 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 4506 return 0; 4507 4508 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 4509 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4510 } 4511 4512 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 4513 { 4514 u32 rc = 0; 4515 struct hwrm_tunnel_dst_port_free_input req = {0}; 4516 4517 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 4518 req.tunnel_type = tunnel_type; 4519 4520 switch (tunnel_type) { 4521 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 4522 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; 4523 break; 4524 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 4525 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; 4526 break; 4527 default: 4528 break; 4529 } 4530 4531 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4532 if (rc) 4533 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 4534 rc); 4535 return rc; 4536 } 4537 4538 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 4539 u8 tunnel_type) 4540 { 4541 u32 rc = 0; 4542 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 4543 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4544 4545 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 4546 4547 req.tunnel_type = tunnel_type; 4548 req.tunnel_dst_port_val = port; 4549 4550 mutex_lock(&bp->hwrm_cmd_lock); 4551 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4552 if (rc) { 4553 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", 4554 rc); 4555 goto err_out; 4556 } 4557 4558 switch (tunnel_type) { 4559 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 4560 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 4561 break; 4562 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 4563 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 4564 break; 4565 default: 4566 break; 4567 } 4568 4569 err_out: 4570 mutex_unlock(&bp->hwrm_cmd_lock); 4571 return rc; 4572 } 4573 4574 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 4575 { 4576 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 4577 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4578 4579 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 4580 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4581 4582 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 4583 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 4584 req.mask = cpu_to_le32(vnic->rx_mask); 4585 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4586 } 4587 4588 #ifdef CONFIG_RFS_ACCEL 4589 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 4590 struct bnxt_ntuple_filter *fltr) 4591 { 4592 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 4593 4594 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 4595 req.ntuple_filter_id = fltr->filter_id; 4596 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4597 } 4598 4599 #define BNXT_NTP_FLTR_FLAGS \ 4600 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 4601 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 4602 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 4603 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 4604 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 4605 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 4606 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 4607 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 4608 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 4609 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 4610 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 4611 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 4612 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 4613 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 4614 4615 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 4616 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 4617 4618 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 4619 struct bnxt_ntuple_filter *fltr) 4620 { 4621 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 4622 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 4623 struct flow_keys *keys = &fltr->fkeys; 4624 struct bnxt_vnic_info *vnic; 4625 u32 flags = 0; 4626 int rc = 0; 4627 4628 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 4629 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 4630 4631 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 4632 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 4633 req.dst_id = cpu_to_le16(fltr->rxq); 4634 } else { 4635 vnic = &bp->vnic_info[fltr->rxq + 1]; 4636 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 4637 } 4638 req.flags = cpu_to_le32(flags); 4639 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 4640 4641 req.ethertype = htons(ETH_P_IP); 4642 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 4643 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 4644 req.ip_protocol = keys->basic.ip_proto; 4645 4646 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 4647 int i; 4648 4649 req.ethertype = 
htons(ETH_P_IPV6); 4650 req.ip_addr_type = 4651 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 4652 *(struct in6_addr *)&req.src_ipaddr[0] = 4653 keys->addrs.v6addrs.src; 4654 *(struct in6_addr *)&req.dst_ipaddr[0] = 4655 keys->addrs.v6addrs.dst; 4656 for (i = 0; i < 4; i++) { 4657 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4658 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4659 } 4660 } else { 4661 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 4662 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4663 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 4664 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4665 } 4666 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 4667 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 4668 req.tunnel_type = 4669 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 4670 } 4671 4672 req.src_port = keys->ports.src; 4673 req.src_port_mask = cpu_to_be16(0xffff); 4674 req.dst_port = keys->ports.dst; 4675 req.dst_port_mask = cpu_to_be16(0xffff); 4676 4677 mutex_lock(&bp->hwrm_cmd_lock); 4678 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4679 if (!rc) { 4680 resp = bnxt_get_hwrm_resp_addr(bp, &req); 4681 fltr->filter_id = resp->ntuple_filter_id; 4682 } 4683 mutex_unlock(&bp->hwrm_cmd_lock); 4684 return rc; 4685 } 4686 #endif 4687 4688 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 4689 u8 *mac_addr) 4690 { 4691 u32 rc = 0; 4692 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 4693 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4694 4695 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 4696 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 4697 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 4698 req.flags |= 4699 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 4700 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 4701 req.enables = 4702 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 4703 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 4704 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 4705 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 4706 req.l2_addr_mask[0] = 0xff; 4707 req.l2_addr_mask[1] = 0xff; 4708 req.l2_addr_mask[2] = 0xff; 4709 req.l2_addr_mask[3] = 0xff; 4710 req.l2_addr_mask[4] = 0xff; 4711 req.l2_addr_mask[5] = 0xff; 4712 4713 mutex_lock(&bp->hwrm_cmd_lock); 4714 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4715 if (!rc) 4716 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 4717 resp->l2_filter_id; 4718 mutex_unlock(&bp->hwrm_cmd_lock); 4719 return rc; 4720 } 4721 4722 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 4723 { 4724 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 4725 int rc = 0; 4726 4727 /* Any associated ntuple filters will also be cleared by firmware. 
*/ 4728 mutex_lock(&bp->hwrm_cmd_lock); 4729 for (i = 0; i < num_of_vnics; i++) { 4730 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4731 4732 for (j = 0; j < vnic->uc_filter_count; j++) { 4733 struct hwrm_cfa_l2_filter_free_input req = {0}; 4734 4735 bnxt_hwrm_cmd_hdr_init(bp, &req, 4736 HWRM_CFA_L2_FILTER_FREE, -1, -1); 4737 4738 req.l2_filter_id = vnic->fw_l2_filter_id[j]; 4739 4740 rc = _hwrm_send_message(bp, &req, sizeof(req), 4741 HWRM_CMD_TIMEOUT); 4742 } 4743 vnic->uc_filter_count = 0; 4744 } 4745 mutex_unlock(&bp->hwrm_cmd_lock); 4746 4747 return rc; 4748 } 4749 4750 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 4751 { 4752 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4753 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 4754 struct hwrm_vnic_tpa_cfg_input req = {0}; 4755 4756 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 4757 return 0; 4758 4759 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 4760 4761 if (tpa_flags) { 4762 u16 mss = bp->dev->mtu - 40; 4763 u32 nsegs, n, segs = 0, flags; 4764 4765 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 4766 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 4767 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 4768 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 4769 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 4770 if (tpa_flags & BNXT_FLAG_GRO) 4771 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 4772 4773 req.flags = cpu_to_le32(flags); 4774 4775 req.enables = 4776 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 4777 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 4778 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 4779 4780 /* Number of segs are log2 units, and first packet is not 4781 * included as part of this units. 4782 */ 4783 if (mss <= BNXT_RX_PAGE_SIZE) { 4784 n = BNXT_RX_PAGE_SIZE / mss; 4785 nsegs = (MAX_SKB_FRAGS - 1) * n; 4786 } else { 4787 n = mss / BNXT_RX_PAGE_SIZE; 4788 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 4789 n++; 4790 nsegs = (MAX_SKB_FRAGS - n) / n; 4791 } 4792 4793 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4794 segs = MAX_TPA_SEGS_P5; 4795 max_aggs = bp->max_tpa; 4796 } else { 4797 segs = ilog2(nsegs); 4798 } 4799 req.max_agg_segs = cpu_to_le16(segs); 4800 req.max_aggs = cpu_to_le16(max_aggs); 4801 4802 req.min_agg_len = cpu_to_le32(512); 4803 } 4804 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4805 4806 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4807 } 4808 4809 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 4810 { 4811 struct bnxt_ring_grp_info *grp_info; 4812 4813 grp_info = &bp->grp_info[ring->grp_idx]; 4814 return grp_info->cp_fw_ring_id; 4815 } 4816 4817 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 4818 { 4819 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4820 struct bnxt_napi *bnapi = rxr->bnapi; 4821 struct bnxt_cp_ring_info *cpr; 4822 4823 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL]; 4824 return cpr->cp_ring_struct.fw_ring_id; 4825 } else { 4826 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 4827 } 4828 } 4829 4830 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 4831 { 4832 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4833 struct bnxt_napi *bnapi = txr->bnapi; 4834 struct bnxt_cp_ring_info *cpr; 4835 4836 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL]; 4837 return cpr->cp_ring_struct.fw_ring_id; 4838 } else { 4839 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 4840 } 4841 } 4842 4843 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 4844 { 4845 u32 i, j, max_rings; 4846 struct 
bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4847 struct hwrm_vnic_rss_cfg_input req = {0}; 4848 4849 if ((bp->flags & BNXT_FLAG_CHIP_P5) || 4850 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 4851 return 0; 4852 4853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 4854 if (set_rss) { 4855 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 4856 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 4857 if (vnic->flags & BNXT_VNIC_RSS_FLAG) { 4858 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4859 max_rings = bp->rx_nr_rings - 1; 4860 else 4861 max_rings = bp->rx_nr_rings; 4862 } else { 4863 max_rings = 1; 4864 } 4865 4866 /* Fill the RSS indirection table with ring group ids */ 4867 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { 4868 if (j == max_rings) 4869 j = 0; 4870 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 4871 } 4872 4873 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 4874 req.hash_key_tbl_addr = 4875 cpu_to_le64(vnic->rss_hash_key_dma_addr); 4876 } 4877 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 4878 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4879 } 4880 4881 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) 4882 { 4883 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4884 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings; 4885 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 4886 struct hwrm_vnic_rss_cfg_input req = {0}; 4887 4888 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 4889 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4890 if (!set_rss) { 4891 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4892 return 0; 4893 } 4894 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 4895 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 4896 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 4897 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 4898 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); 4899 for (i = 0, k = 0; i < nr_ctxs; i++) { 4900 __le16 *ring_tbl = vnic->rss_table; 4901 int rc; 4902 4903 req.ring_table_pair_index = i; 4904 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 4905 for (j = 0; j < 64; j++) { 4906 u16 ring_id; 4907 4908 ring_id = rxr->rx_ring_struct.fw_ring_id; 4909 *ring_tbl++ = cpu_to_le16(ring_id); 4910 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 4911 *ring_tbl++ = cpu_to_le16(ring_id); 4912 rxr++; 4913 k++; 4914 if (k == max_rings) { 4915 k = 0; 4916 rxr = &bp->rx_ring[0]; 4917 } 4918 } 4919 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4920 if (rc) 4921 return rc; 4922 } 4923 return 0; 4924 } 4925 4926 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 4927 { 4928 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4929 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 4930 4931 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 4932 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 4933 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 4934 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 4935 req.enables = 4936 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 4937 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 4938 /* thresholds not implemented in firmware yet */ 4939 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 4940 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 4941 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4942 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4943 } 
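/* The helpers below manage VNIC RSS/COS/LB contexts.  A context ID is
 * obtained from firmware with HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, cached in
 * vnic_info[]->fw_rss_cos_lb_ctx[], and released with
 * HWRM_VNIC_RSS_COS_LB_CTX_FREE; INVALID_HW_RING_ID marks an unused slot.
 */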
4944 4945 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 4946 u16 ctx_idx) 4947 { 4948 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 4949 4950 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 4951 req.rss_cos_lb_ctx_id = 4952 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 4953 4954 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4955 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 4956 } 4957 4958 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 4959 { 4960 int i, j; 4961 4962 for (i = 0; i < bp->nr_vnics; i++) { 4963 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4964 4965 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 4966 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 4967 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 4968 } 4969 } 4970 bp->rsscos_nr_ctxs = 0; 4971 } 4972 4973 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 4974 { 4975 int rc; 4976 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 4977 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 4978 bp->hwrm_cmd_resp_addr; 4979 4980 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 4981 -1); 4982 4983 mutex_lock(&bp->hwrm_cmd_lock); 4984 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4985 if (!rc) 4986 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 4987 le16_to_cpu(resp->rss_cos_lb_ctx_id); 4988 mutex_unlock(&bp->hwrm_cmd_lock); 4989 4990 return rc; 4991 } 4992 4993 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 4994 { 4995 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 4996 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 4997 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 4998 } 4999 5000 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 5001 { 5002 unsigned int ring = 0, grp_idx; 5003 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5004 struct hwrm_vnic_cfg_input req = {0}; 5005 u16 def_vlan = 0; 5006 5007 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 5008 5009 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5010 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5011 5012 req.default_rx_ring_id = 5013 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 5014 req.default_cmpl_ring_id = 5015 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 5016 req.enables = 5017 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 5018 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 5019 goto vnic_mru; 5020 } 5021 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 5022 /* Only RSS support for now TBD: COS & LB */ 5023 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 5024 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 5025 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5026 VNIC_CFG_REQ_ENABLES_MRU); 5027 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 5028 req.rss_rule = 5029 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 5030 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 5031 VNIC_CFG_REQ_ENABLES_MRU); 5032 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 5033 } else { 5034 req.rss_rule = cpu_to_le16(0xffff); 5035 } 5036 5037 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 5038 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 5039 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 5040 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 5041 } else { 5042 req.cos_rule = cpu_to_le16(0xffff); 5043 } 5044 5045 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 5046 ring = 0; 5047 
else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 5048 ring = vnic_id - 1; 5049 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 5050 ring = bp->rx_nr_rings - 1; 5051 5052 grp_idx = bp->rx_ring[ring].bnapi->index; 5053 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 5054 req.lb_rule = cpu_to_le16(0xffff); 5055 vnic_mru: 5056 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 5057 VLAN_HLEN); 5058 5059 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 5060 #ifdef CONFIG_BNXT_SRIOV 5061 if (BNXT_VF(bp)) 5062 def_vlan = bp->vf.vlan; 5063 #endif 5064 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 5065 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 5066 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 5067 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 5068 5069 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5070 } 5071 5072 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 5073 { 5074 u32 rc = 0; 5075 5076 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 5077 struct hwrm_vnic_free_input req = {0}; 5078 5079 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 5080 req.vnic_id = 5081 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 5082 5083 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5084 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 5085 } 5086 return rc; 5087 } 5088 5089 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 5090 { 5091 u16 i; 5092 5093 for (i = 0; i < bp->nr_vnics; i++) 5094 bnxt_hwrm_vnic_free_one(bp, i); 5095 } 5096 5097 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 5098 unsigned int start_rx_ring_idx, 5099 unsigned int nr_rings) 5100 { 5101 int rc = 0; 5102 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 5103 struct hwrm_vnic_alloc_input req = {0}; 5104 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5105 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5106 5107 if (bp->flags & BNXT_FLAG_CHIP_P5) 5108 goto vnic_no_ring_grps; 5109 5110 /* map ring groups to this vnic */ 5111 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 5112 grp_idx = bp->rx_ring[i].bnapi->index; 5113 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 5114 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 5115 j, nr_rings); 5116 break; 5117 } 5118 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 5119 } 5120 5121 vnic_no_ring_grps: 5122 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 5123 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 5124 if (vnic_id == 0) 5125 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 5126 5127 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 5128 5129 mutex_lock(&bp->hwrm_cmd_lock); 5130 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5131 if (!rc) 5132 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 5133 mutex_unlock(&bp->hwrm_cmd_lock); 5134 return rc; 5135 } 5136 5137 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 5138 { 5139 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5140 struct hwrm_vnic_qcaps_input req = {0}; 5141 int rc; 5142 5143 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 5144 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP); 5145 if (bp->hwrm_spec_code < 0x10600) 5146 return 0; 5147 5148 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 5149 mutex_lock(&bp->hwrm_cmd_lock); 5150 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 
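/* On success, cache the reported VNIC capabilities: the new RSS
 * capability (non-P5 chips only), RoCE mirroring support, and the TPA v2
 * aggregation limit, which also selects the extended ring stats format.
 */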
5151 if (!rc) { 5152 u32 flags = le32_to_cpu(resp->flags); 5153 5154 if (!(bp->flags & BNXT_FLAG_CHIP_P5) && 5155 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 5156 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 5157 if (flags & 5158 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 5159 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 5160 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 5161 if (bp->max_tpa_v2) 5162 bp->hw_ring_stats_size = 5163 sizeof(struct ctx_hw_stats_ext); 5164 } 5165 mutex_unlock(&bp->hwrm_cmd_lock); 5166 return rc; 5167 } 5168 5169 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 5170 { 5171 u16 i; 5172 u32 rc = 0; 5173 5174 if (bp->flags & BNXT_FLAG_CHIP_P5) 5175 return 0; 5176 5177 mutex_lock(&bp->hwrm_cmd_lock); 5178 for (i = 0; i < bp->rx_nr_rings; i++) { 5179 struct hwrm_ring_grp_alloc_input req = {0}; 5180 struct hwrm_ring_grp_alloc_output *resp = 5181 bp->hwrm_cmd_resp_addr; 5182 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 5183 5184 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 5185 5186 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 5187 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 5188 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 5189 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 5190 5191 rc = _hwrm_send_message(bp, &req, sizeof(req), 5192 HWRM_CMD_TIMEOUT); 5193 if (rc) 5194 break; 5195 5196 bp->grp_info[grp_idx].fw_grp_id = 5197 le32_to_cpu(resp->ring_group_id); 5198 } 5199 mutex_unlock(&bp->hwrm_cmd_lock); 5200 return rc; 5201 } 5202 5203 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) 5204 { 5205 u16 i; 5206 u32 rc = 0; 5207 struct hwrm_ring_grp_free_input req = {0}; 5208 5209 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) 5210 return 0; 5211 5212 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 5213 5214 mutex_lock(&bp->hwrm_cmd_lock); 5215 for (i = 0; i < bp->cp_nr_rings; i++) { 5216 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 5217 continue; 5218 req.ring_group_id = 5219 cpu_to_le32(bp->grp_info[i].fw_grp_id); 5220 5221 rc = _hwrm_send_message(bp, &req, sizeof(req), 5222 HWRM_CMD_TIMEOUT); 5223 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 5224 } 5225 mutex_unlock(&bp->hwrm_cmd_lock); 5226 return rc; 5227 } 5228 5229 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 5230 struct bnxt_ring_struct *ring, 5231 u32 ring_type, u32 map_index) 5232 { 5233 int rc = 0, err = 0; 5234 struct hwrm_ring_alloc_input req = {0}; 5235 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5236 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 5237 struct bnxt_ring_grp_info *grp_info; 5238 u16 ring_id; 5239 5240 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 5241 5242 req.enables = 0; 5243 if (rmem->nr_pages > 1) { 5244 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 5245 /* Page size is in log2 units */ 5246 req.page_size = BNXT_PAGE_SHIFT; 5247 req.page_tbl_depth = 1; 5248 } else { 5249 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 5250 } 5251 req.fbo = 0; 5252 /* Association of ring index with doorbell index and MSIX number */ 5253 req.logical_id = cpu_to_le16(map_index); 5254 5255 switch (ring_type) { 5256 case HWRM_RING_ALLOC_TX: { 5257 struct bnxt_tx_ring_info *txr; 5258 5259 txr = container_of(ring, struct bnxt_tx_ring_info, 5260 tx_ring_struct); 5261 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 5262 /* Association of transmit ring with completion ring */ 5263 grp_info = &bp->grp_info[ring->grp_idx]; 5264 
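/* bnxt_cp_ring_for_tx() resolves to the dedicated TX completion ring
 * (cp_ring_arr[BNXT_TX_HDL]) on P5 chips, or to the ring group's
 * completion ring on older chips.
 */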
req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 5265 req.length = cpu_to_le32(bp->tx_ring_mask + 1); 5266 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5267 req.queue_id = cpu_to_le16(ring->queue_id); 5268 break; 5269 } 5270 case HWRM_RING_ALLOC_RX: 5271 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5272 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 5273 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5274 u16 flags = 0; 5275 5276 /* Association of rx ring with stats context */ 5277 grp_info = &bp->grp_info[ring->grp_idx]; 5278 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 5279 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5280 req.enables |= cpu_to_le32( 5281 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5282 if (NET_IP_ALIGN == 2) 5283 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 5284 req.flags = cpu_to_le16(flags); 5285 } 5286 break; 5287 case HWRM_RING_ALLOC_AGG: 5288 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5289 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 5290 /* Association of agg ring with rx ring */ 5291 grp_info = &bp->grp_info[ring->grp_idx]; 5292 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 5293 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 5294 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 5295 req.enables |= cpu_to_le32( 5296 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 5297 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 5298 } else { 5299 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 5300 } 5301 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 5302 break; 5303 case HWRM_RING_ALLOC_CMPL: 5304 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 5305 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5306 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5307 /* Association of cp ring with nq */ 5308 grp_info = &bp->grp_info[map_index]; 5309 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 5310 req.cq_handle = cpu_to_le64(ring->handle); 5311 req.enables |= cpu_to_le32( 5312 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 5313 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 5314 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5315 } 5316 break; 5317 case HWRM_RING_ALLOC_NQ: 5318 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 5319 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 5320 if (bp->flags & BNXT_FLAG_USING_MSIX) 5321 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 5322 break; 5323 default: 5324 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 5325 ring_type); 5326 return -1; 5327 } 5328 5329 mutex_lock(&bp->hwrm_cmd_lock); 5330 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5331 err = le16_to_cpu(resp->error_code); 5332 ring_id = le16_to_cpu(resp->ring_id); 5333 mutex_unlock(&bp->hwrm_cmd_lock); 5334 5335 if (rc || err) { 5336 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. 
rc:%x err:%x\n", 5337 ring_type, rc, err); 5338 return -EIO; 5339 } 5340 ring->fw_ring_id = ring_id; 5341 return rc; 5342 } 5343 5344 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 5345 { 5346 int rc; 5347 5348 if (BNXT_PF(bp)) { 5349 struct hwrm_func_cfg_input req = {0}; 5350 5351 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 5352 req.fid = cpu_to_le16(0xffff); 5353 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5354 req.async_event_cr = cpu_to_le16(idx); 5355 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5356 } else { 5357 struct hwrm_func_vf_cfg_input req = {0}; 5358 5359 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 5360 req.enables = 5361 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 5362 req.async_event_cr = cpu_to_le16(idx); 5363 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5364 } 5365 return rc; 5366 } 5367 5368 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 5369 u32 map_idx, u32 xid) 5370 { 5371 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5372 if (BNXT_PF(bp)) 5373 db->doorbell = bp->bar1 + 0x10000; 5374 else 5375 db->doorbell = bp->bar1 + 0x4000; 5376 switch (ring_type) { 5377 case HWRM_RING_ALLOC_TX: 5378 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 5379 break; 5380 case HWRM_RING_ALLOC_RX: 5381 case HWRM_RING_ALLOC_AGG: 5382 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 5383 break; 5384 case HWRM_RING_ALLOC_CMPL: 5385 db->db_key64 = DBR_PATH_L2; 5386 break; 5387 case HWRM_RING_ALLOC_NQ: 5388 db->db_key64 = DBR_PATH_L2; 5389 break; 5390 } 5391 db->db_key64 |= (u64)xid << DBR_XID_SFT; 5392 } else { 5393 db->doorbell = bp->bar1 + map_idx * 0x80; 5394 switch (ring_type) { 5395 case HWRM_RING_ALLOC_TX: 5396 db->db_key32 = DB_KEY_TX; 5397 break; 5398 case HWRM_RING_ALLOC_RX: 5399 case HWRM_RING_ALLOC_AGG: 5400 db->db_key32 = DB_KEY_RX; 5401 break; 5402 case HWRM_RING_ALLOC_CMPL: 5403 db->db_key32 = DB_KEY_CP; 5404 break; 5405 } 5406 } 5407 } 5408 5409 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 5410 { 5411 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 5412 int i, rc = 0; 5413 u32 type; 5414 5415 if (bp->flags & BNXT_FLAG_CHIP_P5) 5416 type = HWRM_RING_ALLOC_NQ; 5417 else 5418 type = HWRM_RING_ALLOC_CMPL; 5419 for (i = 0; i < bp->cp_nr_rings; i++) { 5420 struct bnxt_napi *bnapi = bp->bnapi[i]; 5421 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5422 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5423 u32 map_idx = ring->map_idx; 5424 unsigned int vector; 5425 5426 vector = bp->irq_tbl[map_idx].vector; 5427 disable_irq_nosync(vector); 5428 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5429 if (rc) { 5430 enable_irq(vector); 5431 goto err_out; 5432 } 5433 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 5434 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5435 enable_irq(vector); 5436 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 5437 5438 if (!i) { 5439 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 5440 if (rc) 5441 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 5442 } 5443 } 5444 5445 type = HWRM_RING_ALLOC_TX; 5446 for (i = 0; i < bp->tx_nr_rings; i++) { 5447 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5448 struct bnxt_ring_struct *ring; 5449 u32 map_idx; 5450 5451 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5452 struct bnxt_napi *bnapi = txr->bnapi; 5453 struct bnxt_cp_ring_info *cpr, *cpr2; 5454 u32 type2 = HWRM_RING_ALLOC_CMPL; 5455 5456 cpr = &bnapi->cp_ring; 5457 cpr2 = 
cpr->cp_ring_arr[BNXT_TX_HDL]; 5458 ring = &cpr2->cp_ring_struct; 5459 ring->handle = BNXT_TX_HDL; 5460 map_idx = bnapi->index; 5461 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5462 if (rc) 5463 goto err_out; 5464 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5465 ring->fw_ring_id); 5466 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5467 } 5468 ring = &txr->tx_ring_struct; 5469 map_idx = i; 5470 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5471 if (rc) 5472 goto err_out; 5473 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 5474 } 5475 5476 type = HWRM_RING_ALLOC_RX; 5477 for (i = 0; i < bp->rx_nr_rings; i++) { 5478 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5479 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5480 struct bnxt_napi *bnapi = rxr->bnapi; 5481 u32 map_idx = bnapi->index; 5482 5483 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5484 if (rc) 5485 goto err_out; 5486 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 5487 /* If we have agg rings, post agg buffers first. */ 5488 if (!agg_rings) 5489 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5490 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 5491 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5492 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5493 u32 type2 = HWRM_RING_ALLOC_CMPL; 5494 struct bnxt_cp_ring_info *cpr2; 5495 5496 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; 5497 ring = &cpr2->cp_ring_struct; 5498 ring->handle = BNXT_RX_HDL; 5499 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 5500 if (rc) 5501 goto err_out; 5502 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 5503 ring->fw_ring_id); 5504 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 5505 } 5506 } 5507 5508 if (agg_rings) { 5509 type = HWRM_RING_ALLOC_AGG; 5510 for (i = 0; i < bp->rx_nr_rings; i++) { 5511 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5512 struct bnxt_ring_struct *ring = 5513 &rxr->rx_agg_ring_struct; 5514 u32 grp_idx = ring->grp_idx; 5515 u32 map_idx = grp_idx + bp->rx_nr_rings; 5516 5517 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 5518 if (rc) 5519 goto err_out; 5520 5521 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 5522 ring->fw_ring_id); 5523 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 5524 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5525 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 5526 } 5527 } 5528 err_out: 5529 return rc; 5530 } 5531 5532 static int hwrm_ring_free_send_msg(struct bnxt *bp, 5533 struct bnxt_ring_struct *ring, 5534 u32 ring_type, int cmpl_ring_id) 5535 { 5536 int rc; 5537 struct hwrm_ring_free_input req = {0}; 5538 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 5539 u16 error_code; 5540 5541 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 5542 return 0; 5543 5544 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 5545 req.ring_type = ring_type; 5546 req.ring_id = cpu_to_le16(ring->fw_ring_id); 5547 5548 mutex_lock(&bp->hwrm_cmd_lock); 5549 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5550 error_code = le16_to_cpu(resp->error_code); 5551 mutex_unlock(&bp->hwrm_cmd_lock); 5552 5553 if (rc || error_code) { 5554 netdev_err(bp->dev, "hwrm_ring_free type %d failed. 
rc:%x err:%x\n", 5555 ring_type, rc, error_code); 5556 return -EIO; 5557 } 5558 return 0; 5559 } 5560 5561 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 5562 { 5563 u32 type; 5564 int i; 5565 5566 if (!bp->bnapi) 5567 return; 5568 5569 for (i = 0; i < bp->tx_nr_rings; i++) { 5570 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5571 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 5572 5573 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5574 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 5575 5576 hwrm_ring_free_send_msg(bp, ring, 5577 RING_FREE_REQ_RING_TYPE_TX, 5578 close_path ? cmpl_ring_id : 5579 INVALID_HW_RING_ID); 5580 ring->fw_ring_id = INVALID_HW_RING_ID; 5581 } 5582 } 5583 5584 for (i = 0; i < bp->rx_nr_rings; i++) { 5585 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5586 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5587 u32 grp_idx = rxr->bnapi->index; 5588 5589 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5590 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5591 5592 hwrm_ring_free_send_msg(bp, ring, 5593 RING_FREE_REQ_RING_TYPE_RX, 5594 close_path ? cmpl_ring_id : 5595 INVALID_HW_RING_ID); 5596 ring->fw_ring_id = INVALID_HW_RING_ID; 5597 bp->grp_info[grp_idx].rx_fw_ring_id = 5598 INVALID_HW_RING_ID; 5599 } 5600 } 5601 5602 if (bp->flags & BNXT_FLAG_CHIP_P5) 5603 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 5604 else 5605 type = RING_FREE_REQ_RING_TYPE_RX; 5606 for (i = 0; i < bp->rx_nr_rings; i++) { 5607 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5608 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 5609 u32 grp_idx = rxr->bnapi->index; 5610 5611 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5612 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5613 5614 hwrm_ring_free_send_msg(bp, ring, type, 5615 close_path ? cmpl_ring_id : 5616 INVALID_HW_RING_ID); 5617 ring->fw_ring_id = INVALID_HW_RING_ID; 5618 bp->grp_info[grp_idx].agg_fw_ring_id = 5619 INVALID_HW_RING_ID; 5620 } 5621 } 5622 5623 /* The completion rings are about to be freed. After that the 5624 * IRQ doorbell will not work anymore. So we need to disable 5625 * IRQ here. 
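 * bnxt_disable_int_sync() raises bp->intr_sem, rings each NQ/completion
 * doorbell via bnxt_db_nq() without re-arming the interrupt, and then
 * calls synchronize_irq() on every vector so no handler is still running.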
5626 */ 5627 bnxt_disable_int_sync(bp); 5628 5629 if (bp->flags & BNXT_FLAG_CHIP_P5) 5630 type = RING_FREE_REQ_RING_TYPE_NQ; 5631 else 5632 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 5633 for (i = 0; i < bp->cp_nr_rings; i++) { 5634 struct bnxt_napi *bnapi = bp->bnapi[i]; 5635 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5636 struct bnxt_ring_struct *ring; 5637 int j; 5638 5639 for (j = 0; j < 2; j++) { 5640 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 5641 5642 if (cpr2) { 5643 ring = &cpr2->cp_ring_struct; 5644 if (ring->fw_ring_id == INVALID_HW_RING_ID) 5645 continue; 5646 hwrm_ring_free_send_msg(bp, ring, 5647 RING_FREE_REQ_RING_TYPE_L2_CMPL, 5648 INVALID_HW_RING_ID); 5649 ring->fw_ring_id = INVALID_HW_RING_ID; 5650 } 5651 } 5652 ring = &cpr->cp_ring_struct; 5653 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5654 hwrm_ring_free_send_msg(bp, ring, type, 5655 INVALID_HW_RING_ID); 5656 ring->fw_ring_id = INVALID_HW_RING_ID; 5657 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 5658 } 5659 } 5660 } 5661 5662 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5663 bool shared); 5664 5665 static int bnxt_hwrm_get_rings(struct bnxt *bp) 5666 { 5667 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5668 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5669 struct hwrm_func_qcfg_input req = {0}; 5670 int rc; 5671 5672 if (bp->hwrm_spec_code < 0x10601) 5673 return 0; 5674 5675 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5676 req.fid = cpu_to_le16(0xffff); 5677 mutex_lock(&bp->hwrm_cmd_lock); 5678 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5679 if (rc) { 5680 mutex_unlock(&bp->hwrm_cmd_lock); 5681 return rc; 5682 } 5683 5684 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5685 if (BNXT_NEW_RM(bp)) { 5686 u16 cp, stats; 5687 5688 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 5689 hw_resc->resv_hw_ring_grps = 5690 le32_to_cpu(resp->alloc_hw_ring_grps); 5691 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 5692 cp = le16_to_cpu(resp->alloc_cmpl_rings); 5693 stats = le16_to_cpu(resp->alloc_stat_ctx); 5694 hw_resc->resv_irqs = cp; 5695 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5696 int rx = hw_resc->resv_rx_rings; 5697 int tx = hw_resc->resv_tx_rings; 5698 5699 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5700 rx >>= 1; 5701 if (cp < (rx + tx)) { 5702 bnxt_trim_rings(bp, &rx, &tx, cp, false); 5703 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5704 rx <<= 1; 5705 hw_resc->resv_rx_rings = rx; 5706 hw_resc->resv_tx_rings = tx; 5707 } 5708 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 5709 hw_resc->resv_hw_ring_grps = rx; 5710 } 5711 hw_resc->resv_cp_rings = cp; 5712 hw_resc->resv_stat_ctxs = stats; 5713 } 5714 mutex_unlock(&bp->hwrm_cmd_lock); 5715 return 0; 5716 } 5717 5718 /* Caller must hold bp->hwrm_cmd_lock */ 5719 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 5720 { 5721 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5722 struct hwrm_func_qcfg_input req = {0}; 5723 int rc; 5724 5725 if (bp->hwrm_spec_code < 0x10601) 5726 return 0; 5727 5728 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5729 req.fid = cpu_to_le16(fid); 5730 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5731 if (!rc) 5732 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5733 5734 return rc; 5735 } 5736 5737 static bool bnxt_rfs_supported(struct bnxt *bp); 5738 5739 static void 5740 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, 5741 int 
tx_rings, int rx_rings, int ring_grps, 5742 int cp_rings, int stats, int vnics) 5743 { 5744 u32 enables = 0; 5745 5746 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); 5747 req->fid = cpu_to_le16(0xffff); 5748 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5749 req->num_tx_rings = cpu_to_le16(tx_rings); 5750 if (BNXT_NEW_RM(bp)) { 5751 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 5752 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5753 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5754 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 5755 enables |= tx_rings + ring_grps ? 5756 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5757 enables |= rx_rings ? 5758 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5759 } else { 5760 enables |= cp_rings ? 5761 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5762 enables |= ring_grps ? 5763 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 5764 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5765 } 5766 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 5767 5768 req->num_rx_rings = cpu_to_le16(rx_rings); 5769 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5770 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5771 req->num_msix = cpu_to_le16(cp_rings); 5772 req->num_rsscos_ctxs = 5773 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5774 } else { 5775 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5776 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5777 req->num_rsscos_ctxs = cpu_to_le16(1); 5778 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 5779 bnxt_rfs_supported(bp)) 5780 req->num_rsscos_ctxs = 5781 cpu_to_le16(ring_grps + 1); 5782 } 5783 req->num_stat_ctxs = cpu_to_le16(stats); 5784 req->num_vnics = cpu_to_le16(vnics); 5785 } 5786 req->enables = cpu_to_le32(enables); 5787 } 5788 5789 static void 5790 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, 5791 struct hwrm_func_vf_cfg_input *req, int tx_rings, 5792 int rx_rings, int ring_grps, int cp_rings, 5793 int stats, int vnics) 5794 { 5795 u32 enables = 0; 5796 5797 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); 5798 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5799 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 5800 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5801 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5802 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5803 enables |= tx_rings + ring_grps ? 5804 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5805 } else { 5806 enables |= cp_rings ? 5807 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 5808 enables |= ring_grps ? 5809 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 5810 } 5811 enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 5812 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 5813 5814 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 5815 req->num_tx_rings = cpu_to_le16(tx_rings); 5816 req->num_rx_rings = cpu_to_le16(rx_rings); 5817 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5818 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5819 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5820 } else { 5821 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5822 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5823 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 5824 } 5825 req->num_stat_ctxs = cpu_to_le16(stats); 5826 req->num_vnics = cpu_to_le16(vnics); 5827 5828 req->enables = cpu_to_le32(enables); 5829 } 5830 5831 static int 5832 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5833 int ring_grps, int cp_rings, int stats, int vnics) 5834 { 5835 struct hwrm_func_cfg_input req = {0}; 5836 int rc; 5837 5838 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5839 cp_rings, stats, vnics); 5840 if (!req.enables) 5841 return 0; 5842 5843 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5844 if (rc) 5845 return rc; 5846 5847 if (bp->hwrm_spec_code < 0x10601) 5848 bp->hw_resc.resv_tx_rings = tx_rings; 5849 5850 rc = bnxt_hwrm_get_rings(bp); 5851 return rc; 5852 } 5853 5854 static int 5855 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5856 int ring_grps, int cp_rings, int stats, int vnics) 5857 { 5858 struct hwrm_func_vf_cfg_input req = {0}; 5859 int rc; 5860 5861 if (!BNXT_NEW_RM(bp)) { 5862 bp->hw_resc.resv_tx_rings = tx_rings; 5863 return 0; 5864 } 5865 5866 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5867 cp_rings, stats, vnics); 5868 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5869 if (rc) 5870 return rc; 5871 5872 rc = bnxt_hwrm_get_rings(bp); 5873 return rc; 5874 } 5875 5876 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 5877 int cp, int stat, int vnic) 5878 { 5879 if (BNXT_PF(bp)) 5880 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 5881 vnic); 5882 else 5883 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 5884 vnic); 5885 } 5886 5887 int bnxt_nq_rings_in_use(struct bnxt *bp) 5888 { 5889 int cp = bp->cp_nr_rings; 5890 int ulp_msix, ulp_base; 5891 5892 ulp_msix = bnxt_get_ulp_msix_num(bp); 5893 if (ulp_msix) { 5894 ulp_base = bnxt_get_ulp_msix_base(bp); 5895 cp += ulp_msix; 5896 if ((ulp_base + ulp_msix) > cp) 5897 cp = ulp_base + ulp_msix; 5898 } 5899 return cp; 5900 } 5901 5902 static int bnxt_cp_rings_in_use(struct bnxt *bp) 5903 { 5904 int cp; 5905 5906 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 5907 return bnxt_nq_rings_in_use(bp); 5908 5909 cp = bp->tx_nr_rings + bp->rx_nr_rings; 5910 return cp; 5911 } 5912 5913 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 5914 { 5915 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); 5916 int cp = bp->cp_nr_rings; 5917 5918 if (!ulp_stat) 5919 return cp; 5920 5921 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) 5922 return bnxt_get_ulp_msix_base(bp) + ulp_stat; 5923 5924 return cp + ulp_stat; 5925 } 5926 5927 static bool bnxt_need_reserve_rings(struct bnxt *bp) 5928 { 5929 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5930 int cp = bnxt_cp_rings_in_use(bp); 5931 int nq = bnxt_nq_rings_in_use(bp); 5932 int rx = bp->rx_nr_rings, stat; 5933 int vnic = 1, grp = rx; 5934 5935 if (bp->hwrm_spec_code < 0x10601) 5936 return false; 
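	/* Illustrative example (hypothetical values, not from the original
	 * source): with rx_nr_rings = 8, RFS enabled on a pre-P5 chip needs
	 * vnic = 8 + 1 = 9 VNICs (one per RX ring plus the default VNIC),
	 * and aggregation rings double the RX ring requirement to 16.  The
	 * checks below report that a new reservation is needed as soon as
	 * any firmware-reserved count differs from these computed needs.
	 */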
5937 5938 if (hw_resc->resv_tx_rings != bp->tx_nr_rings) 5939 return true; 5940 5941 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5942 vnic = rx + 1; 5943 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5944 rx <<= 1; 5945 stat = bnxt_get_func_stat_ctxs(bp); 5946 if (BNXT_NEW_RM(bp) && 5947 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 5948 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 5949 (hw_resc->resv_hw_ring_grps != grp && 5950 !(bp->flags & BNXT_FLAG_CHIP_P5)))) 5951 return true; 5952 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && 5953 hw_resc->resv_irqs != nq) 5954 return true; 5955 return false; 5956 } 5957 5958 static int __bnxt_reserve_rings(struct bnxt *bp) 5959 { 5960 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5961 int cp = bnxt_nq_rings_in_use(bp); 5962 int tx = bp->tx_nr_rings; 5963 int rx = bp->rx_nr_rings; 5964 int grp, rx_rings, rc; 5965 int vnic = 1, stat; 5966 bool sh = false; 5967 5968 if (!bnxt_need_reserve_rings(bp)) 5969 return 0; 5970 5971 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5972 sh = true; 5973 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5974 vnic = rx + 1; 5975 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5976 rx <<= 1; 5977 grp = bp->rx_nr_rings; 5978 stat = bnxt_get_func_stat_ctxs(bp); 5979 5980 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 5981 if (rc) 5982 return rc; 5983 5984 tx = hw_resc->resv_tx_rings; 5985 if (BNXT_NEW_RM(bp)) { 5986 rx = hw_resc->resv_rx_rings; 5987 cp = hw_resc->resv_irqs; 5988 grp = hw_resc->resv_hw_ring_grps; 5989 vnic = hw_resc->resv_vnics; 5990 stat = hw_resc->resv_stat_ctxs; 5991 } 5992 5993 rx_rings = rx; 5994 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5995 if (rx >= 2) { 5996 rx_rings = rx >> 1; 5997 } else { 5998 if (netif_running(bp->dev)) 5999 return -ENOMEM; 6000 6001 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 6002 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 6003 bp->dev->hw_features &= ~NETIF_F_LRO; 6004 bp->dev->features &= ~NETIF_F_LRO; 6005 bnxt_set_ring_params(bp); 6006 } 6007 } 6008 rx_rings = min_t(int, rx_rings, grp); 6009 cp = min_t(int, cp, bp->cp_nr_rings); 6010 if (stat > bnxt_get_ulp_stat_ctxs(bp)) 6011 stat -= bnxt_get_ulp_stat_ctxs(bp); 6012 cp = min_t(int, cp, stat); 6013 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 6014 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6015 rx = rx_rings << 1; 6016 cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; 6017 bp->tx_nr_rings = tx; 6018 bp->rx_nr_rings = rx_rings; 6019 bp->cp_nr_rings = cp; 6020 6021 if (!tx || !rx || !cp || !grp || !vnic || !stat) 6022 return -ENOMEM; 6023 6024 return rc; 6025 } 6026 6027 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6028 int ring_grps, int cp_rings, int stats, 6029 int vnics) 6030 { 6031 struct hwrm_func_vf_cfg_input req = {0}; 6032 u32 flags; 6033 int rc; 6034 6035 if (!BNXT_NEW_RM(bp)) 6036 return 0; 6037 6038 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6039 cp_rings, stats, vnics); 6040 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 6041 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6042 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6043 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6044 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 6045 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 6046 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6047 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6048 6049 req.flags = cpu_to_le32(flags); 6050 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6051 return rc; 6052 } 6053 6054 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6055 int ring_grps, int cp_rings, int stats, 6056 int vnics) 6057 { 6058 struct hwrm_func_cfg_input req = {0}; 6059 u32 flags; 6060 int rc; 6061 6062 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 6063 cp_rings, stats, vnics); 6064 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 6065 if (BNXT_NEW_RM(bp)) { 6066 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 6067 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 6068 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 6069 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 6070 if (bp->flags & BNXT_FLAG_CHIP_P5) 6071 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 6072 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 6073 else 6074 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 6075 } 6076 6077 req.flags = cpu_to_le32(flags); 6078 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6079 return rc; 6080 } 6081 6082 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 6083 int ring_grps, int cp_rings, int stats, 6084 int vnics) 6085 { 6086 if (bp->hwrm_spec_code < 0x10801) 6087 return 0; 6088 6089 if (BNXT_PF(bp)) 6090 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 6091 ring_grps, cp_rings, stats, 6092 vnics); 6093 6094 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 6095 cp_rings, stats, vnics); 6096 } 6097 6098 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 6099 { 6100 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6101 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6102 struct hwrm_ring_aggint_qcaps_input req = {0}; 6103 int rc; 6104 6105 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 6106 coal_cap->num_cmpl_dma_aggr_max = 63; 6107 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 6108 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 6109 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 6110 coal_cap->int_lat_tmr_min_max = 65535; 6111 coal_cap->int_lat_tmr_max_max = 65535; 6112 coal_cap->num_cmpl_aggr_int_max = 65535; 6113 coal_cap->timer_units = 80; 6114 6115 if (bp->hwrm_spec_code < 0x10902) 6116 return; 6117 6118 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); 6119 mutex_lock(&bp->hwrm_cmd_lock); 6120 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6121 if (!rc) { 6122 
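		/* Editorial note: the firmware-reported limits copied below
		 * override the legacy defaults initialized above (timer_units
		 * = 80, num_cmpl_dma_aggr_max = 63, ...).  As a worked example
		 * for the helper that consumes timer_units, a hypothetical
		 * 12 usec coalescing setting maps to 12 * 1000 / 80 = 150
		 * internal timer units at the default granularity.
		 */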
coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 6123 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 6124 coal_cap->num_cmpl_dma_aggr_max = 6125 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 6126 coal_cap->num_cmpl_dma_aggr_during_int_max = 6127 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 6128 coal_cap->cmpl_aggr_dma_tmr_max = 6129 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 6130 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 6131 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 6132 coal_cap->int_lat_tmr_min_max = 6133 le16_to_cpu(resp->int_lat_tmr_min_max); 6134 coal_cap->int_lat_tmr_max_max = 6135 le16_to_cpu(resp->int_lat_tmr_max_max); 6136 coal_cap->num_cmpl_aggr_int_max = 6137 le16_to_cpu(resp->num_cmpl_aggr_int_max); 6138 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 6139 } 6140 mutex_unlock(&bp->hwrm_cmd_lock); 6141 } 6142 6143 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 6144 { 6145 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6146 6147 return usec * 1000 / coal_cap->timer_units; 6148 } 6149 6150 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 6151 struct bnxt_coal *hw_coal, 6152 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 6153 { 6154 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6155 u32 cmpl_params = coal_cap->cmpl_params; 6156 u16 val, tmr, max, flags = 0; 6157 6158 max = hw_coal->bufs_per_record * 128; 6159 if (hw_coal->budget) 6160 max = hw_coal->bufs_per_record * hw_coal->budget; 6161 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 6162 6163 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 6164 req->num_cmpl_aggr_int = cpu_to_le16(val); 6165 6166 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 6167 req->num_cmpl_dma_aggr = cpu_to_le16(val); 6168 6169 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 6170 coal_cap->num_cmpl_dma_aggr_during_int_max); 6171 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 6172 6173 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 6174 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 6175 req->int_lat_tmr_max = cpu_to_le16(tmr); 6176 6177 /* min timer set to 1/2 of interrupt timer */ 6178 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 6179 val = tmr / 2; 6180 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 6181 req->int_lat_tmr_min = cpu_to_le16(val); 6182 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6183 } 6184 6185 /* buf timer set to 1/4 of interrupt timer */ 6186 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 6187 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 6188 6189 if (cmpl_params & 6190 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 6191 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 6192 val = clamp_t(u16, tmr, 1, 6193 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 6194 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 6195 req->enables |= 6196 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 6197 } 6198 6199 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 6200 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 6201 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 6202 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 6203 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 6204 req->flags = cpu_to_le16(flags); 6205 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 6206 } 6207 6208 /* Caller holds bp->hwrm_cmd_lock */ 6209 static int 
__bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 6210 struct bnxt_coal *hw_coal) 6211 { 6212 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; 6213 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6214 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 6215 u32 nq_params = coal_cap->nq_params; 6216 u16 tmr; 6217 6218 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 6219 return 0; 6220 6221 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, 6222 -1, -1); 6223 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 6224 req.flags = 6225 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 6226 6227 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 6228 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 6229 req.int_lat_tmr_min = cpu_to_le16(tmr); 6230 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 6231 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6232 } 6233 6234 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 6235 { 6236 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; 6237 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6238 struct bnxt_coal coal; 6239 6240 /* Tick values in micro seconds. 6241 * 1 coal_buf x bufs_per_record = 1 completion record. 6242 */ 6243 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 6244 6245 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 6246 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 6247 6248 if (!bnapi->rx_ring) 6249 return -ENODEV; 6250 6251 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6252 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6253 6254 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); 6255 6256 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 6257 6258 return hwrm_send_message(bp, &req_rx, sizeof(req_rx), 6259 HWRM_CMD_TIMEOUT); 6260 } 6261 6262 int bnxt_hwrm_set_coal(struct bnxt *bp) 6263 { 6264 int i, rc = 0; 6265 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 6266 req_tx = {0}, *req; 6267 6268 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 6269 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6270 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 6271 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 6272 6273 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); 6274 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); 6275 6276 mutex_lock(&bp->hwrm_cmd_lock); 6277 for (i = 0; i < bp->cp_nr_rings; i++) { 6278 struct bnxt_napi *bnapi = bp->bnapi[i]; 6279 struct bnxt_coal *hw_coal; 6280 u16 ring_id; 6281 6282 req = &req_rx; 6283 if (!bnapi->rx_ring) { 6284 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6285 req = &req_tx; 6286 } else { 6287 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 6288 } 6289 req->ring_id = cpu_to_le16(ring_id); 6290 6291 rc = _hwrm_send_message(bp, req, sizeof(*req), 6292 HWRM_CMD_TIMEOUT); 6293 if (rc) 6294 break; 6295 6296 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 6297 continue; 6298 6299 if (bnapi->rx_ring && bnapi->tx_ring) { 6300 req = &req_tx; 6301 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 6302 req->ring_id = cpu_to_le16(ring_id); 6303 rc = _hwrm_send_message(bp, req, sizeof(*req), 6304 HWRM_CMD_TIMEOUT); 6305 if (rc) 6306 break; 6307 } 6308 if (bnapi->rx_ring) 6309 hw_coal = &bp->rx_coal; 6310 else 6311 hw_coal = &bp->tx_coal; 6312 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 6313 } 6314 mutex_unlock(&bp->hwrm_cmd_lock); 6315 return rc; 6316 } 6317 6318 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 6319 { 6320 int rc = 
0, i; 6321 struct hwrm_stat_ctx_free_input req = {0}; 6322 6323 if (!bp->bnapi) 6324 return 0; 6325 6326 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6327 return 0; 6328 6329 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 6330 6331 mutex_lock(&bp->hwrm_cmd_lock); 6332 for (i = 0; i < bp->cp_nr_rings; i++) { 6333 struct bnxt_napi *bnapi = bp->bnapi[i]; 6334 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6335 6336 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 6337 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 6338 6339 rc = _hwrm_send_message(bp, &req, sizeof(req), 6340 HWRM_CMD_TIMEOUT); 6341 6342 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 6343 } 6344 } 6345 mutex_unlock(&bp->hwrm_cmd_lock); 6346 return rc; 6347 } 6348 6349 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 6350 { 6351 int rc = 0, i; 6352 struct hwrm_stat_ctx_alloc_input req = {0}; 6353 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 6354 6355 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6356 return 0; 6357 6358 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 6359 6360 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 6361 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 6362 6363 mutex_lock(&bp->hwrm_cmd_lock); 6364 for (i = 0; i < bp->cp_nr_rings; i++) { 6365 struct bnxt_napi *bnapi = bp->bnapi[i]; 6366 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6367 6368 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 6369 6370 rc = _hwrm_send_message(bp, &req, sizeof(req), 6371 HWRM_CMD_TIMEOUT); 6372 if (rc) 6373 break; 6374 6375 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 6376 6377 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 6378 } 6379 mutex_unlock(&bp->hwrm_cmd_lock); 6380 return rc; 6381 } 6382 6383 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 6384 { 6385 struct hwrm_func_qcfg_input req = {0}; 6386 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 6387 u16 flags; 6388 int rc; 6389 6390 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 6391 req.fid = cpu_to_le16(0xffff); 6392 mutex_lock(&bp->hwrm_cmd_lock); 6393 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6394 if (rc) 6395 goto func_qcfg_exit; 6396 6397 #ifdef CONFIG_BNXT_SRIOV 6398 if (BNXT_VF(bp)) { 6399 struct bnxt_vf_info *vf = &bp->vf; 6400 6401 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 6402 } else { 6403 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 6404 } 6405 #endif 6406 flags = le16_to_cpu(resp->flags); 6407 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 6408 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 6409 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 6410 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 6411 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 6412 } 6413 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 6414 bp->flags |= BNXT_FLAG_MULTI_HOST; 6415 6416 switch (resp->port_partition_type) { 6417 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 6418 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 6419 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 6420 bp->port_partition_type = resp->port_partition_type; 6421 break; 6422 } 6423 if (bp->hwrm_spec_code < 0x10707 || 6424 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 6425 bp->br_mode = BRIDGE_MODE_VEB; 6426 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 6427 bp->br_mode = BRIDGE_MODE_VEPA; 6428 else 6429 bp->br_mode = BRIDGE_MODE_UNDEF; 6430 6431 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 6432 if (!bp->max_mtu) 
6433 bp->max_mtu = BNXT_MAX_MTU; 6434 6435 func_qcfg_exit: 6436 mutex_unlock(&bp->hwrm_cmd_lock); 6437 return rc; 6438 } 6439 6440 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 6441 { 6442 struct hwrm_func_backing_store_qcaps_input req = {0}; 6443 struct hwrm_func_backing_store_qcaps_output *resp = 6444 bp->hwrm_cmd_resp_addr; 6445 int rc; 6446 6447 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 6448 return 0; 6449 6450 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); 6451 mutex_lock(&bp->hwrm_cmd_lock); 6452 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6453 if (!rc) { 6454 struct bnxt_ctx_pg_info *ctx_pg; 6455 struct bnxt_ctx_mem_info *ctx; 6456 int i; 6457 6458 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 6459 if (!ctx) { 6460 rc = -ENOMEM; 6461 goto ctx_err; 6462 } 6463 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL); 6464 if (!ctx_pg) { 6465 kfree(ctx); 6466 rc = -ENOMEM; 6467 goto ctx_err; 6468 } 6469 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++) 6470 ctx->tqm_mem[i] = ctx_pg; 6471 6472 bp->ctx = ctx; 6473 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); 6474 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 6475 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 6476 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size); 6477 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 6478 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries); 6479 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size); 6480 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 6481 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries); 6482 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size); 6483 ctx->vnic_max_vnic_entries = 6484 le16_to_cpu(resp->vnic_max_vnic_entries); 6485 ctx->vnic_max_ring_table_entries = 6486 le16_to_cpu(resp->vnic_max_ring_table_entries); 6487 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size); 6488 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries); 6489 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size); 6490 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size); 6491 ctx->tqm_min_entries_per_ring = 6492 le32_to_cpu(resp->tqm_min_entries_per_ring); 6493 ctx->tqm_max_entries_per_ring = 6494 le32_to_cpu(resp->tqm_max_entries_per_ring); 6495 ctx->tqm_entries_multiple = resp->tqm_entries_multiple; 6496 if (!ctx->tqm_entries_multiple) 6497 ctx->tqm_entries_multiple = 1; 6498 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); 6499 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); 6500 ctx->mrav_num_entries_units = 6501 le16_to_cpu(resp->mrav_num_entries_units); 6502 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); 6503 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); 6504 ctx->ctx_kind_initializer = resp->ctx_kind_initializer; 6505 } else { 6506 rc = 0; 6507 } 6508 ctx_err: 6509 mutex_unlock(&bp->hwrm_cmd_lock); 6510 return rc; 6511 } 6512 6513 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 6514 __le64 *pg_dir) 6515 { 6516 u8 pg_size = 0; 6517 6518 if (BNXT_PAGE_SHIFT == 13) 6519 pg_size = 1 << 4; 6520 else if (BNXT_PAGE_SIZE == 16) 6521 pg_size = 2 << 4; 6522 6523 *pg_attr = pg_size; 6524 if (rmem->depth >= 1) { 6525 if (rmem->depth == 2) 6526 *pg_attr |= 2; 6527 else 6528 *pg_attr |= 1; 6529 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 6530 } else { 6531 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 6532 } 6533 } 6534 6535 #define 
FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 6536 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 6537 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 6538 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 6539 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 6540 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 6541 6542 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 6543 { 6544 struct hwrm_func_backing_store_cfg_input req = {0}; 6545 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6546 struct bnxt_ctx_pg_info *ctx_pg; 6547 __le32 *num_entries; 6548 __le64 *pg_dir; 6549 u32 flags = 0; 6550 u8 *pg_attr; 6551 int i, rc; 6552 u32 ena; 6553 6554 if (!ctx) 6555 return 0; 6556 6557 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); 6558 req.enables = cpu_to_le32(enables); 6559 6560 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 6561 ctx_pg = &ctx->qp_mem; 6562 req.qp_num_entries = cpu_to_le32(ctx_pg->entries); 6563 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); 6564 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); 6565 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); 6566 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6567 &req.qpc_pg_size_qpc_lvl, 6568 &req.qpc_page_dir); 6569 } 6570 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 6571 ctx_pg = &ctx->srq_mem; 6572 req.srq_num_entries = cpu_to_le32(ctx_pg->entries); 6573 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); 6574 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); 6575 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6576 &req.srq_pg_size_srq_lvl, 6577 &req.srq_page_dir); 6578 } 6579 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 6580 ctx_pg = &ctx->cq_mem; 6581 req.cq_num_entries = cpu_to_le32(ctx_pg->entries); 6582 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); 6583 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); 6584 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, 6585 &req.cq_page_dir); 6586 } 6587 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 6588 ctx_pg = &ctx->vnic_mem; 6589 req.vnic_num_vnic_entries = 6590 cpu_to_le16(ctx->vnic_max_vnic_entries); 6591 req.vnic_num_ring_table_entries = 6592 cpu_to_le16(ctx->vnic_max_ring_table_entries); 6593 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); 6594 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6595 &req.vnic_pg_size_vnic_lvl, 6596 &req.vnic_page_dir); 6597 } 6598 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 6599 ctx_pg = &ctx->stat_mem; 6600 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); 6601 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); 6602 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6603 &req.stat_pg_size_stat_lvl, 6604 &req.stat_page_dir); 6605 } 6606 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 6607 ctx_pg = &ctx->mrav_mem; 6608 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); 6609 if (ctx->mrav_num_entries_units) 6610 flags |= 6611 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 6612 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); 6613 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6614 &req.mrav_pg_size_mrav_lvl, 6615 &req.mrav_page_dir); 6616 } 6617 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 6618 ctx_pg = &ctx->tim_mem; 6619 req.tim_num_entries = cpu_to_le32(ctx_pg->entries); 6620 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); 6621 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6622 &req.tim_pg_size_tim_lvl, 6623 &req.tim_page_dir); 6624 } 6625 for (i = 0, num_entries = 
&req.tqm_sp_num_entries, 6626 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, 6627 pg_dir = &req.tqm_sp_page_dir, 6628 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; 6629 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 6630 if (!(enables & ena)) 6631 continue; 6632 6633 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); 6634 ctx_pg = ctx->tqm_mem[i]; 6635 *num_entries = cpu_to_le32(ctx_pg->entries); 6636 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 6637 } 6638 req.flags = cpu_to_le32(flags); 6639 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6640 return rc; 6641 } 6642 6643 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 6644 struct bnxt_ctx_pg_info *ctx_pg) 6645 { 6646 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6647 6648 rmem->page_size = BNXT_PAGE_SIZE; 6649 rmem->pg_arr = ctx_pg->ctx_pg_arr; 6650 rmem->dma_arr = ctx_pg->ctx_dma_arr; 6651 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 6652 if (rmem->depth >= 1) 6653 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 6654 return bnxt_alloc_ring(bp, rmem); 6655 } 6656 6657 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 6658 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 6659 u8 depth, bool use_init_val) 6660 { 6661 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6662 int rc; 6663 6664 if (!mem_size) 6665 return 0; 6666 6667 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6668 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 6669 ctx_pg->nr_pages = 0; 6670 return -EINVAL; 6671 } 6672 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 6673 int nr_tbls, i; 6674 6675 rmem->depth = 2; 6676 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 6677 GFP_KERNEL); 6678 if (!ctx_pg->ctx_pg_tbl) 6679 return -ENOMEM; 6680 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 6681 rmem->nr_pages = nr_tbls; 6682 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 6683 if (rc) 6684 return rc; 6685 for (i = 0; i < nr_tbls; i++) { 6686 struct bnxt_ctx_pg_info *pg_tbl; 6687 6688 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 6689 if (!pg_tbl) 6690 return -ENOMEM; 6691 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 6692 rmem = &pg_tbl->ring_mem; 6693 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 6694 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 6695 rmem->depth = 1; 6696 rmem->nr_pages = MAX_CTX_PAGES; 6697 if (use_init_val) 6698 rmem->init_val = bp->ctx->ctx_kind_initializer; 6699 if (i == (nr_tbls - 1)) { 6700 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 6701 6702 if (rem) 6703 rmem->nr_pages = rem; 6704 } 6705 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 6706 if (rc) 6707 break; 6708 } 6709 } else { 6710 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6711 if (rmem->nr_pages > 1 || depth) 6712 rmem->depth = 1; 6713 if (use_init_val) 6714 rmem->init_val = bp->ctx->ctx_kind_initializer; 6715 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 6716 } 6717 return rc; 6718 } 6719 6720 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 6721 struct bnxt_ctx_pg_info *ctx_pg) 6722 { 6723 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6724 6725 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 6726 ctx_pg->ctx_pg_tbl) { 6727 int i, nr_tbls = rmem->nr_pages; 6728 6729 for (i = 0; i < nr_tbls; i++) { 6730 struct bnxt_ctx_pg_info *pg_tbl; 6731 struct bnxt_ring_mem_info *rmem2; 6732 6733 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 6734 if (!pg_tbl) 6735 continue; 6736 rmem2 = &pg_tbl->ring_mem; 6737 bnxt_free_ring(bp, rmem2); 6738 ctx_pg->ctx_pg_arr[i] = NULL; 6739 kfree(pg_tbl); 6740 ctx_pg->ctx_pg_tbl[i] = NULL; 6741 } 6742 
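		/* Editorial sketch (assumed example values): this path undoes
		 * the two-level layout built by bnxt_alloc_ctx_pg_tbls().
		 * Assuming 4 KiB pages and 8-byte DMA-address entries, one
		 * table page maps up to 512 data pages, so a region needing
		 * 700 pages would use two table entries covering 512 + 188
		 * pages.  The loop above frees the per-table-entry blocks;
		 * the bnxt_free_ring() call further below then releases the
		 * first-level table pages themselves.
		 */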
kfree(ctx_pg->ctx_pg_tbl); 6743 ctx_pg->ctx_pg_tbl = NULL; 6744 } 6745 bnxt_free_ring(bp, rmem); 6746 ctx_pg->nr_pages = 0; 6747 } 6748 6749 static void bnxt_free_ctx_mem(struct bnxt *bp) 6750 { 6751 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6752 int i; 6753 6754 if (!ctx) 6755 return; 6756 6757 if (ctx->tqm_mem[0]) { 6758 for (i = 0; i < bp->max_q + 1; i++) 6759 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); 6760 kfree(ctx->tqm_mem[0]); 6761 ctx->tqm_mem[0] = NULL; 6762 } 6763 6764 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); 6765 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); 6766 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); 6767 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); 6768 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); 6769 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); 6770 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); 6771 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 6772 } 6773 6774 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 6775 { 6776 struct bnxt_ctx_pg_info *ctx_pg; 6777 struct bnxt_ctx_mem_info *ctx; 6778 u32 mem_size, ena, entries; 6779 u32 num_mr, num_ah; 6780 u32 extra_srqs = 0; 6781 u32 extra_qps = 0; 6782 u8 pg_lvl = 1; 6783 int i, rc; 6784 6785 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 6786 if (rc) { 6787 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 6788 rc); 6789 return rc; 6790 } 6791 ctx = bp->ctx; 6792 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 6793 return 0; 6794 6795 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 6796 pg_lvl = 2; 6797 extra_qps = 65536; 6798 extra_srqs = 8192; 6799 } 6800 6801 ctx_pg = &ctx->qp_mem; 6802 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + 6803 extra_qps; 6804 mem_size = ctx->qp_entry_size * ctx_pg->entries; 6805 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6806 if (rc) 6807 return rc; 6808 6809 ctx_pg = &ctx->srq_mem; 6810 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; 6811 mem_size = ctx->srq_entry_size * ctx_pg->entries; 6812 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6813 if (rc) 6814 return rc; 6815 6816 ctx_pg = &ctx->cq_mem; 6817 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; 6818 mem_size = ctx->cq_entry_size * ctx_pg->entries; 6819 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true); 6820 if (rc) 6821 return rc; 6822 6823 ctx_pg = &ctx->vnic_mem; 6824 ctx_pg->entries = ctx->vnic_max_vnic_entries + 6825 ctx->vnic_max_ring_table_entries; 6826 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 6827 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 6828 if (rc) 6829 return rc; 6830 6831 ctx_pg = &ctx->stat_mem; 6832 ctx_pg->entries = ctx->stat_max_entries; 6833 mem_size = ctx->stat_entry_size * ctx_pg->entries; 6834 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true); 6835 if (rc) 6836 return rc; 6837 6838 ena = 0; 6839 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 6840 goto skip_rdma; 6841 6842 ctx_pg = &ctx->mrav_mem; 6843 /* 128K extra is needed to accommodate static AH context 6844 * allocation by f/w. 
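	 * Illustrative breakdown of the values chosen below: num_mr is 256K
	 * and num_ah is 128K, so the MRAV backing store is sized for 384K
	 * entries in total.  When the firmware reports
	 * mrav_num_entries_units, the entry count is re-encoded as a split
	 * field with the MR count (in those units) in the upper 16 bits and
	 * the AH count in the lower 16 bits.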
6845 */ 6846 num_mr = 1024 * 256; 6847 num_ah = 1024 * 128; 6848 ctx_pg->entries = num_mr + num_ah; 6849 mem_size = ctx->mrav_entry_size * ctx_pg->entries; 6850 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true); 6851 if (rc) 6852 return rc; 6853 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 6854 if (ctx->mrav_num_entries_units) 6855 ctx_pg->entries = 6856 ((num_mr / ctx->mrav_num_entries_units) << 16) | 6857 (num_ah / ctx->mrav_num_entries_units); 6858 6859 ctx_pg = &ctx->tim_mem; 6860 ctx_pg->entries = ctx->qp_mem.entries; 6861 mem_size = ctx->tim_entry_size * ctx_pg->entries; 6862 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 6863 if (rc) 6864 return rc; 6865 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 6866 6867 skip_rdma: 6868 entries = ctx->qp_max_l2_entries + extra_qps; 6869 entries = roundup(entries, ctx->tqm_entries_multiple); 6870 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, 6871 ctx->tqm_max_entries_per_ring); 6872 for (i = 0; i < bp->max_q + 1; i++) { 6873 ctx_pg = ctx->tqm_mem[i]; 6874 ctx_pg->entries = entries; 6875 mem_size = ctx->tqm_entry_size * entries; 6876 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false); 6877 if (rc) 6878 return rc; 6879 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 6880 } 6881 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 6882 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 6883 if (rc) { 6884 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 6885 rc); 6886 return rc; 6887 } 6888 ctx->flags |= BNXT_CTX_FLAG_INITED; 6889 return 0; 6890 } 6891 6892 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 6893 { 6894 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6895 struct hwrm_func_resource_qcaps_input req = {0}; 6896 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6897 int rc; 6898 6899 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); 6900 req.fid = cpu_to_le16(0xffff); 6901 6902 mutex_lock(&bp->hwrm_cmd_lock); 6903 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), 6904 HWRM_CMD_TIMEOUT); 6905 if (rc) 6906 goto hwrm_func_resc_qcaps_exit; 6907 6908 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 6909 if (!all) 6910 goto hwrm_func_resc_qcaps_exit; 6911 6912 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 6913 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6914 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 6915 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6916 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 6917 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6918 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 6919 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6920 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 6921 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 6922 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 6923 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6924 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 6925 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6926 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 6927 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6928 6929 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6930 u16 max_msix = le16_to_cpu(resp->max_msix); 6931 6932 hw_resc->max_nqs = max_msix; 6933 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 6934 } 6935 6936 if (BNXT_PF(bp)) { 6937 struct bnxt_pf_info *pf = &bp->pf; 
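		/* Editorial note: the reservation strategy below comes
		 * straight from firmware; any value beyond
		 * BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC is treated as unknown
		 * and falls back to the maximal-reservation strategy.
		 */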
6938 6939 pf->vf_resv_strategy = 6940 le16_to_cpu(resp->vf_reservation_strategy); 6941 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 6942 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 6943 } 6944 hwrm_func_resc_qcaps_exit: 6945 mutex_unlock(&bp->hwrm_cmd_lock); 6946 return rc; 6947 } 6948 6949 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 6950 { 6951 int rc = 0; 6952 struct hwrm_func_qcaps_input req = {0}; 6953 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6954 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6955 u32 flags; 6956 6957 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 6958 req.fid = cpu_to_le16(0xffff); 6959 6960 mutex_lock(&bp->hwrm_cmd_lock); 6961 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6962 if (rc) 6963 goto hwrm_func_qcaps_exit; 6964 6965 flags = le32_to_cpu(resp->flags); 6966 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 6967 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 6968 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 6969 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 6970 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 6971 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 6972 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 6973 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 6974 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 6975 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 6976 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 6977 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 6978 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 6979 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 6980 6981 bp->tx_push_thresh = 0; 6982 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) 6983 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 6984 6985 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6986 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6987 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6988 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6989 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 6990 if (!hw_resc->max_hw_ring_grps) 6991 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 6992 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6993 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6994 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6995 6996 if (BNXT_PF(bp)) { 6997 struct bnxt_pf_info *pf = &bp->pf; 6998 6999 pf->fw_fid = le16_to_cpu(resp->fid); 7000 pf->port_id = le16_to_cpu(resp->port_id); 7001 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 7002 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 7003 pf->max_vfs = le16_to_cpu(resp->max_vfs); 7004 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 7005 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 7006 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 7007 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 7008 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 7009 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 7010 bp->flags &= ~BNXT_FLAG_WOL_CAP; 7011 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 7012 bp->flags |= BNXT_FLAG_WOL_CAP; 7013 } else { 7014 #ifdef CONFIG_BNXT_SRIOV 7015 struct bnxt_vf_info *vf = &bp->vf; 7016 7017 vf->fw_fid = le16_to_cpu(resp->fid); 7018 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 7019 #endif 7020 } 7021 7022 hwrm_func_qcaps_exit: 7023 mutex_unlock(&bp->hwrm_cmd_lock); 7024 return rc; 7025 } 7026 7027 static int 
bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 7028 7029 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 7030 { 7031 int rc; 7032 7033 rc = __bnxt_hwrm_func_qcaps(bp); 7034 if (rc) 7035 return rc; 7036 rc = bnxt_hwrm_queue_qportcfg(bp); 7037 if (rc) { 7038 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 7039 return rc; 7040 } 7041 if (bp->hwrm_spec_code >= 0x10803) { 7042 rc = bnxt_alloc_ctx_mem(bp); 7043 if (rc) 7044 return rc; 7045 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 7046 if (!rc) 7047 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 7048 } 7049 return 0; 7050 } 7051 7052 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 7053 { 7054 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; 7055 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 7056 int rc = 0; 7057 u32 flags; 7058 7059 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 7060 return 0; 7061 7062 resp = bp->hwrm_cmd_resp_addr; 7063 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); 7064 7065 mutex_lock(&bp->hwrm_cmd_lock); 7066 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7067 if (rc) 7068 goto hwrm_cfa_adv_qcaps_exit; 7069 7070 flags = le32_to_cpu(resp->flags); 7071 if (flags & 7072 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 7073 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 7074 7075 hwrm_cfa_adv_qcaps_exit: 7076 mutex_unlock(&bp->hwrm_cmd_lock); 7077 return rc; 7078 } 7079 7080 static int bnxt_map_fw_health_regs(struct bnxt *bp) 7081 { 7082 struct bnxt_fw_health *fw_health = bp->fw_health; 7083 u32 reg_base = 0xffffffff; 7084 int i; 7085 7086 /* Only pre-map the monitoring GRC registers using window 3 */ 7087 for (i = 0; i < 4; i++) { 7088 u32 reg = fw_health->regs[i]; 7089 7090 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 7091 continue; 7092 if (reg_base == 0xffffffff) 7093 reg_base = reg & BNXT_GRC_BASE_MASK; 7094 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 7095 return -ERANGE; 7096 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE + 7097 (reg & BNXT_GRC_OFFSET_MASK); 7098 } 7099 if (reg_base == 0xffffffff) 7100 return 0; 7101 7102 writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 7103 BNXT_FW_HEALTH_WIN_MAP_OFF); 7104 return 0; 7105 } 7106 7107 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 7108 { 7109 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 7110 struct bnxt_fw_health *fw_health = bp->fw_health; 7111 struct hwrm_error_recovery_qcfg_input req = {0}; 7112 int rc, i; 7113 7114 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 7115 return 0; 7116 7117 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); 7118 mutex_lock(&bp->hwrm_cmd_lock); 7119 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7120 if (rc) 7121 goto err_recovery_out; 7122 fw_health->flags = le32_to_cpu(resp->flags); 7123 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 7124 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 7125 rc = -EINVAL; 7126 goto err_recovery_out; 7127 } 7128 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 7129 fw_health->master_func_wait_dsecs = 7130 le32_to_cpu(resp->master_func_wait_period); 7131 fw_health->normal_func_wait_dsecs = 7132 le32_to_cpu(resp->normal_func_wait_period); 7133 fw_health->post_reset_wait_dsecs = 7134 le32_to_cpu(resp->master_func_wait_period_after_reset); 7135 fw_health->post_reset_max_wait_dsecs = 7136 le32_to_cpu(resp->max_bailout_time_after_reset); 7137 fw_health->regs[BNXT_FW_HEALTH_REG] = 7138 
le32_to_cpu(resp->fw_health_status_reg); 7139 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 7140 le32_to_cpu(resp->fw_heartbeat_reg); 7141 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 7142 le32_to_cpu(resp->fw_reset_cnt_reg); 7143 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 7144 le32_to_cpu(resp->reset_inprogress_reg); 7145 fw_health->fw_reset_inprog_reg_mask = 7146 le32_to_cpu(resp->reset_inprogress_reg_mask); 7147 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 7148 if (fw_health->fw_reset_seq_cnt >= 16) { 7149 rc = -EINVAL; 7150 goto err_recovery_out; 7151 } 7152 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 7153 fw_health->fw_reset_seq_regs[i] = 7154 le32_to_cpu(resp->reset_reg[i]); 7155 fw_health->fw_reset_seq_vals[i] = 7156 le32_to_cpu(resp->reset_reg_val[i]); 7157 fw_health->fw_reset_seq_delay_msec[i] = 7158 resp->delay_after_reset[i]; 7159 } 7160 err_recovery_out: 7161 mutex_unlock(&bp->hwrm_cmd_lock); 7162 if (!rc) 7163 rc = bnxt_map_fw_health_regs(bp); 7164 if (rc) 7165 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 7166 return rc; 7167 } 7168 7169 static int bnxt_hwrm_func_reset(struct bnxt *bp) 7170 { 7171 struct hwrm_func_reset_input req = {0}; 7172 7173 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 7174 req.enables = 0; 7175 7176 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 7177 } 7178 7179 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 7180 { 7181 int rc = 0; 7182 struct hwrm_queue_qportcfg_input req = {0}; 7183 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 7184 u8 i, j, *qptr; 7185 bool no_rdma; 7186 7187 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 7188 7189 mutex_lock(&bp->hwrm_cmd_lock); 7190 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7191 if (rc) 7192 goto qportcfg_exit; 7193 7194 if (!resp->max_configurable_queues) { 7195 rc = -EINVAL; 7196 goto qportcfg_exit; 7197 } 7198 bp->max_tc = resp->max_configurable_queues; 7199 bp->max_lltc = resp->max_configurable_lossless_queues; 7200 if (bp->max_tc > BNXT_MAX_QUEUE) 7201 bp->max_tc = BNXT_MAX_QUEUE; 7202 7203 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 7204 qptr = &resp->queue_id0; 7205 for (i = 0, j = 0; i < bp->max_tc; i++) { 7206 bp->q_info[j].queue_id = *qptr; 7207 bp->q_ids[i] = *qptr++; 7208 bp->q_info[j].queue_profile = *qptr++; 7209 bp->tc_to_qidx[j] = j; 7210 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 7211 (no_rdma && BNXT_PF(bp))) 7212 j++; 7213 } 7214 bp->max_q = bp->max_tc; 7215 bp->max_tc = max_t(u8, j, 1); 7216 7217 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 7218 bp->max_tc = 1; 7219 7220 if (bp->max_lltc > bp->max_tc) 7221 bp->max_lltc = bp->max_tc; 7222 7223 qportcfg_exit: 7224 mutex_unlock(&bp->hwrm_cmd_lock); 7225 return rc; 7226 } 7227 7228 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) 7229 { 7230 struct hwrm_ver_get_input req = {0}; 7231 int rc; 7232 7233 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 7234 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 7235 req.hwrm_intf_min = HWRM_VERSION_MINOR; 7236 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 7237 7238 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, 7239 silent); 7240 return rc; 7241 } 7242 7243 static int bnxt_hwrm_ver_get(struct bnxt *bp) 7244 { 7245 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 7246 u32 dev_caps_cfg; 7247 int rc; 7248 7249 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 7250 mutex_lock(&bp->hwrm_cmd_lock); 7251 rc = __bnxt_hwrm_ver_get(bp, false); 7252 if (rc) 
7253 goto hwrm_ver_get_exit; 7254 7255 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 7256 7257 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 7258 resp->hwrm_intf_min_8b << 8 | 7259 resp->hwrm_intf_upd_8b; 7260 if (resp->hwrm_intf_maj_8b < 1) { 7261 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 7262 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 7263 resp->hwrm_intf_upd_8b); 7264 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 7265 } 7266 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", 7267 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, 7268 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); 7269 7270 if (strlen(resp->active_pkg_name)) { 7271 int fw_ver_len = strlen(bp->fw_ver_str); 7272 7273 snprintf(bp->fw_ver_str + fw_ver_len, 7274 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 7275 resp->active_pkg_name); 7276 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 7277 } 7278 7279 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 7280 if (!bp->hwrm_cmd_timeout) 7281 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 7282 7283 if (resp->hwrm_intf_maj_8b >= 1) { 7284 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 7285 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 7286 } 7287 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 7288 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 7289 7290 bp->chip_num = le16_to_cpu(resp->chip_num); 7291 bp->chip_rev = resp->chip_rev; 7292 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 7293 !resp->chip_metal) 7294 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 7295 7296 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 7297 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 7298 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 7299 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 7300 7301 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 7302 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 7303 7304 if (dev_caps_cfg & 7305 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 7306 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 7307 7308 if (dev_caps_cfg & 7309 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 7310 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 7311 7312 if (dev_caps_cfg & 7313 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 7314 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 7315 7316 hwrm_ver_get_exit: 7317 mutex_unlock(&bp->hwrm_cmd_lock); 7318 return rc; 7319 } 7320 7321 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 7322 { 7323 struct hwrm_fw_set_time_input req = {0}; 7324 struct tm tm; 7325 time64_t now = ktime_get_real_seconds(); 7326 7327 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 7328 bp->hwrm_spec_code < 0x10400) 7329 return -EOPNOTSUPP; 7330 7331 time64_to_tm(now, 0, &tm); 7332 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 7333 req.year = cpu_to_le16(1900 + tm.tm_year); 7334 req.month = 1 + tm.tm_mon; 7335 req.day = tm.tm_mday; 7336 req.hour = tm.tm_hour; 7337 req.minute = tm.tm_min; 7338 req.second = tm.tm_sec; 7339 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7340 } 7341 7342 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 7343 { 7344 int rc; 7345 struct bnxt_pf_info *pf = &bp->pf; 7346 struct hwrm_port_qstats_input req = {0}; 7347 7348 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 7349 return 0; 7350 7351 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 7352 req.port_id = cpu_to_le16(pf->port_id); 7353 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 
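	/* Editorial note: the two statistics addresses (TX above, RX below)
	 * point at DMA-coherent buffers mapped when the port-stats buffers
	 * were allocated; HWRM_PORT_QSTATS has the firmware write the TX and
	 * RX port statistics blocks directly into them, so the driver only
	 * needs the return code from hwrm_send_message().
	 */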
7354 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 7355 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7356 return rc; 7357 } 7358 7359 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) 7360 { 7361 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 7362 struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; 7363 struct hwrm_port_qstats_ext_input req = {0}; 7364 struct bnxt_pf_info *pf = &bp->pf; 7365 u32 tx_stat_size; 7366 int rc; 7367 7368 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 7369 return 0; 7370 7371 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); 7372 req.port_id = cpu_to_le16(pf->port_id); 7373 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 7374 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); 7375 tx_stat_size = bp->hw_tx_port_stats_ext ? 7376 sizeof(*bp->hw_tx_port_stats_ext) : 0; 7377 req.tx_stat_size = cpu_to_le16(tx_stat_size); 7378 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); 7379 mutex_lock(&bp->hwrm_cmd_lock); 7380 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7381 if (!rc) { 7382 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; 7383 bp->fw_tx_stats_ext_size = tx_stat_size ? 7384 le16_to_cpu(resp->tx_stat_size) / 8 : 0; 7385 } else { 7386 bp->fw_rx_stats_ext_size = 0; 7387 bp->fw_tx_stats_ext_size = 0; 7388 } 7389 if (bp->fw_tx_stats_ext_size <= 7390 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 7391 mutex_unlock(&bp->hwrm_cmd_lock); 7392 bp->pri2cos_valid = 0; 7393 return rc; 7394 } 7395 7396 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 7397 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 7398 7399 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); 7400 if (!rc) { 7401 struct hwrm_queue_pri2cos_qcfg_output *resp2; 7402 u8 *pri2cos; 7403 int i, j; 7404 7405 resp2 = bp->hwrm_cmd_resp_addr; 7406 pri2cos = &resp2->pri0_cos_queue_id; 7407 for (i = 0; i < 8; i++) { 7408 u8 queue_id = pri2cos[i]; 7409 u8 queue_idx; 7410 7411 /* Per port queue IDs start from 0, 10, 20, etc */ 7412 queue_idx = queue_id % 10; 7413 if (queue_idx > BNXT_MAX_QUEUE) { 7414 bp->pri2cos_valid = false; 7415 goto qstats_done; 7416 } 7417 for (j = 0; j < bp->max_q; j++) { 7418 if (bp->q_ids[j] == queue_id) 7419 bp->pri2cos_idx[i] = queue_idx; 7420 } 7421 } 7422 bp->pri2cos_valid = 1; 7423 } 7424 qstats_done: 7425 mutex_unlock(&bp->hwrm_cmd_lock); 7426 return rc; 7427 } 7428 7429 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) 7430 { 7431 struct hwrm_pcie_qstats_input req = {0}; 7432 7433 if (!(bp->flags & BNXT_FLAG_PCIE_STATS)) 7434 return 0; 7435 7436 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); 7437 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats)); 7438 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map); 7439 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7440 } 7441 7442 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 7443 { 7444 if (bp->vxlan_port_cnt) { 7445 bnxt_hwrm_tunnel_dst_port_free( 7446 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7447 } 7448 bp->vxlan_port_cnt = 0; 7449 if (bp->nge_port_cnt) { 7450 bnxt_hwrm_tunnel_dst_port_free( 7451 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7452 } 7453 bp->nge_port_cnt = 0; 7454 } 7455 7456 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 7457 { 7458 int rc, i; 7459 u32 tpa_flags = 0; 7460 7461 if (set_tpa) 7462 tpa_flags = bp->flags & 
BNXT_FLAG_TPA; 7463 else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 7464 return 0; 7465 for (i = 0; i < bp->nr_vnics; i++) { 7466 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 7467 if (rc) { 7468 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 7469 i, rc); 7470 return rc; 7471 } 7472 } 7473 return 0; 7474 } 7475 7476 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 7477 { 7478 int i; 7479 7480 for (i = 0; i < bp->nr_vnics; i++) 7481 bnxt_hwrm_vnic_set_rss(bp, i, false); 7482 } 7483 7484 static void bnxt_clear_vnic(struct bnxt *bp) 7485 { 7486 if (!bp->vnic_info) 7487 return; 7488 7489 bnxt_hwrm_clear_vnic_filter(bp); 7490 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { 7491 /* clear all RSS setting before free vnic ctx */ 7492 bnxt_hwrm_clear_vnic_rss(bp); 7493 bnxt_hwrm_vnic_ctx_free(bp); 7494 } 7495 /* before free the vnic, undo the vnic tpa settings */ 7496 if (bp->flags & BNXT_FLAG_TPA) 7497 bnxt_set_tpa(bp, false); 7498 bnxt_hwrm_vnic_free(bp); 7499 if (bp->flags & BNXT_FLAG_CHIP_P5) 7500 bnxt_hwrm_vnic_ctx_free(bp); 7501 } 7502 7503 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 7504 bool irq_re_init) 7505 { 7506 bnxt_clear_vnic(bp); 7507 bnxt_hwrm_ring_free(bp, close_path); 7508 bnxt_hwrm_ring_grp_free(bp); 7509 if (irq_re_init) { 7510 bnxt_hwrm_stat_ctx_free(bp); 7511 bnxt_hwrm_free_tunnel_ports(bp); 7512 } 7513 } 7514 7515 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 7516 { 7517 struct hwrm_func_cfg_input req = {0}; 7518 int rc; 7519 7520 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 7521 req.fid = cpu_to_le16(0xffff); 7522 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 7523 if (br_mode == BRIDGE_MODE_VEB) 7524 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 7525 else if (br_mode == BRIDGE_MODE_VEPA) 7526 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 7527 else 7528 return -EINVAL; 7529 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7530 return rc; 7531 } 7532 7533 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 7534 { 7535 struct hwrm_func_cfg_input req = {0}; 7536 int rc; 7537 7538 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 7539 return 0; 7540 7541 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 7542 req.fid = cpu_to_le16(0xffff); 7543 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 7544 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 7545 if (size == 128) 7546 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 7547 7548 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7549 return rc; 7550 } 7551 7552 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 7553 { 7554 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 7555 int rc; 7556 7557 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 7558 goto skip_rss_ctx; 7559 7560 /* allocate context for vnic */ 7561 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 7562 if (rc) { 7563 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 7564 vnic_id, rc); 7565 goto vnic_setup_err; 7566 } 7567 bp->rsscos_nr_ctxs++; 7568 7569 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7570 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 7571 if (rc) { 7572 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 7573 vnic_id, rc); 7574 goto vnic_setup_err; 7575 } 7576 bp->rsscos_nr_ctxs++; 7577 } 7578 7579 skip_rss_ctx: 7580 /* configure default vnic, ring grp */ 7581 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 7582 if (rc) { 7583 netdev_err(bp->dev, "hwrm vnic %d cfg 
failure rc: %x\n", 7584 vnic_id, rc); 7585 goto vnic_setup_err; 7586 } 7587 7588 /* Enable RSS hashing on vnic */ 7589 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 7590 if (rc) { 7591 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 7592 vnic_id, rc); 7593 goto vnic_setup_err; 7594 } 7595 7596 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7597 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 7598 if (rc) { 7599 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 7600 vnic_id, rc); 7601 } 7602 } 7603 7604 vnic_setup_err: 7605 return rc; 7606 } 7607 7608 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 7609 { 7610 int rc, i, nr_ctxs; 7611 7612 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); 7613 for (i = 0; i < nr_ctxs; i++) { 7614 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 7615 if (rc) { 7616 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 7617 vnic_id, i, rc); 7618 break; 7619 } 7620 bp->rsscos_nr_ctxs++; 7621 } 7622 if (i < nr_ctxs) 7623 return -ENOMEM; 7624 7625 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 7626 if (rc) { 7627 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 7628 vnic_id, rc); 7629 return rc; 7630 } 7631 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 7632 if (rc) { 7633 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 7634 vnic_id, rc); 7635 return rc; 7636 } 7637 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7638 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 7639 if (rc) { 7640 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 7641 vnic_id, rc); 7642 } 7643 } 7644 return rc; 7645 } 7646 7647 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 7648 { 7649 if (bp->flags & BNXT_FLAG_CHIP_P5) 7650 return __bnxt_setup_vnic_p5(bp, vnic_id); 7651 else 7652 return __bnxt_setup_vnic(bp, vnic_id); 7653 } 7654 7655 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 7656 { 7657 #ifdef CONFIG_RFS_ACCEL 7658 int i, rc = 0; 7659 7660 if (bp->flags & BNXT_FLAG_CHIP_P5) 7661 return 0; 7662 7663 for (i = 0; i < bp->rx_nr_rings; i++) { 7664 struct bnxt_vnic_info *vnic; 7665 u16 vnic_id = i + 1; 7666 u16 ring_id = i; 7667 7668 if (vnic_id >= bp->nr_vnics) 7669 break; 7670 7671 vnic = &bp->vnic_info[vnic_id]; 7672 vnic->flags |= BNXT_VNIC_RFS_FLAG; 7673 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 7674 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 7675 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 7676 if (rc) { 7677 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 7678 vnic_id, rc); 7679 break; 7680 } 7681 rc = bnxt_setup_vnic(bp, vnic_id); 7682 if (rc) 7683 break; 7684 } 7685 return rc; 7686 #else 7687 return 0; 7688 #endif 7689 } 7690 7691 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 7692 static bool bnxt_promisc_ok(struct bnxt *bp) 7693 { 7694 #ifdef CONFIG_BNXT_SRIOV 7695 if (BNXT_VF(bp) && !bp->vf.vlan) 7696 return false; 7697 #endif 7698 return true; 7699 } 7700 7701 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 7702 { 7703 unsigned int rc = 0; 7704 7705 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 7706 if (rc) { 7707 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 7708 rc); 7709 return rc; 7710 } 7711 7712 rc = bnxt_hwrm_vnic_cfg(bp, 1); 7713 if (rc) { 7714 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 7715 rc); 7716 return rc; 7717 } 7718 return rc; 7719 } 7720 7721 static int bnxt_cfg_rx_mode(struct bnxt *); 7722 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 7723 7724 static int bnxt_init_chip(struct bnxt *bp, bool 
irq_re_init) 7725 { 7726 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 7727 int rc = 0; 7728 unsigned int rx_nr_rings = bp->rx_nr_rings; 7729 7730 if (irq_re_init) { 7731 rc = bnxt_hwrm_stat_ctx_alloc(bp); 7732 if (rc) { 7733 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 7734 rc); 7735 goto err_out; 7736 } 7737 } 7738 7739 rc = bnxt_hwrm_ring_alloc(bp); 7740 if (rc) { 7741 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 7742 goto err_out; 7743 } 7744 7745 rc = bnxt_hwrm_ring_grp_alloc(bp); 7746 if (rc) { 7747 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 7748 goto err_out; 7749 } 7750 7751 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7752 rx_nr_rings--; 7753 7754 /* default vnic 0 */ 7755 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 7756 if (rc) { 7757 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 7758 goto err_out; 7759 } 7760 7761 rc = bnxt_setup_vnic(bp, 0); 7762 if (rc) 7763 goto err_out; 7764 7765 if (bp->flags & BNXT_FLAG_RFS) { 7766 rc = bnxt_alloc_rfs_vnics(bp); 7767 if (rc) 7768 goto err_out; 7769 } 7770 7771 if (bp->flags & BNXT_FLAG_TPA) { 7772 rc = bnxt_set_tpa(bp, true); 7773 if (rc) 7774 goto err_out; 7775 } 7776 7777 if (BNXT_VF(bp)) 7778 bnxt_update_vf_mac(bp); 7779 7780 /* Filter for default vnic 0 */ 7781 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 7782 if (rc) { 7783 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 7784 goto err_out; 7785 } 7786 vnic->uc_filter_count = 1; 7787 7788 vnic->rx_mask = 0; 7789 if (bp->dev->flags & IFF_BROADCAST) 7790 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 7791 7792 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 7793 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 7794 7795 if (bp->dev->flags & IFF_ALLMULTI) { 7796 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 7797 vnic->mc_list_count = 0; 7798 } else { 7799 u32 mask = 0; 7800 7801 bnxt_mc_list_updated(bp, &mask); 7802 vnic->rx_mask |= mask; 7803 } 7804 7805 rc = bnxt_cfg_rx_mode(bp); 7806 if (rc) 7807 goto err_out; 7808 7809 rc = bnxt_hwrm_set_coal(bp); 7810 if (rc) 7811 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 7812 rc); 7813 7814 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7815 rc = bnxt_setup_nitroa0_vnic(bp); 7816 if (rc) 7817 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 7818 rc); 7819 } 7820 7821 if (BNXT_VF(bp)) { 7822 bnxt_hwrm_func_qcfg(bp); 7823 netdev_update_features(bp->dev); 7824 } 7825 7826 return 0; 7827 7828 err_out: 7829 bnxt_hwrm_resource_free(bp, 0, true); 7830 7831 return rc; 7832 } 7833 7834 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 7835 { 7836 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 7837 return 0; 7838 } 7839 7840 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 7841 { 7842 bnxt_init_cp_rings(bp); 7843 bnxt_init_rx_rings(bp); 7844 bnxt_init_tx_rings(bp); 7845 bnxt_init_ring_grps(bp, irq_re_init); 7846 bnxt_init_vnics(bp); 7847 7848 return bnxt_init_chip(bp, irq_re_init); 7849 } 7850 7851 static int bnxt_set_real_num_queues(struct bnxt *bp) 7852 { 7853 int rc; 7854 struct net_device *dev = bp->dev; 7855 7856 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 7857 bp->tx_nr_rings_xdp); 7858 if (rc) 7859 return rc; 7860 7861 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 7862 if (rc) 7863 return rc; 7864 7865 #ifdef CONFIG_RFS_ACCEL 7866 if (bp->flags & BNXT_FLAG_RFS) 7867 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 7868 #endif 7869 7870 return rc; 
7871 } 7872 7873 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7874 bool shared) 7875 { 7876 int _rx = *rx, _tx = *tx; 7877 7878 if (shared) { 7879 *rx = min_t(int, _rx, max); 7880 *tx = min_t(int, _tx, max); 7881 } else { 7882 if (max < 2) 7883 return -ENOMEM; 7884 7885 while (_rx + _tx > max) { 7886 if (_rx > _tx && _rx > 1) 7887 _rx--; 7888 else if (_tx > 1) 7889 _tx--; 7890 } 7891 *rx = _rx; 7892 *tx = _tx; 7893 } 7894 return 0; 7895 } 7896 7897 static void bnxt_setup_msix(struct bnxt *bp) 7898 { 7899 const int len = sizeof(bp->irq_tbl[0].name); 7900 struct net_device *dev = bp->dev; 7901 int tcs, i; 7902 7903 tcs = netdev_get_num_tc(dev); 7904 if (tcs) { 7905 int i, off, count; 7906 7907 for (i = 0; i < tcs; i++) { 7908 count = bp->tx_nr_rings_per_tc; 7909 off = i * count; 7910 netdev_set_tc_queue(dev, i, count, off); 7911 } 7912 } 7913 7914 for (i = 0; i < bp->cp_nr_rings; i++) { 7915 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 7916 char *attr; 7917 7918 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7919 attr = "TxRx"; 7920 else if (i < bp->rx_nr_rings) 7921 attr = "rx"; 7922 else 7923 attr = "tx"; 7924 7925 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 7926 attr, i); 7927 bp->irq_tbl[map_idx].handler = bnxt_msix; 7928 } 7929 } 7930 7931 static void bnxt_setup_inta(struct bnxt *bp) 7932 { 7933 const int len = sizeof(bp->irq_tbl[0].name); 7934 7935 if (netdev_get_num_tc(bp->dev)) 7936 netdev_reset_tc(bp->dev); 7937 7938 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 7939 0); 7940 bp->irq_tbl[0].handler = bnxt_inta; 7941 } 7942 7943 static int bnxt_setup_int_mode(struct bnxt *bp) 7944 { 7945 int rc; 7946 7947 if (bp->flags & BNXT_FLAG_USING_MSIX) 7948 bnxt_setup_msix(bp); 7949 else 7950 bnxt_setup_inta(bp); 7951 7952 rc = bnxt_set_real_num_queues(bp); 7953 return rc; 7954 } 7955 7956 #ifdef CONFIG_RFS_ACCEL 7957 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 7958 { 7959 return bp->hw_resc.max_rsscos_ctxs; 7960 } 7961 7962 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 7963 { 7964 return bp->hw_resc.max_vnics; 7965 } 7966 #endif 7967 7968 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 7969 { 7970 return bp->hw_resc.max_stat_ctxs; 7971 } 7972 7973 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 7974 { 7975 return bp->hw_resc.max_cp_rings; 7976 } 7977 7978 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 7979 { 7980 unsigned int cp = bp->hw_resc.max_cp_rings; 7981 7982 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 7983 cp -= bnxt_get_ulp_msix_num(bp); 7984 7985 return cp; 7986 } 7987 7988 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 7989 { 7990 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7991 7992 if (bp->flags & BNXT_FLAG_CHIP_P5) 7993 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 7994 7995 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 7996 } 7997 7998 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 7999 { 8000 bp->hw_resc.max_irqs = max_irqs; 8001 } 8002 8003 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 8004 { 8005 unsigned int cp; 8006 8007 cp = bnxt_get_max_func_cp_rings_for_en(bp); 8008 if (bp->flags & BNXT_FLAG_CHIP_P5) 8009 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 8010 else 8011 return cp - bp->cp_nr_rings; 8012 } 8013 8014 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 8015 { 8016 return bnxt_get_max_func_stat_ctxs(bp) - 
bnxt_get_func_stat_ctxs(bp); 8017 } 8018 8019 int bnxt_get_avail_msix(struct bnxt *bp, int num) 8020 { 8021 int max_cp = bnxt_get_max_func_cp_rings(bp); 8022 int max_irq = bnxt_get_max_func_irqs(bp); 8023 int total_req = bp->cp_nr_rings + num; 8024 int max_idx, avail_msix; 8025 8026 max_idx = bp->total_irqs; 8027 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 8028 max_idx = min_t(int, bp->total_irqs, max_cp); 8029 avail_msix = max_idx - bp->cp_nr_rings; 8030 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 8031 return avail_msix; 8032 8033 if (max_irq < total_req) { 8034 num = max_irq - bp->cp_nr_rings; 8035 if (num <= 0) 8036 return 0; 8037 } 8038 return num; 8039 } 8040 8041 static int bnxt_get_num_msix(struct bnxt *bp) 8042 { 8043 if (!BNXT_NEW_RM(bp)) 8044 return bnxt_get_max_func_irqs(bp); 8045 8046 return bnxt_nq_rings_in_use(bp); 8047 } 8048 8049 static int bnxt_init_msix(struct bnxt *bp) 8050 { 8051 int i, total_vecs, max, rc = 0, min = 1, ulp_msix; 8052 struct msix_entry *msix_ent; 8053 8054 total_vecs = bnxt_get_num_msix(bp); 8055 max = bnxt_get_max_func_irqs(bp); 8056 if (total_vecs > max) 8057 total_vecs = max; 8058 8059 if (!total_vecs) 8060 return 0; 8061 8062 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 8063 if (!msix_ent) 8064 return -ENOMEM; 8065 8066 for (i = 0; i < total_vecs; i++) { 8067 msix_ent[i].entry = i; 8068 msix_ent[i].vector = 0; 8069 } 8070 8071 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 8072 min = 2; 8073 8074 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 8075 ulp_msix = bnxt_get_ulp_msix_num(bp); 8076 if (total_vecs < 0 || total_vecs < ulp_msix) { 8077 rc = -ENODEV; 8078 goto msix_setup_exit; 8079 } 8080 8081 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 8082 if (bp->irq_tbl) { 8083 for (i = 0; i < total_vecs; i++) 8084 bp->irq_tbl[i].vector = msix_ent[i].vector; 8085 8086 bp->total_irqs = total_vecs; 8087 /* Trim rings based upon num of vectors allocated */ 8088 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 8089 total_vecs - ulp_msix, min == 1); 8090 if (rc) 8091 goto msix_setup_exit; 8092 8093 bp->cp_nr_rings = (min == 1) ? 
8094 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 8095 bp->tx_nr_rings + bp->rx_nr_rings; 8096 8097 } else { 8098 rc = -ENOMEM; 8099 goto msix_setup_exit; 8100 } 8101 bp->flags |= BNXT_FLAG_USING_MSIX; 8102 kfree(msix_ent); 8103 return 0; 8104 8105 msix_setup_exit: 8106 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 8107 kfree(bp->irq_tbl); 8108 bp->irq_tbl = NULL; 8109 pci_disable_msix(bp->pdev); 8110 kfree(msix_ent); 8111 return rc; 8112 } 8113 8114 static int bnxt_init_inta(struct bnxt *bp) 8115 { 8116 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 8117 if (!bp->irq_tbl) 8118 return -ENOMEM; 8119 8120 bp->total_irqs = 1; 8121 bp->rx_nr_rings = 1; 8122 bp->tx_nr_rings = 1; 8123 bp->cp_nr_rings = 1; 8124 bp->flags |= BNXT_FLAG_SHARED_RINGS; 8125 bp->irq_tbl[0].vector = bp->pdev->irq; 8126 return 0; 8127 } 8128 8129 static int bnxt_init_int_mode(struct bnxt *bp) 8130 { 8131 int rc = 0; 8132 8133 if (bp->flags & BNXT_FLAG_MSIX_CAP) 8134 rc = bnxt_init_msix(bp); 8135 8136 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 8137 /* fallback to INTA */ 8138 rc = bnxt_init_inta(bp); 8139 } 8140 return rc; 8141 } 8142 8143 static void bnxt_clear_int_mode(struct bnxt *bp) 8144 { 8145 if (bp->flags & BNXT_FLAG_USING_MSIX) 8146 pci_disable_msix(bp->pdev); 8147 8148 kfree(bp->irq_tbl); 8149 bp->irq_tbl = NULL; 8150 bp->flags &= ~BNXT_FLAG_USING_MSIX; 8151 } 8152 8153 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 8154 { 8155 int tcs = netdev_get_num_tc(bp->dev); 8156 bool irq_cleared = false; 8157 int rc; 8158 8159 if (!bnxt_need_reserve_rings(bp)) 8160 return 0; 8161 8162 if (irq_re_init && BNXT_NEW_RM(bp) && 8163 bnxt_get_num_msix(bp) != bp->total_irqs) { 8164 bnxt_ulp_irq_stop(bp); 8165 bnxt_clear_int_mode(bp); 8166 irq_cleared = true; 8167 } 8168 rc = __bnxt_reserve_rings(bp); 8169 if (irq_cleared) { 8170 if (!rc) 8171 rc = bnxt_init_int_mode(bp); 8172 bnxt_ulp_irq_restart(bp, rc); 8173 } 8174 if (rc) { 8175 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 8176 return rc; 8177 } 8178 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { 8179 netdev_err(bp->dev, "tx ring reservation failure\n"); 8180 netdev_reset_tc(bp->dev); 8181 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 8182 return -ENOMEM; 8183 } 8184 return 0; 8185 } 8186 8187 static void bnxt_free_irq(struct bnxt *bp) 8188 { 8189 struct bnxt_irq *irq; 8190 int i; 8191 8192 #ifdef CONFIG_RFS_ACCEL 8193 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 8194 bp->dev->rx_cpu_rmap = NULL; 8195 #endif 8196 if (!bp->irq_tbl || !bp->bnapi) 8197 return; 8198 8199 for (i = 0; i < bp->cp_nr_rings; i++) { 8200 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 8201 8202 irq = &bp->irq_tbl[map_idx]; 8203 if (irq->requested) { 8204 if (irq->have_cpumask) { 8205 irq_set_affinity_hint(irq->vector, NULL); 8206 free_cpumask_var(irq->cpu_mask); 8207 irq->have_cpumask = 0; 8208 } 8209 free_irq(irq->vector, bp->bnapi[i]); 8210 } 8211 8212 irq->requested = 0; 8213 } 8214 } 8215 8216 static int bnxt_request_irq(struct bnxt *bp) 8217 { 8218 int i, j, rc = 0; 8219 unsigned long flags = 0; 8220 #ifdef CONFIG_RFS_ACCEL 8221 struct cpu_rmap *rmap; 8222 #endif 8223 8224 rc = bnxt_setup_int_mode(bp); 8225 if (rc) { 8226 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 8227 rc); 8228 return rc; 8229 } 8230 #ifdef CONFIG_RFS_ACCEL 8231 rmap = bp->dev->rx_cpu_rmap; 8232 #endif 8233 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 8234 flags = IRQF_SHARED; 8235 8236 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 8237 int 
map_idx = bnxt_cp_num_to_irq_num(bp, i); 8238 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 8239 8240 #ifdef CONFIG_RFS_ACCEL 8241 if (rmap && bp->bnapi[i]->rx_ring) { 8242 rc = irq_cpu_rmap_add(rmap, irq->vector); 8243 if (rc) 8244 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 8245 j); 8246 j++; 8247 } 8248 #endif 8249 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 8250 bp->bnapi[i]); 8251 if (rc) 8252 break; 8253 8254 irq->requested = 1; 8255 8256 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 8257 int numa_node = dev_to_node(&bp->pdev->dev); 8258 8259 irq->have_cpumask = 1; 8260 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 8261 irq->cpu_mask); 8262 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 8263 if (rc) { 8264 netdev_warn(bp->dev, 8265 "Set affinity failed, IRQ = %d\n", 8266 irq->vector); 8267 break; 8268 } 8269 } 8270 } 8271 return rc; 8272 } 8273 8274 static void bnxt_del_napi(struct bnxt *bp) 8275 { 8276 int i; 8277 8278 if (!bp->bnapi) 8279 return; 8280 8281 for (i = 0; i < bp->cp_nr_rings; i++) { 8282 struct bnxt_napi *bnapi = bp->bnapi[i]; 8283 8284 napi_hash_del(&bnapi->napi); 8285 netif_napi_del(&bnapi->napi); 8286 } 8287 /* We called napi_hash_del() before netif_napi_del(), we need 8288 * to respect an RCU grace period before freeing napi structures. 8289 */ 8290 synchronize_net(); 8291 } 8292 8293 static void bnxt_init_napi(struct bnxt *bp) 8294 { 8295 int i; 8296 unsigned int cp_nr_rings = bp->cp_nr_rings; 8297 struct bnxt_napi *bnapi; 8298 8299 if (bp->flags & BNXT_FLAG_USING_MSIX) { 8300 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 8301 8302 if (bp->flags & BNXT_FLAG_CHIP_P5) 8303 poll_fn = bnxt_poll_p5; 8304 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8305 cp_nr_rings--; 8306 for (i = 0; i < cp_nr_rings; i++) { 8307 bnapi = bp->bnapi[i]; 8308 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); 8309 } 8310 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 8311 bnapi = bp->bnapi[cp_nr_rings]; 8312 netif_napi_add(bp->dev, &bnapi->napi, 8313 bnxt_poll_nitroa0, 64); 8314 } 8315 } else { 8316 bnapi = bp->bnapi[0]; 8317 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 8318 } 8319 } 8320 8321 static void bnxt_disable_napi(struct bnxt *bp) 8322 { 8323 int i; 8324 8325 if (!bp->bnapi) 8326 return; 8327 8328 for (i = 0; i < bp->cp_nr_rings; i++) { 8329 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 8330 8331 if (bp->bnapi[i]->rx_ring) 8332 cancel_work_sync(&cpr->dim.work); 8333 8334 napi_disable(&bp->bnapi[i]->napi); 8335 } 8336 } 8337 8338 static void bnxt_enable_napi(struct bnxt *bp) 8339 { 8340 int i; 8341 8342 for (i = 0; i < bp->cp_nr_rings; i++) { 8343 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 8344 bp->bnapi[i]->in_reset = false; 8345 8346 if (bp->bnapi[i]->rx_ring) { 8347 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 8348 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 8349 } 8350 napi_enable(&bp->bnapi[i]->napi); 8351 } 8352 } 8353 8354 void bnxt_tx_disable(struct bnxt *bp) 8355 { 8356 int i; 8357 struct bnxt_tx_ring_info *txr; 8358 8359 if (bp->tx_ring) { 8360 for (i = 0; i < bp->tx_nr_rings; i++) { 8361 txr = &bp->tx_ring[i]; 8362 txr->dev_state = BNXT_DEV_STATE_CLOSING; 8363 } 8364 } 8365 /* Stop all TX queues */ 8366 netif_tx_disable(bp->dev); 8367 netif_carrier_off(bp->dev); 8368 } 8369 8370 void bnxt_tx_enable(struct bnxt *bp) 8371 { 8372 int i; 8373 struct bnxt_tx_ring_info *txr; 8374 8375 for (i = 0; i < bp->tx_nr_rings; i++) { 8376 txr = &bp->tx_ring[i]; 8377 txr->dev_state = 0; 8378 } 8379 
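	/* All TX rings are out of the CLOSING state now; wake every TX queue
	 * and restore the carrier only if the link is currently up.
	 */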
netif_tx_wake_all_queues(bp->dev); 8380 if (bp->link_info.link_up) 8381 netif_carrier_on(bp->dev); 8382 } 8383 8384 static void bnxt_report_link(struct bnxt *bp) 8385 { 8386 if (bp->link_info.link_up) { 8387 const char *duplex; 8388 const char *flow_ctrl; 8389 u32 speed; 8390 u16 fec; 8391 8392 netif_carrier_on(bp->dev); 8393 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 8394 duplex = "full"; 8395 else 8396 duplex = "half"; 8397 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 8398 flow_ctrl = "ON - receive & transmit"; 8399 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 8400 flow_ctrl = "ON - transmit"; 8401 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 8402 flow_ctrl = "ON - receive"; 8403 else 8404 flow_ctrl = "none"; 8405 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 8406 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", 8407 speed, duplex, flow_ctrl); 8408 if (bp->flags & BNXT_FLAG_EEE_CAP) 8409 netdev_info(bp->dev, "EEE is %s\n", 8410 bp->eee.eee_active ? "active" : 8411 "not active"); 8412 fec = bp->link_info.fec_cfg; 8413 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 8414 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", 8415 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 8416 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : 8417 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None"); 8418 } else { 8419 netif_carrier_off(bp->dev); 8420 netdev_err(bp->dev, "NIC Link is Down\n"); 8421 } 8422 } 8423 8424 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 8425 { 8426 int rc = 0; 8427 struct hwrm_port_phy_qcaps_input req = {0}; 8428 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 8429 struct bnxt_link_info *link_info = &bp->link_info; 8430 8431 bp->flags &= ~BNXT_FLAG_EEE_CAP; 8432 if (bp->test_info) 8433 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK | 8434 BNXT_TEST_FL_AN_PHY_LPBK); 8435 if (bp->hwrm_spec_code < 0x10201) 8436 return 0; 8437 8438 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 8439 8440 mutex_lock(&bp->hwrm_cmd_lock); 8441 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8442 if (rc) 8443 goto hwrm_phy_qcaps_exit; 8444 8445 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 8446 struct ethtool_eee *eee = &bp->eee; 8447 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 8448 8449 bp->flags |= BNXT_FLAG_EEE_CAP; 8450 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 8451 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 8452 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 8453 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 8454 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 8455 } 8456 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) { 8457 if (bp->test_info) 8458 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; 8459 } 8460 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) { 8461 if (bp->test_info) 8462 bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK; 8463 } 8464 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) { 8465 if (BNXT_PF(bp)) 8466 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; 8467 } 8468 if (resp->supported_speeds_auto_mode) 8469 link_info->support_auto_speeds = 8470 le16_to_cpu(resp->supported_speeds_auto_mode); 8471 8472 bp->port_count = resp->port_cnt; 8473 8474 hwrm_phy_qcaps_exit: 8475 mutex_unlock(&bp->hwrm_cmd_lock); 8476 return rc; 8477 } 8478 8479 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 8480 { 8481 int rc = 0; 8482 struct 
bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 link_up = link_info->link_up;
	u16 diff;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
	link_info->phy_link_status = resp->link;
	link_info->duplex = resp->duplex_cfg;
	if (bp->hwrm_spec_code >= 0x10800)
		link_info->duplex = resp->duplex_state;
	link_info->pause = resp->pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->auto_pause_setting = resp->auto_pause;
	link_info->lp_pause = resp->link_partner_adv_pause;
	link_info->force_pause_setting = resp->force_pause;
	link_info->duplex_setting = resp->duplex_cfg;
	if (link_info->phy_link_status == BNXT_LINK_LINK)
		link_info->link_speed = le16_to_cpu(resp->link_speed);
	else
		link_info->link_speed = 0;
	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
	link_info->lp_auto_link_speeds =
		le16_to_cpu(resp->link_partner_adv_speeds);
	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->media_type = resp->media_type;
	link_info->phy_type = resp->phy_type;
	link_info->transceiver = resp->xcvr_pkg_type;
	link_info->phy_addr = resp->eee_config_phy_addr &
			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
	link_info->module_status = resp->module_status;

	if (bp->flags & BNXT_FLAG_EEE_CAP) {
		struct ethtool_eee *eee = &bp->eee;
		u16 fw_speeds;

		eee->eee_active = 0;
		if (resp->eee_config_phy_addr &
		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
			eee->eee_active = 1;
			fw_speeds = le16_to_cpu(
				resp->link_partner_adv_eee_link_speed_mask);
			eee->lp_advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		}

		/* Pull initial EEE config */
		if (!chng_link_state) {
			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			eee->advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504)
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);

	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_up = 1;
		else
			link_info->link_up = 0;
		if (link_up != link_info->link_up)
			bnxt_report_link(bp);
	} else {
		/* always link down if not required to update link state */
		link_info->link_up =
0; 8579 } 8580 mutex_unlock(&bp->hwrm_cmd_lock); 8581 8582 if (!BNXT_PHY_CFG_ABLE(bp)) 8583 return 0; 8584 8585 diff = link_info->support_auto_speeds ^ link_info->advertising; 8586 if ((link_info->support_auto_speeds | diff) != 8587 link_info->support_auto_speeds) { 8588 /* An advertised speed is no longer supported, so we need to 8589 * update the advertisement settings. Caller holds RTNL 8590 * so we can modify link settings. 8591 */ 8592 link_info->advertising = link_info->support_auto_speeds; 8593 if (link_info->autoneg & BNXT_AUTONEG_SPEED) 8594 bnxt_hwrm_set_link_setting(bp, true, false); 8595 } 8596 return 0; 8597 } 8598 8599 static void bnxt_get_port_module_status(struct bnxt *bp) 8600 { 8601 struct bnxt_link_info *link_info = &bp->link_info; 8602 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 8603 u8 module_status; 8604 8605 if (bnxt_update_link(bp, true)) 8606 return; 8607 8608 module_status = link_info->module_status; 8609 switch (module_status) { 8610 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 8611 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 8612 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 8613 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 8614 bp->pf.port_id); 8615 if (bp->hwrm_spec_code >= 0x10201) { 8616 netdev_warn(bp->dev, "Module part number %s\n", 8617 resp->phy_vendor_partnumber); 8618 } 8619 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 8620 netdev_warn(bp->dev, "TX is disabled\n"); 8621 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 8622 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 8623 } 8624 } 8625 8626 static void 8627 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 8628 { 8629 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 8630 if (bp->hwrm_spec_code >= 0x10201) 8631 req->auto_pause = 8632 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 8633 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 8634 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 8635 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 8636 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 8637 req->enables |= 8638 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 8639 } else { 8640 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 8641 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 8642 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 8643 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 8644 req->enables |= 8645 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 8646 if (bp->hwrm_spec_code >= 0x10201) { 8647 req->auto_pause = req->force_pause; 8648 req->enables |= cpu_to_le32( 8649 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 8650 } 8651 } 8652 } 8653 8654 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 8655 struct hwrm_port_phy_cfg_input *req) 8656 { 8657 u8 autoneg = bp->link_info.autoneg; 8658 u16 fw_link_speed = bp->link_info.req_link_speed; 8659 u16 advertising = bp->link_info.advertising; 8660 8661 if (autoneg & BNXT_AUTONEG_SPEED) { 8662 req->auto_mode |= 8663 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 8664 8665 req->enables |= cpu_to_le32( 8666 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 8667 req->auto_link_speed_mask = cpu_to_le16(advertising); 8668 8669 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 8670 req->flags |= 8671 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 8672 } else { 8673 req->force_link_speed = cpu_to_le16(fw_link_speed); 8674 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 8675 
} 8676 8677 /* tell chimp that the setting takes effect immediately */ 8678 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 8679 } 8680 8681 int bnxt_hwrm_set_pause(struct bnxt *bp) 8682 { 8683 struct hwrm_port_phy_cfg_input req = {0}; 8684 int rc; 8685 8686 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8687 bnxt_hwrm_set_pause_common(bp, &req); 8688 8689 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 8690 bp->link_info.force_link_chng) 8691 bnxt_hwrm_set_link_common(bp, &req); 8692 8693 mutex_lock(&bp->hwrm_cmd_lock); 8694 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8695 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 8696 /* since changing of pause setting doesn't trigger any link 8697 * change event, the driver needs to update the current pause 8698 * result upon successfully return of the phy_cfg command 8699 */ 8700 bp->link_info.pause = 8701 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 8702 bp->link_info.auto_pause_setting = 0; 8703 if (!bp->link_info.force_link_chng) 8704 bnxt_report_link(bp); 8705 } 8706 bp->link_info.force_link_chng = false; 8707 mutex_unlock(&bp->hwrm_cmd_lock); 8708 return rc; 8709 } 8710 8711 static void bnxt_hwrm_set_eee(struct bnxt *bp, 8712 struct hwrm_port_phy_cfg_input *req) 8713 { 8714 struct ethtool_eee *eee = &bp->eee; 8715 8716 if (eee->eee_enabled) { 8717 u16 eee_speeds; 8718 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 8719 8720 if (eee->tx_lpi_enabled) 8721 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 8722 else 8723 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 8724 8725 req->flags |= cpu_to_le32(flags); 8726 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 8727 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 8728 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 8729 } else { 8730 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 8731 } 8732 } 8733 8734 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 8735 { 8736 struct hwrm_port_phy_cfg_input req = {0}; 8737 8738 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8739 if (set_pause) 8740 bnxt_hwrm_set_pause_common(bp, &req); 8741 8742 bnxt_hwrm_set_link_common(bp, &req); 8743 8744 if (set_eee) 8745 bnxt_hwrm_set_eee(bp, &req); 8746 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8747 } 8748 8749 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 8750 { 8751 struct hwrm_port_phy_cfg_input req = {0}; 8752 8753 if (!BNXT_SINGLE_PF(bp)) 8754 return 0; 8755 8756 if (pci_num_vf(bp->pdev)) 8757 return 0; 8758 8759 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 8760 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 8761 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8762 } 8763 8764 static int bnxt_fw_init_one(struct bnxt *bp); 8765 8766 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 8767 { 8768 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; 8769 struct hwrm_func_drv_if_change_input req = {0}; 8770 bool resc_reinit = false, fw_reset = false; 8771 u32 flags = 0; 8772 int rc; 8773 8774 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 8775 return 0; 8776 8777 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); 8778 if (up) 8779 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 8780 mutex_lock(&bp->hwrm_cmd_lock); 8781 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8782 if (!rc) 8783 flags = 
le32_to_cpu(resp->flags); 8784 mutex_unlock(&bp->hwrm_cmd_lock); 8785 if (rc) 8786 return rc; 8787 8788 if (!up) 8789 return 0; 8790 8791 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 8792 resc_reinit = true; 8793 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) 8794 fw_reset = true; 8795 8796 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 8797 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 8798 return -ENODEV; 8799 } 8800 if (resc_reinit || fw_reset) { 8801 if (fw_reset) { 8802 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 8803 bnxt_ulp_stop(bp); 8804 bnxt_free_ctx_mem(bp); 8805 kfree(bp->ctx); 8806 bp->ctx = NULL; 8807 rc = bnxt_fw_init_one(bp); 8808 if (rc) { 8809 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 8810 return rc; 8811 } 8812 bnxt_clear_int_mode(bp); 8813 rc = bnxt_init_int_mode(bp); 8814 if (rc) { 8815 netdev_err(bp->dev, "init int mode failed\n"); 8816 return rc; 8817 } 8818 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 8819 } 8820 if (BNXT_NEW_RM(bp)) { 8821 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8822 8823 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 8824 hw_resc->resv_cp_rings = 0; 8825 hw_resc->resv_stat_ctxs = 0; 8826 hw_resc->resv_irqs = 0; 8827 hw_resc->resv_tx_rings = 0; 8828 hw_resc->resv_rx_rings = 0; 8829 hw_resc->resv_hw_ring_grps = 0; 8830 hw_resc->resv_vnics = 0; 8831 if (!fw_reset) { 8832 bp->tx_nr_rings = 0; 8833 bp->rx_nr_rings = 0; 8834 } 8835 } 8836 } 8837 return 0; 8838 } 8839 8840 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 8841 { 8842 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 8843 struct hwrm_port_led_qcaps_input req = {0}; 8844 struct bnxt_pf_info *pf = &bp->pf; 8845 int rc; 8846 8847 bp->num_leds = 0; 8848 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 8849 return 0; 8850 8851 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 8852 req.port_id = cpu_to_le16(pf->port_id); 8853 mutex_lock(&bp->hwrm_cmd_lock); 8854 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8855 if (rc) { 8856 mutex_unlock(&bp->hwrm_cmd_lock); 8857 return rc; 8858 } 8859 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 8860 int i; 8861 8862 bp->num_leds = resp->num_leds; 8863 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 8864 bp->num_leds); 8865 for (i = 0; i < bp->num_leds; i++) { 8866 struct bnxt_led_info *led = &bp->leds[i]; 8867 __le16 caps = led->led_state_caps; 8868 8869 if (!led->led_group_id || 8870 !BNXT_LED_ALT_BLINK_CAP(caps)) { 8871 bp->num_leds = 0; 8872 break; 8873 } 8874 } 8875 } 8876 mutex_unlock(&bp->hwrm_cmd_lock); 8877 return 0; 8878 } 8879 8880 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 8881 { 8882 struct hwrm_wol_filter_alloc_input req = {0}; 8883 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 8884 int rc; 8885 8886 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 8887 req.port_id = cpu_to_le16(bp->pf.port_id); 8888 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 8889 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 8890 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 8891 mutex_lock(&bp->hwrm_cmd_lock); 8892 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8893 if (!rc) 8894 bp->wol_filter_id = resp->wol_filter_id; 8895 mutex_unlock(&bp->hwrm_cmd_lock); 8896 return rc; 8897 } 8898 8899 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 8900 { 8901 struct hwrm_wol_filter_free_input req = {0}; 8902 int rc; 8903 8904 
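	/* Build a WOL_FILTER_FREE request for this port and release the
	 * filter identified by bp->wol_filter_id.
	 */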
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 8905 req.port_id = cpu_to_le16(bp->pf.port_id); 8906 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 8907 req.wol_filter_id = bp->wol_filter_id; 8908 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8909 return rc; 8910 } 8911 8912 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 8913 { 8914 struct hwrm_wol_filter_qcfg_input req = {0}; 8915 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 8916 u16 next_handle = 0; 8917 int rc; 8918 8919 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 8920 req.port_id = cpu_to_le16(bp->pf.port_id); 8921 req.handle = cpu_to_le16(handle); 8922 mutex_lock(&bp->hwrm_cmd_lock); 8923 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 8924 if (!rc) { 8925 next_handle = le16_to_cpu(resp->next_handle); 8926 if (next_handle != 0) { 8927 if (resp->wol_type == 8928 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 8929 bp->wol = 1; 8930 bp->wol_filter_id = resp->wol_filter_id; 8931 } 8932 } 8933 } 8934 mutex_unlock(&bp->hwrm_cmd_lock); 8935 return next_handle; 8936 } 8937 8938 static void bnxt_get_wol_settings(struct bnxt *bp) 8939 { 8940 u16 handle = 0; 8941 8942 bp->wol = 0; 8943 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 8944 return; 8945 8946 do { 8947 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 8948 } while (handle && handle != 0xffff); 8949 } 8950 8951 #ifdef CONFIG_BNXT_HWMON 8952 static ssize_t bnxt_show_temp(struct device *dev, 8953 struct device_attribute *devattr, char *buf) 8954 { 8955 struct hwrm_temp_monitor_query_input req = {0}; 8956 struct hwrm_temp_monitor_query_output *resp; 8957 struct bnxt *bp = dev_get_drvdata(dev); 8958 u32 temp = 0; 8959 8960 resp = bp->hwrm_cmd_resp_addr; 8961 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); 8962 mutex_lock(&bp->hwrm_cmd_lock); 8963 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) 8964 temp = resp->temp * 1000; /* display millidegree */ 8965 mutex_unlock(&bp->hwrm_cmd_lock); 8966 8967 return sprintf(buf, "%u\n", temp); 8968 } 8969 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); 8970 8971 static struct attribute *bnxt_attrs[] = { 8972 &sensor_dev_attr_temp1_input.dev_attr.attr, 8973 NULL 8974 }; 8975 ATTRIBUTE_GROUPS(bnxt); 8976 8977 static void bnxt_hwmon_close(struct bnxt *bp) 8978 { 8979 if (bp->hwmon_dev) { 8980 hwmon_device_unregister(bp->hwmon_dev); 8981 bp->hwmon_dev = NULL; 8982 } 8983 } 8984 8985 static void bnxt_hwmon_open(struct bnxt *bp) 8986 { 8987 struct pci_dev *pdev = bp->pdev; 8988 8989 if (bp->hwmon_dev) 8990 return; 8991 8992 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, 8993 DRV_MODULE_NAME, bp, 8994 bnxt_groups); 8995 if (IS_ERR(bp->hwmon_dev)) { 8996 bp->hwmon_dev = NULL; 8997 dev_warn(&pdev->dev, "Cannot register hwmon device\n"); 8998 } 8999 } 9000 #else 9001 static void bnxt_hwmon_close(struct bnxt *bp) 9002 { 9003 } 9004 9005 static void bnxt_hwmon_open(struct bnxt *bp) 9006 { 9007 } 9008 #endif 9009 9010 static bool bnxt_eee_config_ok(struct bnxt *bp) 9011 { 9012 struct ethtool_eee *eee = &bp->eee; 9013 struct bnxt_link_info *link_info = &bp->link_info; 9014 9015 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 9016 return true; 9017 9018 if (eee->eee_enabled) { 9019 u32 advertising = 9020 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 9021 9022 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9023 eee->eee_enabled = 0; 9024 return false; 9025 } 
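		/* If EEE advertises speeds that are no longer advertised for
		 * the link, trim the EEE advertisement and report that the
		 * configuration needs to be updated.
		 */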
9026 if (eee->advertised & ~advertising) { 9027 eee->advertised = advertising & eee->supported; 9028 return false; 9029 } 9030 } 9031 return true; 9032 } 9033 9034 static int bnxt_update_phy_setting(struct bnxt *bp) 9035 { 9036 int rc; 9037 bool update_link = false; 9038 bool update_pause = false; 9039 bool update_eee = false; 9040 struct bnxt_link_info *link_info = &bp->link_info; 9041 9042 rc = bnxt_update_link(bp, true); 9043 if (rc) { 9044 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 9045 rc); 9046 return rc; 9047 } 9048 if (!BNXT_SINGLE_PF(bp)) 9049 return 0; 9050 9051 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9052 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 9053 link_info->req_flow_ctrl) 9054 update_pause = true; 9055 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 9056 link_info->force_pause_setting != link_info->req_flow_ctrl) 9057 update_pause = true; 9058 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 9059 if (BNXT_AUTO_MODE(link_info->auto_mode)) 9060 update_link = true; 9061 if (link_info->req_link_speed != link_info->force_link_speed) 9062 update_link = true; 9063 if (link_info->req_duplex != link_info->duplex_setting) 9064 update_link = true; 9065 } else { 9066 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 9067 update_link = true; 9068 if (link_info->advertising != link_info->auto_link_speeds) 9069 update_link = true; 9070 } 9071 9072 /* The last close may have shutdown the link, so need to call 9073 * PHY_CFG to bring it back up. 9074 */ 9075 if (!bp->link_info.link_up) 9076 update_link = true; 9077 9078 if (!bnxt_eee_config_ok(bp)) 9079 update_eee = true; 9080 9081 if (update_link) 9082 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 9083 else if (update_pause) 9084 rc = bnxt_hwrm_set_pause(bp); 9085 if (rc) { 9086 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 9087 rc); 9088 return rc; 9089 } 9090 9091 return rc; 9092 } 9093 9094 /* Common routine to pre-map certain register block to different GRC window. 9095 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 9096 * in PF and 3 windows in VF that can be customized to map in different 9097 * register blocks. 9098 */ 9099 static void bnxt_preset_reg_win(struct bnxt *bp) 9100 { 9101 if (BNXT_PF(bp)) { 9102 /* CAG registers map to GRC window #4 */ 9103 writel(BNXT_CAG_REG_BASE, 9104 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 9105 } 9106 } 9107 9108 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 9109 9110 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9111 { 9112 int rc = 0; 9113 9114 bnxt_preset_reg_win(bp); 9115 netif_carrier_off(bp->dev); 9116 if (irq_re_init) { 9117 /* Reserve rings now if none were reserved at driver probe. 
*/ 9118 rc = bnxt_init_dflt_ring_mode(bp); 9119 if (rc) { 9120 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 9121 return rc; 9122 } 9123 } 9124 rc = bnxt_reserve_rings(bp, irq_re_init); 9125 if (rc) 9126 return rc; 9127 if ((bp->flags & BNXT_FLAG_RFS) && 9128 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 9129 /* disable RFS if falling back to INTA */ 9130 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 9131 bp->flags &= ~BNXT_FLAG_RFS; 9132 } 9133 9134 rc = bnxt_alloc_mem(bp, irq_re_init); 9135 if (rc) { 9136 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9137 goto open_err_free_mem; 9138 } 9139 9140 if (irq_re_init) { 9141 bnxt_init_napi(bp); 9142 rc = bnxt_request_irq(bp); 9143 if (rc) { 9144 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 9145 goto open_err_irq; 9146 } 9147 } 9148 9149 bnxt_enable_napi(bp); 9150 bnxt_debug_dev_init(bp); 9151 9152 rc = bnxt_init_nic(bp, irq_re_init); 9153 if (rc) { 9154 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9155 goto open_err; 9156 } 9157 9158 if (link_re_init) { 9159 mutex_lock(&bp->link_lock); 9160 rc = bnxt_update_phy_setting(bp); 9161 mutex_unlock(&bp->link_lock); 9162 if (rc) { 9163 netdev_warn(bp->dev, "failed to update phy settings\n"); 9164 if (BNXT_SINGLE_PF(bp)) { 9165 bp->link_info.phy_retry = true; 9166 bp->link_info.phy_retry_expires = 9167 jiffies + 5 * HZ; 9168 } 9169 } 9170 } 9171 9172 if (irq_re_init) 9173 udp_tunnel_get_rx_info(bp->dev); 9174 9175 set_bit(BNXT_STATE_OPEN, &bp->state); 9176 bnxt_enable_int(bp); 9177 /* Enable TX queues */ 9178 bnxt_tx_enable(bp); 9179 mod_timer(&bp->timer, jiffies + bp->current_interval); 9180 /* Poll link status and check for SFP+ module status */ 9181 bnxt_get_port_module_status(bp); 9182 9183 /* VF-reps may need to be re-opened after the PF is re-opened */ 9184 if (BNXT_PF(bp)) 9185 bnxt_vf_reps_open(bp); 9186 return 0; 9187 9188 open_err: 9189 bnxt_debug_dev_exit(bp); 9190 bnxt_disable_napi(bp); 9191 9192 open_err_irq: 9193 bnxt_del_napi(bp); 9194 9195 open_err_free_mem: 9196 bnxt_free_skbs(bp); 9197 bnxt_free_irq(bp); 9198 bnxt_free_mem(bp, true); 9199 return rc; 9200 } 9201 9202 /* rtnl_lock held */ 9203 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 9204 { 9205 int rc = 0; 9206 9207 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 9208 if (rc) { 9209 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 9210 dev_close(bp->dev); 9211 } 9212 return rc; 9213 } 9214 9215 /* rtnl_lock held, open the NIC half way by allocating all resources, but 9216 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 9217 * self tests. 9218 */ 9219 int bnxt_half_open_nic(struct bnxt *bp) 9220 { 9221 int rc = 0; 9222 9223 rc = bnxt_alloc_mem(bp, false); 9224 if (rc) { 9225 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 9226 goto half_open_err; 9227 } 9228 rc = bnxt_init_nic(bp, false); 9229 if (rc) { 9230 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 9231 goto half_open_err; 9232 } 9233 return 0; 9234 9235 half_open_err: 9236 bnxt_free_skbs(bp); 9237 bnxt_free_mem(bp, false); 9238 dev_close(bp->dev); 9239 return rc; 9240 } 9241 9242 /* rtnl_lock held, this call can only be made after a previous successful 9243 * call to bnxt_half_open_nic(). 
 */
void bnxt_half_close_nic(struct bnxt *bp)
{
	bnxt_hwrm_resource_free(bp, false, false);
	bnxt_free_skbs(bp);
	bnxt_free_mem(bp, false);
}

static void bnxt_reenable_sriov(struct bnxt *bp)
{
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;
		int n = pf->active_vfs;

		if (n)
			bnxt_cfg_hw_sriov(bp, &n, true);
	}
}

static int bnxt_open(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
		netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
		return -ENODEV;
	}

	rc = bnxt_hwrm_if_change(bp, true);
	if (rc)
		return rc;
	rc = __bnxt_open_nic(bp, true, true);
	if (rc) {
		bnxt_hwrm_if_change(bp, false);
	} else {
		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
				bnxt_ulp_start(bp, 0);
				bnxt_reenable_sriov(bp);
			}
		}
		bnxt_hwmon_open(bp);
	}

	return rc;
}

static bool bnxt_drv_busy(struct bnxt *bp)
{
	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
		test_bit(BNXT_STATE_READ_STATS, &bp->state));
}

static void bnxt_get_ring_stats(struct bnxt *bp,
				struct rtnl_link_stats64 *stats);

static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
			     bool link_re_init)
{
	/* Close the VF-reps before closing PF */
	if (BNXT_PF(bp))
		bnxt_vf_reps_close(bp);

	/* Change device state to avoid TX queue wake-ups */
	bnxt_tx_disable(bp);

	clear_bit(BNXT_STATE_OPEN, &bp->state);
	smp_mb__after_atomic();
	while (bnxt_drv_busy(bp))
		msleep(20);

	/* Flush rings and disable interrupts */
	bnxt_shutdown_nic(bp, irq_re_init);

	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */

	bnxt_debug_dev_exit(bp);
	bnxt_disable_napi(bp);
	del_timer_sync(&bp->timer);
	bnxt_free_skbs(bp);

	/* Save ring stats before shutdown */
	if (bp->bnapi)
		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
	if (irq_re_init) {
		bnxt_free_irq(bp);
		bnxt_del_napi(bp);
	}
	bnxt_free_mem(bp, irq_re_init);
}

int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
	int rc = 0;

	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		/* If we get here, it means firmware reset is in progress
		 * while we are trying to close. We can safely proceed with
		 * the close because we are holding rtnl_lock(). Some firmware
		 * messages may fail as we proceed to close. We set the
		 * ABORT_ERR flag here so that the FW reset thread will later
		 * abort when it gets the rtnl_lock() and sees the flag.
9347 */ 9348 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 9349 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 9350 } 9351 9352 #ifdef CONFIG_BNXT_SRIOV 9353 if (bp->sriov_cfg) { 9354 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 9355 !bp->sriov_cfg, 9356 BNXT_SRIOV_CFG_WAIT_TMO); 9357 if (rc) 9358 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 9359 } 9360 #endif 9361 __bnxt_close_nic(bp, irq_re_init, link_re_init); 9362 return rc; 9363 } 9364 9365 static int bnxt_close(struct net_device *dev) 9366 { 9367 struct bnxt *bp = netdev_priv(dev); 9368 9369 bnxt_hwmon_close(bp); 9370 bnxt_close_nic(bp, true, true); 9371 bnxt_hwrm_shutdown_link(bp); 9372 bnxt_hwrm_if_change(bp, false); 9373 return 0; 9374 } 9375 9376 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 9377 u16 *val) 9378 { 9379 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; 9380 struct hwrm_port_phy_mdio_read_input req = {0}; 9381 int rc; 9382 9383 if (bp->hwrm_spec_code < 0x10a00) 9384 return -EOPNOTSUPP; 9385 9386 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); 9387 req.port_id = cpu_to_le16(bp->pf.port_id); 9388 req.phy_addr = phy_addr; 9389 req.reg_addr = cpu_to_le16(reg & 0x1f); 9390 if (mdio_phy_id_is_c45(phy_addr)) { 9391 req.cl45_mdio = 1; 9392 req.phy_addr = mdio_phy_id_prtad(phy_addr); 9393 req.dev_addr = mdio_phy_id_devad(phy_addr); 9394 req.reg_addr = cpu_to_le16(reg); 9395 } 9396 9397 mutex_lock(&bp->hwrm_cmd_lock); 9398 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9399 if (!rc) 9400 *val = le16_to_cpu(resp->reg_data); 9401 mutex_unlock(&bp->hwrm_cmd_lock); 9402 return rc; 9403 } 9404 9405 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 9406 u16 val) 9407 { 9408 struct hwrm_port_phy_mdio_write_input req = {0}; 9409 9410 if (bp->hwrm_spec_code < 0x10a00) 9411 return -EOPNOTSUPP; 9412 9413 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); 9414 req.port_id = cpu_to_le16(bp->pf.port_id); 9415 req.phy_addr = phy_addr; 9416 req.reg_addr = cpu_to_le16(reg & 0x1f); 9417 if (mdio_phy_id_is_c45(phy_addr)) { 9418 req.cl45_mdio = 1; 9419 req.phy_addr = mdio_phy_id_prtad(phy_addr); 9420 req.dev_addr = mdio_phy_id_devad(phy_addr); 9421 req.reg_addr = cpu_to_le16(reg); 9422 } 9423 req.reg_data = cpu_to_le16(val); 9424 9425 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9426 } 9427 9428 /* rtnl_lock held */ 9429 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 9430 { 9431 struct mii_ioctl_data *mdio = if_mii(ifr); 9432 struct bnxt *bp = netdev_priv(dev); 9433 int rc; 9434 9435 switch (cmd) { 9436 case SIOCGMIIPHY: 9437 mdio->phy_id = bp->link_info.phy_addr; 9438 9439 /* fallthru */ 9440 case SIOCGMIIREG: { 9441 u16 mii_regval = 0; 9442 9443 if (!netif_running(dev)) 9444 return -EAGAIN; 9445 9446 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 9447 &mii_regval); 9448 mdio->val_out = mii_regval; 9449 return rc; 9450 } 9451 9452 case SIOCSMIIREG: 9453 if (!netif_running(dev)) 9454 return -EAGAIN; 9455 9456 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 9457 mdio->val_in); 9458 9459 default: 9460 /* do nothing */ 9461 break; 9462 } 9463 return -EOPNOTSUPP; 9464 } 9465 9466 static void bnxt_get_ring_stats(struct bnxt *bp, 9467 struct rtnl_link_stats64 *stats) 9468 { 9469 int i; 9470 9471 9472 for (i = 0; i < bp->cp_nr_rings; i++) { 9473 struct 
bnxt_napi *bnapi = bp->bnapi[i]; 9474 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 9475 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 9476 9477 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 9478 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 9479 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 9480 9481 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 9482 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 9483 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 9484 9485 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 9486 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 9487 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 9488 9489 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 9490 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 9491 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 9492 9493 stats->rx_missed_errors += 9494 le64_to_cpu(hw_stats->rx_discard_pkts); 9495 9496 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 9497 9498 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 9499 } 9500 } 9501 9502 static void bnxt_add_prev_stats(struct bnxt *bp, 9503 struct rtnl_link_stats64 *stats) 9504 { 9505 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 9506 9507 stats->rx_packets += prev_stats->rx_packets; 9508 stats->tx_packets += prev_stats->tx_packets; 9509 stats->rx_bytes += prev_stats->rx_bytes; 9510 stats->tx_bytes += prev_stats->tx_bytes; 9511 stats->rx_missed_errors += prev_stats->rx_missed_errors; 9512 stats->multicast += prev_stats->multicast; 9513 stats->tx_dropped += prev_stats->tx_dropped; 9514 } 9515 9516 static void 9517 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 9518 { 9519 struct bnxt *bp = netdev_priv(dev); 9520 9521 set_bit(BNXT_STATE_READ_STATS, &bp->state); 9522 /* Make sure bnxt_close_nic() sees that we are reading stats before 9523 * we check the BNXT_STATE_OPEN flag. 
9524 */ 9525 smp_mb__after_atomic(); 9526 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 9527 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 9528 *stats = bp->net_stats_prev; 9529 return; 9530 } 9531 9532 bnxt_get_ring_stats(bp, stats); 9533 bnxt_add_prev_stats(bp, stats); 9534 9535 if (bp->flags & BNXT_FLAG_PORT_STATS) { 9536 struct rx_port_stats *rx = bp->hw_rx_port_stats; 9537 struct tx_port_stats *tx = bp->hw_tx_port_stats; 9538 9539 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 9540 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 9541 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 9542 le64_to_cpu(rx->rx_ovrsz_frames) + 9543 le64_to_cpu(rx->rx_runt_frames); 9544 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 9545 le64_to_cpu(rx->rx_jbr_frames); 9546 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 9547 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 9548 stats->tx_errors = le64_to_cpu(tx->tx_err); 9549 } 9550 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 9551 } 9552 9553 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 9554 { 9555 struct net_device *dev = bp->dev; 9556 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9557 struct netdev_hw_addr *ha; 9558 u8 *haddr; 9559 int mc_count = 0; 9560 bool update = false; 9561 int off = 0; 9562 9563 netdev_for_each_mc_addr(ha, dev) { 9564 if (mc_count >= BNXT_MAX_MC_ADDRS) { 9565 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9566 vnic->mc_list_count = 0; 9567 return false; 9568 } 9569 haddr = ha->addr; 9570 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 9571 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 9572 update = true; 9573 } 9574 off += ETH_ALEN; 9575 mc_count++; 9576 } 9577 if (mc_count) 9578 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 9579 9580 if (mc_count != vnic->mc_list_count) { 9581 vnic->mc_list_count = mc_count; 9582 update = true; 9583 } 9584 return update; 9585 } 9586 9587 static bool bnxt_uc_list_updated(struct bnxt *bp) 9588 { 9589 struct net_device *dev = bp->dev; 9590 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9591 struct netdev_hw_addr *ha; 9592 int off = 0; 9593 9594 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 9595 return true; 9596 9597 netdev_for_each_uc_addr(ha, dev) { 9598 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 9599 return true; 9600 9601 off += ETH_ALEN; 9602 } 9603 return false; 9604 } 9605 9606 static void bnxt_set_rx_mode(struct net_device *dev) 9607 { 9608 struct bnxt *bp = netdev_priv(dev); 9609 struct bnxt_vnic_info *vnic; 9610 bool mc_update = false; 9611 bool uc_update; 9612 u32 mask; 9613 9614 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 9615 return; 9616 9617 vnic = &bp->vnic_info[0]; 9618 mask = vnic->rx_mask; 9619 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 9620 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 9621 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 9622 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 9623 9624 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 9625 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9626 9627 uc_update = bnxt_uc_list_updated(bp); 9628 9629 if (dev->flags & IFF_BROADCAST) 9630 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 9631 if (dev->flags & IFF_ALLMULTI) { 9632 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9633 vnic->mc_list_count = 0; 9634 } else { 9635 mc_update = bnxt_mc_list_updated(bp, &mask); 9636 } 9637 9638 if (mask != vnic->rx_mask || uc_update || mc_update) { 9639 vnic->rx_mask = mask; 9640 9641 set_bit(BNXT_RX_MASK_SP_EVENT, 
&bp->sp_event); 9642 bnxt_queue_sp_work(bp); 9643 } 9644 } 9645 9646 static int bnxt_cfg_rx_mode(struct bnxt *bp) 9647 { 9648 struct net_device *dev = bp->dev; 9649 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9650 struct netdev_hw_addr *ha; 9651 int i, off = 0, rc; 9652 bool uc_update; 9653 9654 netif_addr_lock_bh(dev); 9655 uc_update = bnxt_uc_list_updated(bp); 9656 netif_addr_unlock_bh(dev); 9657 9658 if (!uc_update) 9659 goto skip_uc; 9660 9661 mutex_lock(&bp->hwrm_cmd_lock); 9662 for (i = 1; i < vnic->uc_filter_count; i++) { 9663 struct hwrm_cfa_l2_filter_free_input req = {0}; 9664 9665 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 9666 -1); 9667 9668 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 9669 9670 rc = _hwrm_send_message(bp, &req, sizeof(req), 9671 HWRM_CMD_TIMEOUT); 9672 } 9673 mutex_unlock(&bp->hwrm_cmd_lock); 9674 9675 vnic->uc_filter_count = 1; 9676 9677 netif_addr_lock_bh(dev); 9678 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 9679 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 9680 } else { 9681 netdev_for_each_uc_addr(ha, dev) { 9682 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 9683 off += ETH_ALEN; 9684 vnic->uc_filter_count++; 9685 } 9686 } 9687 netif_addr_unlock_bh(dev); 9688 9689 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 9690 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 9691 if (rc) { 9692 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 9693 rc); 9694 vnic->uc_filter_count = i; 9695 return rc; 9696 } 9697 } 9698 9699 skip_uc: 9700 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 9701 if (rc && vnic->mc_list_count) { 9702 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 9703 rc); 9704 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 9705 vnic->mc_list_count = 0; 9706 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 9707 } 9708 if (rc) 9709 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 9710 rc); 9711 9712 return rc; 9713 } 9714 9715 static bool bnxt_can_reserve_rings(struct bnxt *bp) 9716 { 9717 #ifdef CONFIG_BNXT_SRIOV 9718 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 9719 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9720 9721 /* No minimum rings were provisioned by the PF. Don't 9722 * reserve rings by default when device is down. 
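		 * (In other words, as the checks below implement it: rings
		 * already guaranteed or reserved by the PF make reservation
		 * worthwhile even while down; with nothing provisioned, only
		 * reserve once the interface is actually running.)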
		 */
		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
			return true;

		if (!netif_running(bp->dev))
			return false;
	}
#endif
	return true;
}

/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
			return true;
		return false;
	}
	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
		return true;
	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
		return true;
	return false;
}

/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int vnics, max_vnics, max_rss_ctxs;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return bnxt_rfs_supported(bp);
	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
		return false;

	vnics = 1 + bp->rx_nr_rings;
	max_vnics = bnxt_get_max_func_vnics(bp);
	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);

	/* RSS contexts not a limiting factor */
	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
		max_rss_ctxs = max_vnics;
	if (vnics > max_vnics || vnics > max_rss_ctxs) {
		if (bp->rx_nr_rings > 1)
			netdev_warn(bp->dev,
				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
				    min(max_rss_ctxs - 1, max_vnics - 1));
		return false;
	}

	if (!BNXT_NEW_RM(bp))
		return true;

	if (vnics == bp->hw_resc.resv_vnics)
		return true;

	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
	if (vnics <= bp->hw_resc.resv_vnics)
		return true;

	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
	return false;
#else
	return false;
#endif
}

static netdev_features_t bnxt_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
		features &= ~NETIF_F_NTUPLE;

	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);

	if (!(features & NETIF_F_GRO))
		features &= ~NETIF_F_GRO_HW;

	if (features & NETIF_F_GRO_HW)
		features &= ~NETIF_F_LRO;

	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
	 * turned on or off together.
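	 * (Internally the driver keeps a single BNXT_FLAG_STRIP_VLAN flag,
	 * set from the CTAG bit in bnxt_set_features() below, which is why
	 * the two stack-visible RX flags are forced to move in lockstep
	 * here.)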
9812 */ 9813 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 9814 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 9815 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 9816 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 9817 NETIF_F_HW_VLAN_STAG_RX); 9818 else 9819 features |= NETIF_F_HW_VLAN_CTAG_RX | 9820 NETIF_F_HW_VLAN_STAG_RX; 9821 } 9822 #ifdef CONFIG_BNXT_SRIOV 9823 if (BNXT_VF(bp)) { 9824 if (bp->vf.vlan) { 9825 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 9826 NETIF_F_HW_VLAN_STAG_RX); 9827 } 9828 } 9829 #endif 9830 return features; 9831 } 9832 9833 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 9834 { 9835 struct bnxt *bp = netdev_priv(dev); 9836 u32 flags = bp->flags; 9837 u32 changes; 9838 int rc = 0; 9839 bool re_init = false; 9840 bool update_tpa = false; 9841 9842 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 9843 if (features & NETIF_F_GRO_HW) 9844 flags |= BNXT_FLAG_GRO; 9845 else if (features & NETIF_F_LRO) 9846 flags |= BNXT_FLAG_LRO; 9847 9848 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 9849 flags &= ~BNXT_FLAG_TPA; 9850 9851 if (features & NETIF_F_HW_VLAN_CTAG_RX) 9852 flags |= BNXT_FLAG_STRIP_VLAN; 9853 9854 if (features & NETIF_F_NTUPLE) 9855 flags |= BNXT_FLAG_RFS; 9856 9857 changes = flags ^ bp->flags; 9858 if (changes & BNXT_FLAG_TPA) { 9859 update_tpa = true; 9860 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 9861 (flags & BNXT_FLAG_TPA) == 0 || 9862 (bp->flags & BNXT_FLAG_CHIP_P5)) 9863 re_init = true; 9864 } 9865 9866 if (changes & ~BNXT_FLAG_TPA) 9867 re_init = true; 9868 9869 if (flags != bp->flags) { 9870 u32 old_flags = bp->flags; 9871 9872 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 9873 bp->flags = flags; 9874 if (update_tpa) 9875 bnxt_set_ring_params(bp); 9876 return rc; 9877 } 9878 9879 if (re_init) { 9880 bnxt_close_nic(bp, false, false); 9881 bp->flags = flags; 9882 if (update_tpa) 9883 bnxt_set_ring_params(bp); 9884 9885 return bnxt_open_nic(bp, false, false); 9886 } 9887 if (update_tpa) { 9888 bp->flags = flags; 9889 rc = bnxt_set_tpa(bp, 9890 (flags & BNXT_FLAG_TPA) ? 
9891 true : false); 9892 if (rc) 9893 bp->flags = old_flags; 9894 } 9895 } 9896 return rc; 9897 } 9898 9899 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 9900 u32 ring_id, u32 *prod, u32 *cons) 9901 { 9902 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; 9903 struct hwrm_dbg_ring_info_get_input req = {0}; 9904 int rc; 9905 9906 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); 9907 req.ring_type = ring_type; 9908 req.fw_ring_id = cpu_to_le32(ring_id); 9909 mutex_lock(&bp->hwrm_cmd_lock); 9910 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 9911 if (!rc) { 9912 *prod = le32_to_cpu(resp->producer_index); 9913 *cons = le32_to_cpu(resp->consumer_index); 9914 } 9915 mutex_unlock(&bp->hwrm_cmd_lock); 9916 return rc; 9917 } 9918 9919 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 9920 { 9921 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 9922 int i = bnapi->index; 9923 9924 if (!txr) 9925 return; 9926 9927 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 9928 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 9929 txr->tx_cons); 9930 } 9931 9932 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 9933 { 9934 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 9935 int i = bnapi->index; 9936 9937 if (!rxr) 9938 return; 9939 9940 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 9941 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 9942 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 9943 rxr->rx_sw_agg_prod); 9944 } 9945 9946 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 9947 { 9948 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 9949 int i = bnapi->index; 9950 9951 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 9952 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 9953 } 9954 9955 static void bnxt_dbg_dump_states(struct bnxt *bp) 9956 { 9957 int i; 9958 struct bnxt_napi *bnapi; 9959 9960 for (i = 0; i < bp->cp_nr_rings; i++) { 9961 bnapi = bp->bnapi[i]; 9962 if (netif_msg_drv(bp)) { 9963 bnxt_dump_tx_sw_state(bnapi); 9964 bnxt_dump_rx_sw_state(bnapi); 9965 bnxt_dump_cp_sw_state(bnapi); 9966 } 9967 } 9968 } 9969 9970 static void bnxt_reset_task(struct bnxt *bp, bool silent) 9971 { 9972 if (!silent) 9973 bnxt_dbg_dump_states(bp); 9974 if (netif_running(bp->dev)) { 9975 int rc; 9976 9977 if (silent) { 9978 bnxt_close_nic(bp, false, false); 9979 bnxt_open_nic(bp, false, false); 9980 } else { 9981 bnxt_ulp_stop(bp); 9982 bnxt_close_nic(bp, true, false); 9983 rc = bnxt_open_nic(bp, true, false); 9984 bnxt_ulp_start(bp, rc); 9985 } 9986 } 9987 } 9988 9989 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 9990 { 9991 struct bnxt *bp = netdev_priv(dev); 9992 9993 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 9994 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 9995 bnxt_queue_sp_work(bp); 9996 } 9997 9998 static void bnxt_fw_health_check(struct bnxt *bp) 9999 { 10000 struct bnxt_fw_health *fw_health = bp->fw_health; 10001 u32 val; 10002 10003 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10004 return; 10005 10006 if (fw_health->tmr_counter) { 10007 fw_health->tmr_counter--; 10008 return; 10009 } 10010 10011 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 10012 if (val == fw_health->last_fw_heartbeat) 10013 goto fw_reset; 10014 10015 fw_health->last_fw_heartbeat = val; 10016 10017 val = 
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
	if (val != fw_health->last_fw_reset_cnt)
		goto fw_reset;

	fw_health->tmr_counter = fw_health->tmr_multiplier;
	return;

fw_reset:
	set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
	bnxt_queue_sp_work(bp);
}

static void bnxt_timer(struct timer_list *t)
{
	struct bnxt *bp = from_timer(bp, t, timer);
	struct net_device *dev = bp->dev;

	if (!netif_running(dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnxt_restart_timer;

	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		bnxt_fw_health_check(bp);

	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
	    bp->stats_coal_ticks) {
		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}

	if (bnxt_tc_flower_enabled(bp)) {
		set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}

#ifdef CONFIG_RFS_ACCEL
	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
		set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
#endif /*CONFIG_RFS_ACCEL*/

	if (bp->link_info.phy_retry) {
		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
			bp->link_info.phy_retry = false;
			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
		} else {
			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
			bnxt_queue_sp_work(bp);
		}
	}

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
	    netif_carrier_ok(dev)) {
		set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
bnxt_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

static void bnxt_rtnl_lock_sp(struct bnxt *bp)
{
	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
	 * set. If the device is being closed, bnxt_close() may be holding
	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
}

static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
{
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}

/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
	bnxt_rtnl_lock_sp(bp);
	if (test_bit(BNXT_STATE_OPEN, &bp->state))
		bnxt_reset_task(bp, silent);
	bnxt_rtnl_unlock_sp(bp);
}

static void bnxt_fw_reset_close(struct bnxt *bp)
{
	bnxt_ulp_stop(bp);
	/* When firmware is in fatal state, disable PCI device to prevent
	 * any potential bad DMAs before freeing kernel memory.
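	 * Disabling the device also stops bus mastering, so in-flight DMA
	 * should no longer land in rings that are about to be freed; the
	 * device is re-enabled later by bnxt_fw_reset_task() in the
	 * BNXT_FW_RESET_STATE_ENABLE_DEV step.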
10111 */ 10112 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 10113 pci_disable_device(bp->pdev); 10114 __bnxt_close_nic(bp, true, false); 10115 bnxt_clear_int_mode(bp); 10116 bnxt_hwrm_func_drv_unrgtr(bp); 10117 if (pci_is_enabled(bp->pdev)) 10118 pci_disable_device(bp->pdev); 10119 bnxt_free_ctx_mem(bp); 10120 kfree(bp->ctx); 10121 bp->ctx = NULL; 10122 } 10123 10124 static bool is_bnxt_fw_ok(struct bnxt *bp) 10125 { 10126 struct bnxt_fw_health *fw_health = bp->fw_health; 10127 bool no_heartbeat = false, has_reset = false; 10128 u32 val; 10129 10130 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 10131 if (val == fw_health->last_fw_heartbeat) 10132 no_heartbeat = true; 10133 10134 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 10135 if (val != fw_health->last_fw_reset_cnt) 10136 has_reset = true; 10137 10138 if (!no_heartbeat && has_reset) 10139 return true; 10140 10141 return false; 10142 } 10143 10144 /* rtnl_lock is acquired before calling this function */ 10145 static void bnxt_force_fw_reset(struct bnxt *bp) 10146 { 10147 struct bnxt_fw_health *fw_health = bp->fw_health; 10148 u32 wait_dsecs; 10149 10150 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 10151 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 10152 return; 10153 10154 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10155 bnxt_fw_reset_close(bp); 10156 wait_dsecs = fw_health->master_func_wait_dsecs; 10157 if (fw_health->master) { 10158 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 10159 wait_dsecs = 0; 10160 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 10161 } else { 10162 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 10163 wait_dsecs = fw_health->normal_func_wait_dsecs; 10164 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10165 } 10166 10167 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 10168 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 10169 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 10170 } 10171 10172 void bnxt_fw_exception(struct bnxt *bp) 10173 { 10174 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 10175 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 10176 bnxt_rtnl_lock_sp(bp); 10177 bnxt_force_fw_reset(bp); 10178 bnxt_rtnl_unlock_sp(bp); 10179 } 10180 10181 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 10182 * < 0 on error. 
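 * Callers use the count as a wait hint: bnxt_fw_reset() below stretches
 * its maximum wait by roughly one second per VF (n * 10 dsecs) and the
 * reset task keeps polling in BNXT_FW_RESET_STATE_POLL_VF until the VFs
 * have unregistered or that wait expires.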
10183 */ 10184 static int bnxt_get_registered_vfs(struct bnxt *bp) 10185 { 10186 #ifdef CONFIG_BNXT_SRIOV 10187 int rc; 10188 10189 if (!BNXT_PF(bp)) 10190 return 0; 10191 10192 rc = bnxt_hwrm_func_qcfg(bp); 10193 if (rc) { 10194 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 10195 return rc; 10196 } 10197 if (bp->pf.registered_vfs) 10198 return bp->pf.registered_vfs; 10199 if (bp->sriov_cfg) 10200 return 1; 10201 #endif 10202 return 0; 10203 } 10204 10205 void bnxt_fw_reset(struct bnxt *bp) 10206 { 10207 bnxt_rtnl_lock_sp(bp); 10208 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 10209 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10210 int n = 0, tmo; 10211 10212 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10213 if (bp->pf.active_vfs && 10214 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 10215 n = bnxt_get_registered_vfs(bp); 10216 if (n < 0) { 10217 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 10218 n); 10219 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10220 dev_close(bp->dev); 10221 goto fw_reset_exit; 10222 } else if (n > 0) { 10223 u16 vf_tmo_dsecs = n * 10; 10224 10225 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 10226 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 10227 bp->fw_reset_state = 10228 BNXT_FW_RESET_STATE_POLL_VF; 10229 bnxt_queue_fw_reset_work(bp, HZ / 10); 10230 goto fw_reset_exit; 10231 } 10232 bnxt_fw_reset_close(bp); 10233 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10234 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 10235 tmo = HZ / 10; 10236 } else { 10237 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10238 tmo = bp->fw_reset_min_dsecs * HZ / 10; 10239 } 10240 bnxt_queue_fw_reset_work(bp, tmo); 10241 } 10242 fw_reset_exit: 10243 bnxt_rtnl_unlock_sp(bp); 10244 } 10245 10246 static void bnxt_chk_missed_irq(struct bnxt *bp) 10247 { 10248 int i; 10249 10250 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 10251 return; 10252 10253 for (i = 0; i < bp->cp_nr_rings; i++) { 10254 struct bnxt_napi *bnapi = bp->bnapi[i]; 10255 struct bnxt_cp_ring_info *cpr; 10256 u32 fw_ring_id; 10257 int j; 10258 10259 if (!bnapi) 10260 continue; 10261 10262 cpr = &bnapi->cp_ring; 10263 for (j = 0; j < 2; j++) { 10264 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 10265 u32 val[2]; 10266 10267 if (!cpr2 || cpr2->has_more_work || 10268 !bnxt_has_work(bp, cpr2)) 10269 continue; 10270 10271 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 10272 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 10273 continue; 10274 } 10275 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 10276 bnxt_dbg_hwrm_ring_info_get(bp, 10277 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 10278 fw_ring_id, &val[0], &val[1]); 10279 cpr->missed_irqs++; 10280 } 10281 } 10282 } 10283 10284 static void bnxt_cfg_ntp_filters(struct bnxt *); 10285 10286 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 10287 { 10288 struct bnxt_link_info *link_info = &bp->link_info; 10289 10290 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 10291 link_info->autoneg = BNXT_AUTONEG_SPEED; 10292 if (bp->hwrm_spec_code >= 0x10201) { 10293 if (link_info->auto_pause_setting & 10294 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 10295 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10296 } else { 10297 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 10298 } 10299 link_info->advertising = link_info->auto_link_speeds; 10300 } else { 10301 link_info->req_link_speed = link_info->force_link_speed; 10302 link_info->req_duplex = link_info->duplex_setting; 10303 } 10304 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 10305 
link_info->req_flow_ctrl = 10306 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 10307 else 10308 link_info->req_flow_ctrl = link_info->force_pause_setting; 10309 } 10310 10311 static void bnxt_sp_task(struct work_struct *work) 10312 { 10313 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 10314 10315 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10316 smp_mb__after_atomic(); 10317 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 10318 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10319 return; 10320 } 10321 10322 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 10323 bnxt_cfg_rx_mode(bp); 10324 10325 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 10326 bnxt_cfg_ntp_filters(bp); 10327 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 10328 bnxt_hwrm_exec_fwd_req(bp); 10329 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 10330 bnxt_hwrm_tunnel_dst_port_alloc( 10331 bp, bp->vxlan_port, 10332 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10333 } 10334 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 10335 bnxt_hwrm_tunnel_dst_port_free( 10336 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10337 } 10338 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { 10339 bnxt_hwrm_tunnel_dst_port_alloc( 10340 bp, bp->nge_port, 10341 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10342 } 10343 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { 10344 bnxt_hwrm_tunnel_dst_port_free( 10345 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10346 } 10347 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 10348 bnxt_hwrm_port_qstats(bp); 10349 bnxt_hwrm_port_qstats_ext(bp); 10350 bnxt_hwrm_pcie_qstats(bp); 10351 } 10352 10353 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 10354 int rc; 10355 10356 mutex_lock(&bp->link_lock); 10357 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 10358 &bp->sp_event)) 10359 bnxt_hwrm_phy_qcaps(bp); 10360 10361 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 10362 &bp->sp_event)) 10363 bnxt_init_ethtool_link_settings(bp); 10364 10365 rc = bnxt_update_link(bp, true); 10366 mutex_unlock(&bp->link_lock); 10367 if (rc) 10368 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 10369 rc); 10370 } 10371 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 10372 int rc; 10373 10374 mutex_lock(&bp->link_lock); 10375 rc = bnxt_update_phy_setting(bp); 10376 mutex_unlock(&bp->link_lock); 10377 if (rc) { 10378 netdev_warn(bp->dev, "update phy settings retry failed\n"); 10379 } else { 10380 bp->link_info.phy_retry = false; 10381 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 10382 } 10383 } 10384 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 10385 mutex_lock(&bp->link_lock); 10386 bnxt_get_port_module_status(bp); 10387 mutex_unlock(&bp->link_lock); 10388 } 10389 10390 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 10391 bnxt_tc_flow_stats_work(bp); 10392 10393 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 10394 bnxt_chk_missed_irq(bp); 10395 10396 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 10397 * must be the last functions to be called before exiting. 
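	 * (bnxt_reset() re-enters bnxt_rtnl_lock_sp(), which drops
	 * BNXT_STATE_IN_SP_TASK before taking rtnl_lock to avoid
	 * deadlocking against bnxt_close(), so the flag can no longer be
	 * relied on once these run.)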
10398 */ 10399 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 10400 bnxt_reset(bp, false); 10401 10402 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 10403 bnxt_reset(bp, true); 10404 10405 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) 10406 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT); 10407 10408 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 10409 if (!is_bnxt_fw_ok(bp)) 10410 bnxt_devlink_health_report(bp, 10411 BNXT_FW_EXCEPTION_SP_EVENT); 10412 } 10413 10414 smp_mb__before_atomic(); 10415 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 10416 } 10417 10418 /* Under rtnl_lock */ 10419 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 10420 int tx_xdp) 10421 { 10422 int max_rx, max_tx, tx_sets = 1; 10423 int tx_rings_needed, stats; 10424 int rx_rings = rx; 10425 int cp, vnics, rc; 10426 10427 if (tcs) 10428 tx_sets = tcs; 10429 10430 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 10431 if (rc) 10432 return rc; 10433 10434 if (max_rx < rx) 10435 return -ENOMEM; 10436 10437 tx_rings_needed = tx * tx_sets + tx_xdp; 10438 if (max_tx < tx_rings_needed) 10439 return -ENOMEM; 10440 10441 vnics = 1; 10442 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) 10443 vnics += rx_rings; 10444 10445 if (bp->flags & BNXT_FLAG_AGG_RINGS) 10446 rx_rings <<= 1; 10447 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; 10448 stats = cp; 10449 if (BNXT_NEW_RM(bp)) { 10450 cp += bnxt_get_ulp_msix_num(bp); 10451 stats += bnxt_get_ulp_stat_ctxs(bp); 10452 } 10453 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 10454 stats, vnics); 10455 } 10456 10457 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 10458 { 10459 if (bp->bar2) { 10460 pci_iounmap(pdev, bp->bar2); 10461 bp->bar2 = NULL; 10462 } 10463 10464 if (bp->bar1) { 10465 pci_iounmap(pdev, bp->bar1); 10466 bp->bar1 = NULL; 10467 } 10468 10469 if (bp->bar0) { 10470 pci_iounmap(pdev, bp->bar0); 10471 bp->bar0 = NULL; 10472 } 10473 } 10474 10475 static void bnxt_cleanup_pci(struct bnxt *bp) 10476 { 10477 bnxt_unmap_bars(bp, bp->pdev); 10478 pci_release_regions(bp->pdev); 10479 if (pci_is_enabled(bp->pdev)) 10480 pci_disable_device(bp->pdev); 10481 } 10482 10483 static void bnxt_init_dflt_coal(struct bnxt *bp) 10484 { 10485 struct bnxt_coal *coal; 10486 10487 /* Tick values in micro seconds. 10488 * 1 coal_buf x bufs_per_record = 1 completion record. 
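	 * Rough worked example with the RX defaults below (assuming this
	 * reading of the units): bufs_per_record = 2 and coal_bufs = 30
	 * means the interrupt is coalesced over about 30 / 2 = 15
	 * completion records, or until coal_ticks = 10 usec elapse,
	 * whichever comes first.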
10489 */ 10490 coal = &bp->rx_coal; 10491 coal->coal_ticks = 10; 10492 coal->coal_bufs = 30; 10493 coal->coal_ticks_irq = 1; 10494 coal->coal_bufs_irq = 2; 10495 coal->idle_thresh = 50; 10496 coal->bufs_per_record = 2; 10497 coal->budget = 64; /* NAPI budget */ 10498 10499 coal = &bp->tx_coal; 10500 coal->coal_ticks = 28; 10501 coal->coal_bufs = 30; 10502 coal->coal_ticks_irq = 2; 10503 coal->coal_bufs_irq = 2; 10504 coal->bufs_per_record = 1; 10505 10506 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 10507 } 10508 10509 static void bnxt_alloc_fw_health(struct bnxt *bp) 10510 { 10511 if (bp->fw_health) 10512 return; 10513 10514 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 10515 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 10516 return; 10517 10518 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 10519 if (!bp->fw_health) { 10520 netdev_warn(bp->dev, "Failed to allocate fw_health\n"); 10521 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 10522 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 10523 } 10524 } 10525 10526 static int bnxt_fw_init_one_p1(struct bnxt *bp) 10527 { 10528 int rc; 10529 10530 bp->fw_cap = 0; 10531 rc = bnxt_hwrm_ver_get(bp); 10532 if (rc) 10533 return rc; 10534 10535 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { 10536 rc = bnxt_alloc_kong_hwrm_resources(bp); 10537 if (rc) 10538 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; 10539 } 10540 10541 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 10542 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { 10543 rc = bnxt_alloc_hwrm_short_cmd_req(bp); 10544 if (rc) 10545 return rc; 10546 } 10547 rc = bnxt_hwrm_func_reset(bp); 10548 if (rc) 10549 return -ENODEV; 10550 10551 bnxt_hwrm_fw_set_time(bp); 10552 return 0; 10553 } 10554 10555 static int bnxt_fw_init_one_p2(struct bnxt *bp) 10556 { 10557 int rc; 10558 10559 /* Get the MAX capabilities for this function */ 10560 rc = bnxt_hwrm_func_qcaps(bp); 10561 if (rc) { 10562 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 10563 rc); 10564 return -ENODEV; 10565 } 10566 10567 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 10568 if (rc) 10569 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 10570 rc); 10571 10572 bnxt_alloc_fw_health(bp); 10573 rc = bnxt_hwrm_error_recovery_qcfg(bp); 10574 if (rc) 10575 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 10576 rc); 10577 10578 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 10579 if (rc) 10580 return -ENODEV; 10581 10582 bnxt_hwrm_func_qcfg(bp); 10583 bnxt_hwrm_vnic_qcaps(bp); 10584 bnxt_hwrm_port_led_qcaps(bp); 10585 bnxt_ethtool_init(bp); 10586 bnxt_dcb_init(bp); 10587 return 0; 10588 } 10589 10590 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 10591 { 10592 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP; 10593 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 10594 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 10595 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 10596 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 10597 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 10598 bp->flags |= BNXT_FLAG_UDP_RSS_CAP; 10599 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 10600 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 10601 } 10602 } 10603 10604 static void bnxt_set_dflt_rfs(struct bnxt *bp) 10605 { 10606 struct net_device *dev = bp->dev; 10607 10608 dev->hw_features &= ~NETIF_F_NTUPLE; 10609 dev->features &= ~NETIF_F_NTUPLE; 10610 bp->flags &= ~BNXT_FLAG_RFS; 10611 if (bnxt_rfs_supported(bp)) { 10612 dev->hw_features |= NETIF_F_NTUPLE; 10613 if (bnxt_rfs_capable(bp)) { 10614 bp->flags |= BNXT_FLAG_RFS; 10615 
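			/* hw_features advertises NTUPLE whenever the chip/firmware
			 * can do RFS at all (bnxt_rfs_supported()); dev->features
			 * only turns it on when current VNIC/ring resources allow
			 * it (bnxt_rfs_capable()).
			 */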
dev->features |= NETIF_F_NTUPLE; 10616 } 10617 } 10618 } 10619 10620 static void bnxt_fw_init_one_p3(struct bnxt *bp) 10621 { 10622 struct pci_dev *pdev = bp->pdev; 10623 10624 bnxt_set_dflt_rss_hash_type(bp); 10625 bnxt_set_dflt_rfs(bp); 10626 10627 bnxt_get_wol_settings(bp); 10628 if (bp->flags & BNXT_FLAG_WOL_CAP) 10629 device_set_wakeup_enable(&pdev->dev, bp->wol); 10630 else 10631 device_set_wakeup_capable(&pdev->dev, false); 10632 10633 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 10634 bnxt_hwrm_coal_params_qcaps(bp); 10635 } 10636 10637 static int bnxt_fw_init_one(struct bnxt *bp) 10638 { 10639 int rc; 10640 10641 rc = bnxt_fw_init_one_p1(bp); 10642 if (rc) { 10643 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 10644 return rc; 10645 } 10646 rc = bnxt_fw_init_one_p2(bp); 10647 if (rc) { 10648 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 10649 return rc; 10650 } 10651 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 10652 if (rc) 10653 return rc; 10654 10655 /* In case fw capabilities have changed, destroy the unneeded 10656 * reporters and create newly capable ones. 10657 */ 10658 bnxt_dl_fw_reporters_destroy(bp, false); 10659 bnxt_dl_fw_reporters_create(bp); 10660 bnxt_fw_init_one_p3(bp); 10661 return 0; 10662 } 10663 10664 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 10665 { 10666 struct bnxt_fw_health *fw_health = bp->fw_health; 10667 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 10668 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 10669 u32 reg_type, reg_off, delay_msecs; 10670 10671 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 10672 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 10673 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 10674 switch (reg_type) { 10675 case BNXT_FW_HEALTH_REG_TYPE_CFG: 10676 pci_write_config_dword(bp->pdev, reg_off, val); 10677 break; 10678 case BNXT_FW_HEALTH_REG_TYPE_GRC: 10679 writel(reg_off & BNXT_GRC_BASE_MASK, 10680 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 10681 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 10682 /* fall through */ 10683 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 10684 writel(val, bp->bar0 + reg_off); 10685 break; 10686 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 10687 writel(val, bp->bar1 + reg_off); 10688 break; 10689 } 10690 if (delay_msecs) { 10691 pci_read_config_dword(bp->pdev, 0, &val); 10692 msleep(delay_msecs); 10693 } 10694 } 10695 10696 static void bnxt_reset_all(struct bnxt *bp) 10697 { 10698 struct bnxt_fw_health *fw_health = bp->fw_health; 10699 int i, rc; 10700 10701 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10702 #ifdef CONFIG_TEE_BNXT_FW 10703 rc = tee_bnxt_fw_load(); 10704 if (rc) 10705 netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc); 10706 bp->fw_reset_timestamp = jiffies; 10707 #endif 10708 return; 10709 } 10710 10711 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 10712 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 10713 bnxt_fw_reset_writel(bp, i); 10714 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 10715 struct hwrm_fw_reset_input req = {0}; 10716 10717 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); 10718 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); 10719 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 10720 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 10721 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 10722 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 10723 if (rc) 10724 netdev_warn(bp->dev, "Unable to reset FW 
rc=%d\n", rc); 10725 } 10726 bp->fw_reset_timestamp = jiffies; 10727 } 10728 10729 static void bnxt_fw_reset_task(struct work_struct *work) 10730 { 10731 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 10732 int rc; 10733 10734 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 10735 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 10736 return; 10737 } 10738 10739 switch (bp->fw_reset_state) { 10740 case BNXT_FW_RESET_STATE_POLL_VF: { 10741 int n = bnxt_get_registered_vfs(bp); 10742 int tmo; 10743 10744 if (n < 0) { 10745 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 10746 n, jiffies_to_msecs(jiffies - 10747 bp->fw_reset_timestamp)); 10748 goto fw_reset_abort; 10749 } else if (n > 0) { 10750 if (time_after(jiffies, bp->fw_reset_timestamp + 10751 (bp->fw_reset_max_dsecs * HZ / 10))) { 10752 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10753 bp->fw_reset_state = 0; 10754 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 10755 n); 10756 return; 10757 } 10758 bnxt_queue_fw_reset_work(bp, HZ / 10); 10759 return; 10760 } 10761 bp->fw_reset_timestamp = jiffies; 10762 rtnl_lock(); 10763 bnxt_fw_reset_close(bp); 10764 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 10765 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 10766 tmo = HZ / 10; 10767 } else { 10768 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10769 tmo = bp->fw_reset_min_dsecs * HZ / 10; 10770 } 10771 rtnl_unlock(); 10772 bnxt_queue_fw_reset_work(bp, tmo); 10773 return; 10774 } 10775 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 10776 u32 val; 10777 10778 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 10779 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 10780 !time_after(jiffies, bp->fw_reset_timestamp + 10781 (bp->fw_reset_max_dsecs * HZ / 10))) { 10782 bnxt_queue_fw_reset_work(bp, HZ / 5); 10783 return; 10784 } 10785 10786 if (!bp->fw_health->master) { 10787 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 10788 10789 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10790 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 10791 return; 10792 } 10793 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 10794 } 10795 /* fall through */ 10796 case BNXT_FW_RESET_STATE_RESET_FW: 10797 bnxt_reset_all(bp); 10798 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 10799 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 10800 return; 10801 case BNXT_FW_RESET_STATE_ENABLE_DEV: 10802 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 10803 u32 val; 10804 10805 val = bnxt_fw_health_readl(bp, 10806 BNXT_FW_RESET_INPROG_REG); 10807 if (val) 10808 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n", 10809 val); 10810 } 10811 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 10812 if (pci_enable_device(bp->pdev)) { 10813 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 10814 goto fw_reset_abort; 10815 } 10816 pci_set_master(bp->pdev); 10817 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 10818 /* fall through */ 10819 case BNXT_FW_RESET_STATE_POLL_FW: 10820 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 10821 rc = __bnxt_hwrm_ver_get(bp, true); 10822 if (rc) { 10823 if (time_after(jiffies, bp->fw_reset_timestamp + 10824 (bp->fw_reset_max_dsecs * HZ / 10))) { 10825 netdev_err(bp->dev, "Firmware reset aborted\n"); 10826 goto fw_reset_abort; 10827 } 10828 bnxt_queue_fw_reset_work(bp, HZ / 5); 10829 return; 10830 } 10831 
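		/* Firmware answered VER_GET, so it is back up: restore the
		 * normal HWRM timeout below and fall through to re-open the
		 * netdev.
		 */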
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 10832 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 10833 /* fall through */ 10834 case BNXT_FW_RESET_STATE_OPENING: 10835 while (!rtnl_trylock()) { 10836 bnxt_queue_fw_reset_work(bp, HZ / 10); 10837 return; 10838 } 10839 rc = bnxt_open(bp->dev); 10840 if (rc) { 10841 netdev_err(bp->dev, "bnxt_open_nic() failed\n"); 10842 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10843 dev_close(bp->dev); 10844 } 10845 10846 bp->fw_reset_state = 0; 10847 /* Make sure fw_reset_state is 0 before clearing the flag */ 10848 smp_mb__before_atomic(); 10849 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10850 bnxt_ulp_start(bp, rc); 10851 if (!rc) 10852 bnxt_reenable_sriov(bp); 10853 bnxt_dl_health_recovery_done(bp); 10854 bnxt_dl_health_status_update(bp, true); 10855 rtnl_unlock(); 10856 break; 10857 } 10858 return; 10859 10860 fw_reset_abort: 10861 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 10862 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 10863 bnxt_dl_health_status_update(bp, false); 10864 bp->fw_reset_state = 0; 10865 rtnl_lock(); 10866 dev_close(bp->dev); 10867 rtnl_unlock(); 10868 } 10869 10870 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 10871 { 10872 int rc; 10873 struct bnxt *bp = netdev_priv(dev); 10874 10875 SET_NETDEV_DEV(dev, &pdev->dev); 10876 10877 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 10878 rc = pci_enable_device(pdev); 10879 if (rc) { 10880 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 10881 goto init_err; 10882 } 10883 10884 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 10885 dev_err(&pdev->dev, 10886 "Cannot find PCI device base address, aborting\n"); 10887 rc = -ENODEV; 10888 goto init_err_disable; 10889 } 10890 10891 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 10892 if (rc) { 10893 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 10894 goto init_err_disable; 10895 } 10896 10897 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 10898 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 10899 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 10900 goto init_err_disable; 10901 } 10902 10903 pci_set_master(pdev); 10904 10905 bp->dev = dev; 10906 bp->pdev = pdev; 10907 10908 bp->bar0 = pci_ioremap_bar(pdev, 0); 10909 if (!bp->bar0) { 10910 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 10911 rc = -ENOMEM; 10912 goto init_err_release; 10913 } 10914 10915 bp->bar1 = pci_ioremap_bar(pdev, 2); 10916 if (!bp->bar1) { 10917 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); 10918 rc = -ENOMEM; 10919 goto init_err_release; 10920 } 10921 10922 bp->bar2 = pci_ioremap_bar(pdev, 4); 10923 if (!bp->bar2) { 10924 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 10925 rc = -ENOMEM; 10926 goto init_err_release; 10927 } 10928 10929 pci_enable_pcie_error_reporting(pdev); 10930 10931 INIT_WORK(&bp->sp_task, bnxt_sp_task); 10932 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 10933 10934 spin_lock_init(&bp->ntp_fltr_lock); 10935 #if BITS_PER_LONG == 32 10936 spin_lock_init(&bp->db_lock); 10937 #endif 10938 10939 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 10940 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 10941 10942 bnxt_init_dflt_coal(bp); 10943 10944 timer_setup(&bp->timer, bnxt_timer, 0); 10945 bp->current_interval = BNXT_TIMER_INTERVAL; 10946 10947 clear_bit(BNXT_STATE_OPEN, &bp->state); 10948 return 0; 10949 10950 init_err_release: 10951 
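	/* Error unwind mirrors the setup order above: unmap whichever BARs
	 * were mapped, then release the PCI regions and disable the device.
	 */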
bnxt_unmap_bars(bp, pdev); 10952 pci_release_regions(pdev); 10953 10954 init_err_disable: 10955 pci_disable_device(pdev); 10956 10957 init_err: 10958 return rc; 10959 } 10960 10961 /* rtnl_lock held */ 10962 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 10963 { 10964 struct sockaddr *addr = p; 10965 struct bnxt *bp = netdev_priv(dev); 10966 int rc = 0; 10967 10968 if (!is_valid_ether_addr(addr->sa_data)) 10969 return -EADDRNOTAVAIL; 10970 10971 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 10972 return 0; 10973 10974 rc = bnxt_approve_mac(bp, addr->sa_data, true); 10975 if (rc) 10976 return rc; 10977 10978 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 10979 if (netif_running(dev)) { 10980 bnxt_close_nic(bp, false, false); 10981 rc = bnxt_open_nic(bp, false, false); 10982 } 10983 10984 return rc; 10985 } 10986 10987 /* rtnl_lock held */ 10988 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 10989 { 10990 struct bnxt *bp = netdev_priv(dev); 10991 10992 if (netif_running(dev)) 10993 bnxt_close_nic(bp, true, false); 10994 10995 dev->mtu = new_mtu; 10996 bnxt_set_ring_params(bp); 10997 10998 if (netif_running(dev)) 10999 return bnxt_open_nic(bp, true, false); 11000 11001 return 0; 11002 } 11003 11004 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 11005 { 11006 struct bnxt *bp = netdev_priv(dev); 11007 bool sh = false; 11008 int rc; 11009 11010 if (tc > bp->max_tc) { 11011 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 11012 tc, bp->max_tc); 11013 return -EINVAL; 11014 } 11015 11016 if (netdev_get_num_tc(dev) == tc) 11017 return 0; 11018 11019 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 11020 sh = true; 11021 11022 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 11023 sh, tc, bp->tx_nr_rings_xdp); 11024 if (rc) 11025 return rc; 11026 11027 /* Needs to close the device and do hw resource re-allocations */ 11028 if (netif_running(bp->dev)) 11029 bnxt_close_nic(bp, true, false); 11030 11031 if (tc) { 11032 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 11033 netdev_set_num_tc(dev, tc); 11034 } else { 11035 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 11036 netdev_reset_tc(dev); 11037 } 11038 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 11039 bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 11040 bp->tx_nr_rings + bp->rx_nr_rings; 11041 11042 if (netif_running(bp->dev)) 11043 return bnxt_open_nic(bp, true, false); 11044 11045 return 0; 11046 } 11047 11048 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 11049 void *cb_priv) 11050 { 11051 struct bnxt *bp = cb_priv; 11052 11053 if (!bnxt_tc_flower_enabled(bp) || 11054 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 11055 return -EOPNOTSUPP; 11056 11057 switch (type) { 11058 case TC_SETUP_CLSFLOWER: 11059 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 11060 default: 11061 return -EOPNOTSUPP; 11062 } 11063 } 11064 11065 LIST_HEAD(bnxt_block_cb_list); 11066 11067 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 11068 void *type_data) 11069 { 11070 struct bnxt *bp = netdev_priv(dev); 11071 11072 switch (type) { 11073 case TC_SETUP_BLOCK: 11074 return flow_block_cb_setup_simple(type_data, 11075 &bnxt_block_cb_list, 11076 bnxt_setup_tc_block_cb, 11077 bp, bp, true); 11078 case TC_SETUP_QDISC_MQPRIO: { 11079 struct tc_mqprio_qopt *mqprio = type_data; 11080 11081 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 11082 11083 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 11084 } 11085 default: 11086 return -EOPNOTSUPP; 11087 } 11088 } 11089 11090 #ifdef CONFIG_RFS_ACCEL 11091 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 11092 struct bnxt_ntuple_filter *f2) 11093 { 11094 struct flow_keys *keys1 = &f1->fkeys; 11095 struct flow_keys *keys2 = &f2->fkeys; 11096 11097 if (keys1->basic.n_proto != keys2->basic.n_proto || 11098 keys1->basic.ip_proto != keys2->basic.ip_proto) 11099 return false; 11100 11101 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 11102 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 11103 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) 11104 return false; 11105 } else { 11106 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, 11107 sizeof(keys1->addrs.v6addrs.src)) || 11108 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, 11109 sizeof(keys1->addrs.v6addrs.dst))) 11110 return false; 11111 } 11112 11113 if (keys1->ports.ports == keys2->ports.ports && 11114 keys1->control.flags == keys2->control.flags && 11115 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 11116 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 11117 return true; 11118 11119 return false; 11120 } 11121 11122 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 11123 u16 rxq_index, u32 flow_id) 11124 { 11125 struct bnxt *bp = netdev_priv(dev); 11126 struct bnxt_ntuple_filter *fltr, *new_fltr; 11127 struct flow_keys *fkeys; 11128 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 11129 int rc = 0, idx, bit_id, l2_idx = 0; 11130 struct hlist_head *head; 11131 u32 flags; 11132 11133 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 11134 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 11135 int off = 0, j; 11136 11137 netif_addr_lock_bh(dev); 11138 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 11139 if (ether_addr_equal(eth->h_dest, 11140 vnic->uc_list + off)) { 11141 l2_idx = j + 1; 11142 break; 11143 } 11144 } 11145 netif_addr_unlock_bh(dev); 11146 if (!l2_idx) 11147 return -EINVAL; 11148 } 11149 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 11150 if (!new_fltr) 11151 return -ENOMEM; 11152 11153 fkeys = &new_fltr->fkeys; 11154 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 11155 rc = -EPROTONOSUPPORT; 11156 goto err_free; 
11157 } 11158 11159 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 11160 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 11161 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 11162 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 11163 rc = -EPROTONOSUPPORT; 11164 goto err_free; 11165 } 11166 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 11167 bp->hwrm_spec_code < 0x10601) { 11168 rc = -EPROTONOSUPPORT; 11169 goto err_free; 11170 } 11171 flags = fkeys->control.flags; 11172 if (((flags & FLOW_DIS_ENCAPSULATION) && 11173 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 11174 rc = -EPROTONOSUPPORT; 11175 goto err_free; 11176 } 11177 11178 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 11179 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 11180 11181 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 11182 head = &bp->ntp_fltr_hash_tbl[idx]; 11183 rcu_read_lock(); 11184 hlist_for_each_entry_rcu(fltr, head, hash) { 11185 if (bnxt_fltr_match(fltr, new_fltr)) { 11186 rcu_read_unlock(); 11187 rc = 0; 11188 goto err_free; 11189 } 11190 } 11191 rcu_read_unlock(); 11192 11193 spin_lock_bh(&bp->ntp_fltr_lock); 11194 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 11195 BNXT_NTP_FLTR_MAX_FLTR, 0); 11196 if (bit_id < 0) { 11197 spin_unlock_bh(&bp->ntp_fltr_lock); 11198 rc = -ENOMEM; 11199 goto err_free; 11200 } 11201 11202 new_fltr->sw_id = (u16)bit_id; 11203 new_fltr->flow_id = flow_id; 11204 new_fltr->l2_fltr_idx = l2_idx; 11205 new_fltr->rxq = rxq_index; 11206 hlist_add_head_rcu(&new_fltr->hash, head); 11207 bp->ntp_fltr_count++; 11208 spin_unlock_bh(&bp->ntp_fltr_lock); 11209 11210 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 11211 bnxt_queue_sp_work(bp); 11212 11213 return new_fltr->sw_id; 11214 11215 err_free: 11216 kfree(new_fltr); 11217 return rc; 11218 } 11219 11220 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11221 { 11222 int i; 11223 11224 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 11225 struct hlist_head *head; 11226 struct hlist_node *tmp; 11227 struct bnxt_ntuple_filter *fltr; 11228 int rc; 11229 11230 head = &bp->ntp_fltr_hash_tbl[i]; 11231 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 11232 bool del = false; 11233 11234 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 11235 if (rps_may_expire_flow(bp->dev, fltr->rxq, 11236 fltr->flow_id, 11237 fltr->sw_id)) { 11238 bnxt_hwrm_cfa_ntuple_filter_free(bp, 11239 fltr); 11240 del = true; 11241 } 11242 } else { 11243 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 11244 fltr); 11245 if (rc) 11246 del = true; 11247 else 11248 set_bit(BNXT_FLTR_VALID, &fltr->state); 11249 } 11250 11251 if (del) { 11252 spin_lock_bh(&bp->ntp_fltr_lock); 11253 hlist_del_rcu(&fltr->hash); 11254 bp->ntp_fltr_count--; 11255 spin_unlock_bh(&bp->ntp_fltr_lock); 11256 synchronize_rcu(); 11257 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 11258 kfree(fltr); 11259 } 11260 } 11261 } 11262 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 11263 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 11264 } 11265 11266 #else 11267 11268 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 11269 { 11270 } 11271 11272 #endif /* CONFIG_RFS_ACCEL */ 11273 11274 static void bnxt_udp_tunnel_add(struct net_device *dev, 11275 struct udp_tunnel_info *ti) 11276 { 11277 struct bnxt *bp = netdev_priv(dev); 11278 11279 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 11280 return; 11281 11282 if (!netif_running(dev)) 11283 return; 11284 11285 switch (ti->type) { 11286 case UDP_TUNNEL_TYPE_VXLAN: 11287 if 
(bp->vxlan_port_cnt && bp->vxlan_port != ti->port) 11288 return; 11289 11290 bp->vxlan_port_cnt++; 11291 if (bp->vxlan_port_cnt == 1) { 11292 bp->vxlan_port = ti->port; 11293 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 11294 bnxt_queue_sp_work(bp); 11295 } 11296 break; 11297 case UDP_TUNNEL_TYPE_GENEVE: 11298 if (bp->nge_port_cnt && bp->nge_port != ti->port) 11299 return; 11300 11301 bp->nge_port_cnt++; 11302 if (bp->nge_port_cnt == 1) { 11303 bp->nge_port = ti->port; 11304 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); 11305 } 11306 break; 11307 default: 11308 return; 11309 } 11310 11311 bnxt_queue_sp_work(bp); 11312 } 11313 11314 static void bnxt_udp_tunnel_del(struct net_device *dev, 11315 struct udp_tunnel_info *ti) 11316 { 11317 struct bnxt *bp = netdev_priv(dev); 11318 11319 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 11320 return; 11321 11322 if (!netif_running(dev)) 11323 return; 11324 11325 switch (ti->type) { 11326 case UDP_TUNNEL_TYPE_VXLAN: 11327 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) 11328 return; 11329 bp->vxlan_port_cnt--; 11330 11331 if (bp->vxlan_port_cnt != 0) 11332 return; 11333 11334 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 11335 break; 11336 case UDP_TUNNEL_TYPE_GENEVE: 11337 if (!bp->nge_port_cnt || bp->nge_port != ti->port) 11338 return; 11339 bp->nge_port_cnt--; 11340 11341 if (bp->nge_port_cnt != 0) 11342 return; 11343 11344 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); 11345 break; 11346 default: 11347 return; 11348 } 11349 11350 bnxt_queue_sp_work(bp); 11351 } 11352 11353 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 11354 struct net_device *dev, u32 filter_mask, 11355 int nlflags) 11356 { 11357 struct bnxt *bp = netdev_priv(dev); 11358 11359 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 11360 nlflags, filter_mask, NULL); 11361 } 11362 11363 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 11364 u16 flags, struct netlink_ext_ack *extack) 11365 { 11366 struct bnxt *bp = netdev_priv(dev); 11367 struct nlattr *attr, *br_spec; 11368 int rem, rc = 0; 11369 11370 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 11371 return -EOPNOTSUPP; 11372 11373 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 11374 if (!br_spec) 11375 return -EINVAL; 11376 11377 nla_for_each_nested(attr, br_spec, rem) { 11378 u16 mode; 11379 11380 if (nla_type(attr) != IFLA_BRIDGE_MODE) 11381 continue; 11382 11383 if (nla_len(attr) < sizeof(mode)) 11384 return -EINVAL; 11385 11386 mode = nla_get_u16(attr); 11387 if (mode == bp->br_mode) 11388 break; 11389 11390 rc = bnxt_hwrm_set_br_mode(bp, mode); 11391 if (!rc) 11392 bp->br_mode = mode; 11393 break; 11394 } 11395 return rc; 11396 } 11397 11398 int bnxt_get_port_parent_id(struct net_device *dev, 11399 struct netdev_phys_item_id *ppid) 11400 { 11401 struct bnxt *bp = netdev_priv(dev); 11402 11403 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 11404 return -EOPNOTSUPP; 11405 11406 /* The PF and it's VF-reps only support the switchdev framework */ 11407 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 11408 return -EOPNOTSUPP; 11409 11410 ppid->id_len = sizeof(bp->dsn); 11411 memcpy(ppid->id, bp->dsn, ppid->id_len); 11412 11413 return 0; 11414 } 11415 11416 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) 11417 { 11418 struct bnxt *bp = netdev_priv(dev); 11419 11420 return &bp->dl_port; 11421 } 11422 11423 static const struct 
net_device_ops bnxt_netdev_ops = { 11424 .ndo_open = bnxt_open, 11425 .ndo_start_xmit = bnxt_start_xmit, 11426 .ndo_stop = bnxt_close, 11427 .ndo_get_stats64 = bnxt_get_stats64, 11428 .ndo_set_rx_mode = bnxt_set_rx_mode, 11429 .ndo_do_ioctl = bnxt_ioctl, 11430 .ndo_validate_addr = eth_validate_addr, 11431 .ndo_set_mac_address = bnxt_change_mac_addr, 11432 .ndo_change_mtu = bnxt_change_mtu, 11433 .ndo_fix_features = bnxt_fix_features, 11434 .ndo_set_features = bnxt_set_features, 11435 .ndo_tx_timeout = bnxt_tx_timeout, 11436 #ifdef CONFIG_BNXT_SRIOV 11437 .ndo_get_vf_config = bnxt_get_vf_config, 11438 .ndo_set_vf_mac = bnxt_set_vf_mac, 11439 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 11440 .ndo_set_vf_rate = bnxt_set_vf_bw, 11441 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 11442 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 11443 .ndo_set_vf_trust = bnxt_set_vf_trust, 11444 #endif 11445 .ndo_setup_tc = bnxt_setup_tc, 11446 #ifdef CONFIG_RFS_ACCEL 11447 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 11448 #endif 11449 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 11450 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 11451 .ndo_bpf = bnxt_xdp, 11452 .ndo_xdp_xmit = bnxt_xdp_xmit, 11453 .ndo_bridge_getlink = bnxt_bridge_getlink, 11454 .ndo_bridge_setlink = bnxt_bridge_setlink, 11455 .ndo_get_devlink_port = bnxt_get_devlink_port, 11456 }; 11457 11458 static void bnxt_remove_one(struct pci_dev *pdev) 11459 { 11460 struct net_device *dev = pci_get_drvdata(pdev); 11461 struct bnxt *bp = netdev_priv(dev); 11462 11463 if (BNXT_PF(bp)) 11464 bnxt_sriov_disable(bp); 11465 11466 bnxt_dl_fw_reporters_destroy(bp, true); 11467 pci_disable_pcie_error_reporting(pdev); 11468 unregister_netdev(dev); 11469 bnxt_dl_unregister(bp); 11470 bnxt_shutdown_tc(bp); 11471 bnxt_cancel_sp_work(bp); 11472 bp->sp_event = 0; 11473 11474 bnxt_clear_int_mode(bp); 11475 bnxt_hwrm_func_drv_unrgtr(bp); 11476 bnxt_free_hwrm_resources(bp); 11477 bnxt_free_hwrm_short_cmd_req(bp); 11478 bnxt_ethtool_free(bp); 11479 bnxt_dcb_free(bp); 11480 kfree(bp->edev); 11481 bp->edev = NULL; 11482 kfree(bp->fw_health); 11483 bp->fw_health = NULL; 11484 bnxt_cleanup_pci(bp); 11485 bnxt_free_ctx_mem(bp); 11486 kfree(bp->ctx); 11487 bp->ctx = NULL; 11488 bnxt_free_port_stats(bp); 11489 free_netdev(dev); 11490 } 11491 11492 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 11493 { 11494 int rc = 0; 11495 struct bnxt_link_info *link_info = &bp->link_info; 11496 11497 rc = bnxt_hwrm_phy_qcaps(bp); 11498 if (rc) { 11499 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 11500 rc); 11501 return rc; 11502 } 11503 if (!fw_dflt) 11504 return 0; 11505 11506 rc = bnxt_update_link(bp, false); 11507 if (rc) { 11508 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 11509 rc); 11510 return rc; 11511 } 11512 11513 /* Older firmware does not have supported_auto_speeds, so assume 11514 * that all supported speeds can be autonegotiated. 
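	 * (support_auto_speeds is simply back-filled from support_speeds in
	 * that case, as done just below.)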
11515 */ 11516 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 11517 link_info->support_auto_speeds = link_info->support_speeds; 11518 11519 bnxt_init_ethtool_link_settings(bp); 11520 return 0; 11521 } 11522 11523 static int bnxt_get_max_irq(struct pci_dev *pdev) 11524 { 11525 u16 ctrl; 11526 11527 if (!pdev->msix_cap) 11528 return 1; 11529 11530 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 11531 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 11532 } 11533 11534 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 11535 int *max_cp) 11536 { 11537 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11538 int max_ring_grps = 0, max_irq; 11539 11540 *max_tx = hw_resc->max_tx_rings; 11541 *max_rx = hw_resc->max_rx_rings; 11542 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 11543 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 11544 bnxt_get_ulp_msix_num(bp), 11545 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); 11546 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 11547 *max_cp = min_t(int, *max_cp, max_irq); 11548 max_ring_grps = hw_resc->max_hw_ring_grps; 11549 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 11550 *max_cp -= 1; 11551 *max_rx -= 2; 11552 } 11553 if (bp->flags & BNXT_FLAG_AGG_RINGS) 11554 *max_rx >>= 1; 11555 if (bp->flags & BNXT_FLAG_CHIP_P5) { 11556 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 11557 /* On P5 chips, max_cp output param should be available NQs */ 11558 *max_cp = max_irq; 11559 } 11560 *max_rx = min_t(int, *max_rx, max_ring_grps); 11561 } 11562 11563 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 11564 { 11565 int rx, tx, cp; 11566 11567 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 11568 *max_rx = rx; 11569 *max_tx = tx; 11570 if (!rx || !tx || !cp) 11571 return -ENOMEM; 11572 11573 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 11574 } 11575 11576 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 11577 bool shared) 11578 { 11579 int rc; 11580 11581 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 11582 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 11583 /* Not enough rings, try disabling agg rings. */ 11584 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 11585 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 11586 if (rc) { 11587 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 11588 bp->flags |= BNXT_FLAG_AGG_RINGS; 11589 return rc; 11590 } 11591 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 11592 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 11593 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 11594 bnxt_set_ring_params(bp); 11595 } 11596 11597 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 11598 int max_cp, max_stat, max_irq; 11599 11600 /* Reserve minimum resources for RoCE */ 11601 max_cp = bnxt_get_max_func_cp_rings(bp); 11602 max_stat = bnxt_get_max_func_stat_ctxs(bp); 11603 max_irq = bnxt_get_max_func_irqs(bp); 11604 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 11605 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 11606 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 11607 return 0; 11608 11609 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 11610 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 11611 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 11612 max_cp = min_t(int, max_cp, max_irq); 11613 max_cp = min_t(int, max_cp, max_stat); 11614 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 11615 if (rc) 11616 rc = 0; 11617 } 11618 return rc; 11619 } 11620 11621 /* In initial default shared ring setting, each shared ring must have a 11622 * RX/TX ring pair. 
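 * For example (assuming the trimming below): with 6 RX and 4 TX rings
 * requested, cp_nr_rings becomes min(4, 6) = 4 and both RX and TX are
 * clamped to 4, so every completion ring serves exactly one RX/TX pair.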
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	if (rc) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
		bp->flags |= BNXT_FLAG_RFS;
		bp->dev->features |= NETIF_F_NTUPLE;
	}
init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}
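/* The PCIe Device Serial Number extended capability holds a 64-bit serial
 * number in the two dwords following the capability header.  It is read as
 * two config-space dwords at offsets 4 and 8 from the capability position
 * and stored little-endian in dsn[]; the PF later uses it as the eswitch
 * switch_id.
 */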
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	u32 dw;

	if (!pos) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	/* DSN (two dw) is at an offset of 4 from the cap pos */
	pos += 4;
	pci_read_config_dword(pdev, pos, &dw);
	put_unaligned_le32(dw, &dsn[0]);
	pci_read_config_dword(pdev, pos + 4, &dw);
	put_unaligned_le32(dw, &dsn[4]);
	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}
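/* One-time probe for a PF or VF.  Roughly: map the BARs and allocate the
 * netdev (bnxt_init_board), set up HWRM command buffers, run the first two
 * firmware init phases, advertise offload features, initialize the MAC
 * address and default rings, pick the interrupt mode, then register with
 * devlink and the networking core.
 */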
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* Clear any pending DMA transactions from crash kernel
	 * while loading driver in capture kernel.
	 */
	if (is_kdump_kernel()) {
		pci_clear_master(pdev);
		pcie_flr(pdev);
	}

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_CHIP_P5(bp))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	bp->ulp_probe = bnxt_ulp_probe;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	bnxt_dl_register(bp);

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	if (BNXT_PF(bp))
		devlink_port_type_eth_set(&bp->dl_port, bp->dev);
	bnxt_dl_fw_reporters_create(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
	pcie_print_link_status(pdev);

	return 0;

init_err_cleanup:
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_free_hwrm_resources(bp);
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}
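/* ->shutdown() handler: quiesce the device for reboot or power-off.  The
 * interface is closed and interrupts are freed; on power-off, Wake-on-LAN
 * is armed (if configured) and the device is put into D3hot.
 */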
11936 */ 11937 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 11938 11939 if (BNXT_PF(bp)) { 11940 if (!bnxt_pf_wq) { 11941 bnxt_pf_wq = 11942 create_singlethread_workqueue("bnxt_pf_wq"); 11943 if (!bnxt_pf_wq) { 11944 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 11945 goto init_err_pci_clean; 11946 } 11947 } 11948 bnxt_init_tc(bp); 11949 } 11950 11951 bnxt_dl_register(bp); 11952 11953 rc = register_netdev(dev); 11954 if (rc) 11955 goto init_err_cleanup; 11956 11957 if (BNXT_PF(bp)) 11958 devlink_port_type_eth_set(&bp->dl_port, bp->dev); 11959 bnxt_dl_fw_reporters_create(bp); 11960 11961 netdev_info(dev, "%s found at mem %lx, node addr %pM\n", 11962 board_info[ent->driver_data].name, 11963 (long)pci_resource_start(pdev, 0), dev->dev_addr); 11964 pcie_print_link_status(pdev); 11965 11966 return 0; 11967 11968 init_err_cleanup: 11969 bnxt_dl_unregister(bp); 11970 bnxt_shutdown_tc(bp); 11971 bnxt_clear_int_mode(bp); 11972 11973 init_err_pci_clean: 11974 bnxt_hwrm_func_drv_unrgtr(bp); 11975 bnxt_free_hwrm_short_cmd_req(bp); 11976 bnxt_free_hwrm_resources(bp); 11977 kfree(bp->fw_health); 11978 bp->fw_health = NULL; 11979 bnxt_cleanup_pci(bp); 11980 bnxt_free_ctx_mem(bp); 11981 kfree(bp->ctx); 11982 bp->ctx = NULL; 11983 11984 init_err_free: 11985 free_netdev(dev); 11986 return rc; 11987 } 11988 11989 static void bnxt_shutdown(struct pci_dev *pdev) 11990 { 11991 struct net_device *dev = pci_get_drvdata(pdev); 11992 struct bnxt *bp; 11993 11994 if (!dev) 11995 return; 11996 11997 rtnl_lock(); 11998 bp = netdev_priv(dev); 11999 if (!bp) 12000 goto shutdown_exit; 12001 12002 if (netif_running(dev)) 12003 dev_close(dev); 12004 12005 bnxt_ulp_shutdown(bp); 12006 bnxt_clear_int_mode(bp); 12007 pci_disable_device(pdev); 12008 12009 if (system_state == SYSTEM_POWER_OFF) { 12010 pci_wake_from_d3(pdev, bp->wol); 12011 pci_set_power_state(pdev, PCI_D3hot); 12012 } 12013 12014 shutdown_exit: 12015 rtnl_unlock(); 12016 } 12017 12018 #ifdef CONFIG_PM_SLEEP 12019 static int bnxt_suspend(struct device *device) 12020 { 12021 struct net_device *dev = dev_get_drvdata(device); 12022 struct bnxt *bp = netdev_priv(dev); 12023 int rc = 0; 12024 12025 rtnl_lock(); 12026 bnxt_ulp_stop(bp); 12027 if (netif_running(dev)) { 12028 netif_device_detach(dev); 12029 rc = bnxt_close(dev); 12030 } 12031 bnxt_hwrm_func_drv_unrgtr(bp); 12032 pci_disable_device(bp->pdev); 12033 bnxt_free_ctx_mem(bp); 12034 kfree(bp->ctx); 12035 bp->ctx = NULL; 12036 rtnl_unlock(); 12037 return rc; 12038 } 12039 12040 static int bnxt_resume(struct device *device) 12041 { 12042 struct net_device *dev = dev_get_drvdata(device); 12043 struct bnxt *bp = netdev_priv(dev); 12044 int rc = 0; 12045 12046 rtnl_lock(); 12047 rc = pci_enable_device(bp->pdev); 12048 if (rc) { 12049 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", 12050 rc); 12051 goto resume_exit; 12052 } 12053 pci_set_master(bp->pdev); 12054 if (bnxt_hwrm_ver_get(bp)) { 12055 rc = -ENODEV; 12056 goto resume_exit; 12057 } 12058 rc = bnxt_hwrm_func_reset(bp); 12059 if (rc) { 12060 rc = -EBUSY; 12061 goto resume_exit; 12062 } 12063 12064 if (bnxt_hwrm_queue_qportcfg(bp)) { 12065 rc = -ENODEV; 12066 goto resume_exit; 12067 } 12068 12069 if (bp->hwrm_spec_code >= 0x10803) { 12070 if (bnxt_alloc_ctx_mem(bp)) { 12071 rc = -ENODEV; 12072 goto resume_exit; 12073 } 12074 } 12075 if (BNXT_NEW_RM(bp)) 12076 bnxt_hwrm_func_resc_qcaps(bp, false); 12077 12078 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { 12079 rc = -ENODEV; 12080 goto resume_exit; 12081 } 12082 12083 
static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	if (bnxt_hwrm_queue_qportcfg(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	if (bp->hwrm_spec_code >= 0x10803) {
		if (bnxt_alloc_ctx_mem(bp)) {
			rc = -ENODEV;
			goto resume_exit;
		}
	}
	if (BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
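/* PCI AER recovery flow implemented by the handlers below: the PCI core
 * first calls .error_detected(), which detaches the netdev and reports
 * whether a slot reset is needed; after the bus reset, .slot_reset()
 * re-enables the device and resets the firmware; finally .resume()
 * re-attaches the netdev so traffic can flow again.
 */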
12190 */ 12191 static void bnxt_io_resume(struct pci_dev *pdev) 12192 { 12193 struct net_device *netdev = pci_get_drvdata(pdev); 12194 12195 rtnl_lock(); 12196 12197 netif_device_attach(netdev); 12198 12199 rtnl_unlock(); 12200 } 12201 12202 static const struct pci_error_handlers bnxt_err_handler = { 12203 .error_detected = bnxt_io_error_detected, 12204 .slot_reset = bnxt_io_slot_reset, 12205 .resume = bnxt_io_resume 12206 }; 12207 12208 static struct pci_driver bnxt_pci_driver = { 12209 .name = DRV_MODULE_NAME, 12210 .id_table = bnxt_pci_tbl, 12211 .probe = bnxt_init_one, 12212 .remove = bnxt_remove_one, 12213 .shutdown = bnxt_shutdown, 12214 .driver.pm = BNXT_PM_OPS, 12215 .err_handler = &bnxt_err_handler, 12216 #if defined(CONFIG_BNXT_SRIOV) 12217 .sriov_configure = bnxt_sriov_configure, 12218 #endif 12219 }; 12220 12221 static int __init bnxt_init(void) 12222 { 12223 bnxt_debug_init(); 12224 return pci_register_driver(&bnxt_pci_driver); 12225 } 12226 12227 static void __exit bnxt_exit(void) 12228 { 12229 pci_unregister_driver(&bnxt_pci_driver); 12230 if (bnxt_pf_wq) 12231 destroy_workqueue(bnxt_pf_wq); 12232 bnxt_debug_exit(); 12233 } 12234 12235 module_init(bnxt_init); 12236 module_exit(bnxt_exit); 12237