/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}
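/* Main transmit routine.  Small packets that fit within the push
 * threshold are copied inline into the tx_push buffer and written
 * directly through the doorbell BAR ("push" mode); larger packets are
 * DMA-mapped and described with long TX BDs in the normal path below.
 */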
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (skb->xmit_more && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
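/* Reclaim completed TX descriptors: unmap DMA buffers, free the skbs,
 * advance the consumer index, and wake the TX queue if it was stopped
 * and enough descriptors have become available again.
 */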
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
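/* RX buffer allocation helpers.  In page mode (used when an XDP program
 * is attached) whole pages are mapped for the RX ring; otherwise
 * kmalloc'ed data buffers are used.  Aggregation ring entries always use
 * pages (see bnxt_alloc_rx_page() further down).
 */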
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		__free_page(page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	struct skb_frag_struct *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	frag->page_offset += payload;
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}
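/* Attach the aggregation buffers of a jumbo/TPA completion to the skb as
 * page fragments, replenishing the aggregation ring as we go.  On
 * allocation failure the remaining buffers are recycled and the skb is
 * dropped.
 */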
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}
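/* TPA (hardware GRO/LRO) handling: a TPA_START completion stashes the
 * buffer and metadata in rx_tpa[agg_id]; the matching TPA_END completion
 * (bnxt_tpa_end() below) builds the aggregated skb and, when needed,
 * fixes up the headers for GRO via bp->gro_func().
 */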
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
			   u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
}

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data_ptr, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*event |= BNXT_AGG_EVENT;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}
/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

	return rc;
}

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}
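/* Handle asynchronous firmware events (link changes, PF driver unload,
 * port module events, VF config changes) by setting the corresponding
 * sp_event bits and scheduling the slow path task.
 */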
#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;

		/* print unsupported speed warning in forced speed mode only */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
		    (data1 & 0x20000)) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			if (speed != SPEED_UNKNOWN)
				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
					    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
	}
	/* fall through */
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
	bnxt_queue_sp_work(bp);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}

static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);

	default:
		break;
	}

	return 0;
}
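/* Interrupt handlers.  bnxt_msix() is used with MSI-X (one vector per
 * completion ring); bnxt_inta() handles legacy INTx and checks the
 * interrupt status register for shared interrupts.  Both schedule NAPI.
 */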
*/ 1888 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 1889 return IRQ_HANDLED; 1890 1891 napi_schedule(&bnapi->napi); 1892 return IRQ_HANDLED; 1893 } 1894 1895 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1896 int budget) 1897 { 1898 struct bnxt_napi *bnapi = cpr->bnapi; 1899 u32 raw_cons = cpr->cp_raw_cons; 1900 u32 cons; 1901 int tx_pkts = 0; 1902 int rx_pkts = 0; 1903 u8 event = 0; 1904 struct tx_cmp *txcmp; 1905 1906 cpr->has_more_work = 0; 1907 while (1) { 1908 int rc; 1909 1910 cons = RING_CMP(raw_cons); 1911 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 1912 1913 if (!TX_CMP_VALID(txcmp, raw_cons)) 1914 break; 1915 1916 /* The valid test of the entry must be done first before 1917 * reading any further. 1918 */ 1919 dma_rmb(); 1920 cpr->had_work_done = 1; 1921 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 1922 tx_pkts++; 1923 /* return full budget so NAPI will complete. */ 1924 if (unlikely(tx_pkts > bp->tx_wake_thresh)) { 1925 rx_pkts = budget; 1926 raw_cons = NEXT_RAW_CMP(raw_cons); 1927 if (budget) 1928 cpr->has_more_work = 1; 1929 break; 1930 } 1931 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1932 if (likely(budget)) 1933 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 1934 else 1935 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 1936 &event); 1937 if (likely(rc >= 0)) 1938 rx_pkts += rc; 1939 /* Increment rx_pkts when rc is -ENOMEM to count towards 1940 * the NAPI budget. Otherwise, we may potentially loop 1941 * here forever if we consistently cannot allocate 1942 * buffers. 1943 */ 1944 else if (rc == -ENOMEM && budget) 1945 rx_pkts++; 1946 else if (rc == -EBUSY) /* partial completion */ 1947 break; 1948 } else if (unlikely((TX_CMP_TYPE(txcmp) == 1949 CMPL_BASE_TYPE_HWRM_DONE) || 1950 (TX_CMP_TYPE(txcmp) == 1951 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 1952 (TX_CMP_TYPE(txcmp) == 1953 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 1954 bnxt_hwrm_handler(bp, txcmp); 1955 } 1956 raw_cons = NEXT_RAW_CMP(raw_cons); 1957 1958 if (rx_pkts && rx_pkts == budget) { 1959 cpr->has_more_work = 1; 1960 break; 1961 } 1962 } 1963 1964 if (event & BNXT_TX_EVENT) { 1965 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 1966 u16 prod = txr->tx_prod; 1967 1968 /* Sync BD data before updating doorbell */ 1969 wmb(); 1970 1971 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 1972 } 1973 1974 cpr->cp_raw_cons = raw_cons; 1975 bnapi->tx_pkts += tx_pkts; 1976 bnapi->events |= event; 1977 return rx_pkts; 1978 } 1979 1980 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) 1981 { 1982 if (bnapi->tx_pkts) { 1983 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); 1984 bnapi->tx_pkts = 0; 1985 } 1986 1987 if (bnapi->events & BNXT_RX_EVENT) { 1988 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1989 1990 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 1991 if (bnapi->events & BNXT_AGG_EVENT) 1992 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 1993 } 1994 bnapi->events = 0; 1995 } 1996 1997 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1998 int budget) 1999 { 2000 struct bnxt_napi *bnapi = cpr->bnapi; 2001 int rx_pkts; 2002 2003 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 2004 2005 /* ACK completion ring before freeing tx ring and producing new 2006 * buffers in rx/agg rings to prevent overflowing the completion 2007 * ring. 
2008 */ 2009 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 2010 2011 __bnxt_poll_work_done(bp, bnapi); 2012 return rx_pkts; 2013 } 2014 2015 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 2016 { 2017 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2018 struct bnxt *bp = bnapi->bp; 2019 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2020 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2021 struct tx_cmp *txcmp; 2022 struct rx_cmp_ext *rxcmp1; 2023 u32 cp_cons, tmp_raw_cons; 2024 u32 raw_cons = cpr->cp_raw_cons; 2025 u32 rx_pkts = 0; 2026 u8 event = 0; 2027 2028 while (1) { 2029 int rc; 2030 2031 cp_cons = RING_CMP(raw_cons); 2032 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2033 2034 if (!TX_CMP_VALID(txcmp, raw_cons)) 2035 break; 2036 2037 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 2038 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 2039 cp_cons = RING_CMP(tmp_raw_cons); 2040 rxcmp1 = (struct rx_cmp_ext *) 2041 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2042 2043 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2044 break; 2045 2046 /* force an error to recycle the buffer */ 2047 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2048 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2049 2050 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 2051 if (likely(rc == -EIO) && budget) 2052 rx_pkts++; 2053 else if (rc == -EBUSY) /* partial completion */ 2054 break; 2055 } else if (unlikely(TX_CMP_TYPE(txcmp) == 2056 CMPL_BASE_TYPE_HWRM_DONE)) { 2057 bnxt_hwrm_handler(bp, txcmp); 2058 } else { 2059 netdev_err(bp->dev, 2060 "Invalid completion received on special ring\n"); 2061 } 2062 raw_cons = NEXT_RAW_CMP(raw_cons); 2063 2064 if (rx_pkts == budget) 2065 break; 2066 } 2067 2068 cpr->cp_raw_cons = raw_cons; 2069 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 2070 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 2071 2072 if (event & BNXT_AGG_EVENT) 2073 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2074 2075 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2076 napi_complete_done(napi, rx_pkts); 2077 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2078 } 2079 return rx_pkts; 2080 } 2081 2082 static int bnxt_poll(struct napi_struct *napi, int budget) 2083 { 2084 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2085 struct bnxt *bp = bnapi->bp; 2086 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2087 int work_done = 0; 2088 2089 while (1) { 2090 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 2091 2092 if (work_done >= budget) { 2093 if (!budget) 2094 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2095 break; 2096 } 2097 2098 if (!bnxt_has_work(bp, cpr)) { 2099 if (napi_complete_done(napi, work_done)) 2100 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 2101 break; 2102 } 2103 } 2104 if (bp->flags & BNXT_FLAG_DIM) { 2105 struct net_dim_sample dim_sample; 2106 2107 net_dim_sample(cpr->event_ctr, 2108 cpr->rx_packets, 2109 cpr->rx_bytes, 2110 &dim_sample); 2111 net_dim(&cpr->dim, dim_sample); 2112 } 2113 mmiowb(); 2114 return work_done; 2115 } 2116 2117 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 2118 { 2119 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2120 int i, work_done = 0; 2121 2122 for (i = 0; i < 2; i++) { 2123 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2124 2125 if (cpr2) { 2126 work_done += __bnxt_poll_work(bp, cpr2, 2127 budget - work_done); 2128 cpr->has_more_work |= cpr2->has_more_work; 2129 } 2130 } 2131 return work_done; 2132 } 2133 2134 static void 
__bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 2135 u64 dbr_type, bool all) 2136 { 2137 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2138 int i; 2139 2140 for (i = 0; i < 2; i++) { 2141 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i]; 2142 struct bnxt_db_info *db; 2143 2144 if (cpr2 && (all || cpr2->had_work_done)) { 2145 db = &cpr2->cp_db; 2146 writeq(db->db_key64 | dbr_type | 2147 RING_CMP(cpr2->cp_raw_cons), db->doorbell); 2148 cpr2->had_work_done = 0; 2149 } 2150 } 2151 __bnxt_poll_work_done(bp, bnapi); 2152 } 2153 2154 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 2155 { 2156 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2157 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2158 u32 raw_cons = cpr->cp_raw_cons; 2159 struct bnxt *bp = bnapi->bp; 2160 struct nqe_cn *nqcmp; 2161 int work_done = 0; 2162 u32 cons; 2163 2164 if (cpr->has_more_work) { 2165 cpr->has_more_work = 0; 2166 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 2167 if (cpr->has_more_work) { 2168 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false); 2169 return work_done; 2170 } 2171 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true); 2172 if (napi_complete_done(napi, work_done)) 2173 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); 2174 return work_done; 2175 } 2176 while (1) { 2177 cons = RING_CMP(raw_cons); 2178 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2179 2180 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 2181 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 2182 false); 2183 cpr->cp_raw_cons = raw_cons; 2184 if (napi_complete_done(napi, work_done)) 2185 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 2186 cpr->cp_raw_cons); 2187 return work_done; 2188 } 2189 2190 /* The valid test of the entry must be done first before 2191 * reading any further. 
2192 */ 2193 dma_rmb(); 2194 2195 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) { 2196 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 2197 struct bnxt_cp_ring_info *cpr2; 2198 2199 cpr2 = cpr->cp_ring_arr[idx]; 2200 work_done += __bnxt_poll_work(bp, cpr2, 2201 budget - work_done); 2202 cpr->has_more_work = cpr2->has_more_work; 2203 } else { 2204 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 2205 } 2206 raw_cons = NEXT_RAW_CMP(raw_cons); 2207 if (cpr->has_more_work) 2208 break; 2209 } 2210 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true); 2211 cpr->cp_raw_cons = raw_cons; 2212 return work_done; 2213 } 2214 2215 static void bnxt_free_tx_skbs(struct bnxt *bp) 2216 { 2217 int i, max_idx; 2218 struct pci_dev *pdev = bp->pdev; 2219 2220 if (!bp->tx_ring) 2221 return; 2222 2223 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2224 for (i = 0; i < bp->tx_nr_rings; i++) { 2225 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2226 int j; 2227 2228 for (j = 0; j < max_idx;) { 2229 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2230 struct sk_buff *skb = tx_buf->skb; 2231 int k, last; 2232 2233 if (!skb) { 2234 j++; 2235 continue; 2236 } 2237 2238 tx_buf->skb = NULL; 2239 2240 if (tx_buf->is_push) { 2241 dev_kfree_skb(skb); 2242 j += 2; 2243 continue; 2244 } 2245 2246 dma_unmap_single(&pdev->dev, 2247 dma_unmap_addr(tx_buf, mapping), 2248 skb_headlen(skb), 2249 PCI_DMA_TODEVICE); 2250 2251 last = tx_buf->nr_frags; 2252 j += 2; 2253 for (k = 0; k < last; k++, j++) { 2254 int ring_idx = j & bp->tx_ring_mask; 2255 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2256 2257 tx_buf = &txr->tx_buf_ring[ring_idx]; 2258 dma_unmap_page( 2259 &pdev->dev, 2260 dma_unmap_addr(tx_buf, mapping), 2261 skb_frag_size(frag), PCI_DMA_TODEVICE); 2262 } 2263 dev_kfree_skb(skb); 2264 } 2265 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 2266 } 2267 } 2268 2269 static void bnxt_free_rx_skbs(struct bnxt *bp) 2270 { 2271 int i, max_idx, max_agg_idx; 2272 struct pci_dev *pdev = bp->pdev; 2273 2274 if (!bp->rx_ring) 2275 return; 2276 2277 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 2278 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 2279 for (i = 0; i < bp->rx_nr_rings; i++) { 2280 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2281 int j; 2282 2283 if (rxr->rx_tpa) { 2284 for (j = 0; j < MAX_TPA; j++) { 2285 struct bnxt_tpa_info *tpa_info = 2286 &rxr->rx_tpa[j]; 2287 u8 *data = tpa_info->data; 2288 2289 if (!data) 2290 continue; 2291 2292 dma_unmap_single_attrs(&pdev->dev, 2293 tpa_info->mapping, 2294 bp->rx_buf_use_size, 2295 bp->rx_dir, 2296 DMA_ATTR_WEAK_ORDERING); 2297 2298 tpa_info->data = NULL; 2299 2300 kfree(data); 2301 } 2302 } 2303 2304 for (j = 0; j < max_idx; j++) { 2305 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 2306 dma_addr_t mapping = rx_buf->mapping; 2307 void *data = rx_buf->data; 2308 2309 if (!data) 2310 continue; 2311 2312 rx_buf->data = NULL; 2313 2314 if (BNXT_RX_PAGE_MODE(bp)) { 2315 mapping -= bp->rx_dma_offset; 2316 dma_unmap_page_attrs(&pdev->dev, mapping, 2317 PAGE_SIZE, bp->rx_dir, 2318 DMA_ATTR_WEAK_ORDERING); 2319 __free_page(data); 2320 } else { 2321 dma_unmap_single_attrs(&pdev->dev, mapping, 2322 bp->rx_buf_use_size, 2323 bp->rx_dir, 2324 DMA_ATTR_WEAK_ORDERING); 2325 kfree(data); 2326 } 2327 } 2328 2329 for (j = 0; j < max_agg_idx; j++) { 2330 struct bnxt_sw_rx_agg_bd *rx_agg_buf = 2331 &rxr->rx_agg_ring[j]; 2332 struct page *page = rx_agg_buf->page; 2333 2334 if (!page) 2335 continue; 2336 2337 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, 2338 
BNXT_RX_PAGE_SIZE, 2339 PCI_DMA_FROMDEVICE, 2340 DMA_ATTR_WEAK_ORDERING); 2341 2342 rx_agg_buf->page = NULL; 2343 __clear_bit(j, rxr->rx_agg_bmap); 2344 2345 __free_page(page); 2346 } 2347 if (rxr->rx_page) { 2348 __free_page(rxr->rx_page); 2349 rxr->rx_page = NULL; 2350 } 2351 } 2352 } 2353 2354 static void bnxt_free_skbs(struct bnxt *bp) 2355 { 2356 bnxt_free_tx_skbs(bp); 2357 bnxt_free_rx_skbs(bp); 2358 } 2359 2360 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2361 { 2362 struct pci_dev *pdev = bp->pdev; 2363 int i; 2364 2365 for (i = 0; i < rmem->nr_pages; i++) { 2366 if (!rmem->pg_arr[i]) 2367 continue; 2368 2369 dma_free_coherent(&pdev->dev, rmem->page_size, 2370 rmem->pg_arr[i], rmem->dma_arr[i]); 2371 2372 rmem->pg_arr[i] = NULL; 2373 } 2374 if (rmem->pg_tbl) { 2375 dma_free_coherent(&pdev->dev, rmem->nr_pages * 8, 2376 rmem->pg_tbl, rmem->pg_tbl_map); 2377 rmem->pg_tbl = NULL; 2378 } 2379 if (rmem->vmem_size && *rmem->vmem) { 2380 vfree(*rmem->vmem); 2381 *rmem->vmem = NULL; 2382 } 2383 } 2384 2385 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 2386 { 2387 struct pci_dev *pdev = bp->pdev; 2388 u64 valid_bit = 0; 2389 int i; 2390 2391 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 2392 valid_bit = PTU_PTE_VALID; 2393 if (rmem->nr_pages > 1) { 2394 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, 2395 rmem->nr_pages * 8, 2396 &rmem->pg_tbl_map, 2397 GFP_KERNEL); 2398 if (!rmem->pg_tbl) 2399 return -ENOMEM; 2400 } 2401 2402 for (i = 0; i < rmem->nr_pages; i++) { 2403 u64 extra_bits = valid_bit; 2404 2405 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2406 rmem->page_size, 2407 &rmem->dma_arr[i], 2408 GFP_KERNEL); 2409 if (!rmem->pg_arr[i]) 2410 return -ENOMEM; 2411 2412 if (rmem->nr_pages > 1) { 2413 if (i == rmem->nr_pages - 2 && 2414 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2415 extra_bits |= PTU_PTE_NEXT_TO_LAST; 2416 else if (i == rmem->nr_pages - 1 && 2417 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 2418 extra_bits |= PTU_PTE_LAST; 2419 rmem->pg_tbl[i] = 2420 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 2421 } 2422 } 2423 2424 if (rmem->vmem_size) { 2425 *rmem->vmem = vzalloc(rmem->vmem_size); 2426 if (!(*rmem->vmem)) 2427 return -ENOMEM; 2428 } 2429 return 0; 2430 } 2431 2432 static void bnxt_free_rx_rings(struct bnxt *bp) 2433 { 2434 int i; 2435 2436 if (!bp->rx_ring) 2437 return; 2438 2439 for (i = 0; i < bp->rx_nr_rings; i++) { 2440 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2441 struct bnxt_ring_struct *ring; 2442 2443 if (rxr->xdp_prog) 2444 bpf_prog_put(rxr->xdp_prog); 2445 2446 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 2447 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2448 2449 kfree(rxr->rx_tpa); 2450 rxr->rx_tpa = NULL; 2451 2452 kfree(rxr->rx_agg_bmap); 2453 rxr->rx_agg_bmap = NULL; 2454 2455 ring = &rxr->rx_ring_struct; 2456 bnxt_free_ring(bp, &ring->ring_mem); 2457 2458 ring = &rxr->rx_agg_ring_struct; 2459 bnxt_free_ring(bp, &ring->ring_mem); 2460 } 2461 } 2462 2463 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2464 { 2465 int i, rc, agg_rings = 0, tpa_rings = 0; 2466 2467 if (!bp->rx_ring) 2468 return -ENOMEM; 2469 2470 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2471 agg_rings = 1; 2472 2473 if (bp->flags & BNXT_FLAG_TPA) 2474 tpa_rings = 1; 2475 2476 for (i = 0; i < bp->rx_nr_rings; i++) { 2477 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2478 struct bnxt_ring_struct *ring; 2479 2480 ring = &rxr->rx_ring_struct; 2481 2482 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); 2483 if (rc < 0) 
2484 return rc; 2485 2486 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2487 if (rc) 2488 return rc; 2489 2490 ring->grp_idx = i; 2491 if (agg_rings) { 2492 u16 mem_size; 2493 2494 ring = &rxr->rx_agg_ring_struct; 2495 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2496 if (rc) 2497 return rc; 2498 2499 ring->grp_idx = i; 2500 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2501 mem_size = rxr->rx_agg_bmap_size / 8; 2502 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2503 if (!rxr->rx_agg_bmap) 2504 return -ENOMEM; 2505 2506 if (tpa_rings) { 2507 rxr->rx_tpa = kcalloc(MAX_TPA, 2508 sizeof(struct bnxt_tpa_info), 2509 GFP_KERNEL); 2510 if (!rxr->rx_tpa) 2511 return -ENOMEM; 2512 } 2513 } 2514 } 2515 return 0; 2516 } 2517 2518 static void bnxt_free_tx_rings(struct bnxt *bp) 2519 { 2520 int i; 2521 struct pci_dev *pdev = bp->pdev; 2522 2523 if (!bp->tx_ring) 2524 return; 2525 2526 for (i = 0; i < bp->tx_nr_rings; i++) { 2527 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2528 struct bnxt_ring_struct *ring; 2529 2530 if (txr->tx_push) { 2531 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2532 txr->tx_push, txr->tx_push_mapping); 2533 txr->tx_push = NULL; 2534 } 2535 2536 ring = &txr->tx_ring_struct; 2537 2538 bnxt_free_ring(bp, &ring->ring_mem); 2539 } 2540 } 2541 2542 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2543 { 2544 int i, j, rc; 2545 struct pci_dev *pdev = bp->pdev; 2546 2547 bp->tx_push_size = 0; 2548 if (bp->tx_push_thresh) { 2549 int push_size; 2550 2551 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2552 bp->tx_push_thresh); 2553 2554 if (push_size > 256) { 2555 push_size = 0; 2556 bp->tx_push_thresh = 0; 2557 } 2558 2559 bp->tx_push_size = push_size; 2560 } 2561 2562 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2563 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2564 struct bnxt_ring_struct *ring; 2565 u8 qidx; 2566 2567 ring = &txr->tx_ring_struct; 2568 2569 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2570 if (rc) 2571 return rc; 2572 2573 ring->grp_idx = txr->bnapi->index; 2574 if (bp->tx_push_size) { 2575 dma_addr_t mapping; 2576 2577 /* One pre-allocated DMA buffer to backup 2578 * TX push operation 2579 */ 2580 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2581 bp->tx_push_size, 2582 &txr->tx_push_mapping, 2583 GFP_KERNEL); 2584 2585 if (!txr->tx_push) 2586 return -ENOMEM; 2587 2588 mapping = txr->tx_push_mapping + 2589 sizeof(struct tx_push_bd); 2590 txr->data_mapping = cpu_to_le64(mapping); 2591 2592 memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); 2593 } 2594 qidx = bp->tc_to_qidx[j]; 2595 ring->queue_id = bp->q_info[qidx].queue_id; 2596 if (i < bp->tx_nr_rings_xdp) 2597 continue; 2598 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2599 j++; 2600 } 2601 return 0; 2602 } 2603 2604 static void bnxt_free_cp_rings(struct bnxt *bp) 2605 { 2606 int i; 2607 2608 if (!bp->bnapi) 2609 return; 2610 2611 for (i = 0; i < bp->cp_nr_rings; i++) { 2612 struct bnxt_napi *bnapi = bp->bnapi[i]; 2613 struct bnxt_cp_ring_info *cpr; 2614 struct bnxt_ring_struct *ring; 2615 int j; 2616 2617 if (!bnapi) 2618 continue; 2619 2620 cpr = &bnapi->cp_ring; 2621 ring = &cpr->cp_ring_struct; 2622 2623 bnxt_free_ring(bp, &ring->ring_mem); 2624 2625 for (j = 0; j < 2; j++) { 2626 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 2627 2628 if (cpr2) { 2629 ring = &cpr2->cp_ring_struct; 2630 bnxt_free_ring(bp, &ring->ring_mem); 2631 kfree(cpr2); 2632 cpr->cp_ring_arr[j] = NULL; 2633 } 2634 } 2635 } 2636 } 2637 2638 static struct bnxt_cp_ring_info 
*bnxt_alloc_cp_sub_ring(struct bnxt *bp) 2639 { 2640 struct bnxt_ring_mem_info *rmem; 2641 struct bnxt_ring_struct *ring; 2642 struct bnxt_cp_ring_info *cpr; 2643 int rc; 2644 2645 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL); 2646 if (!cpr) 2647 return NULL; 2648 2649 ring = &cpr->cp_ring_struct; 2650 rmem = &ring->ring_mem; 2651 rmem->nr_pages = bp->cp_nr_pages; 2652 rmem->page_size = HW_CMPD_RING_SIZE; 2653 rmem->pg_arr = (void **)cpr->cp_desc_ring; 2654 rmem->dma_arr = cpr->cp_desc_mapping; 2655 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 2656 rc = bnxt_alloc_ring(bp, rmem); 2657 if (rc) { 2658 bnxt_free_ring(bp, rmem); 2659 kfree(cpr); 2660 cpr = NULL; 2661 } 2662 return cpr; 2663 } 2664 2665 static int bnxt_alloc_cp_rings(struct bnxt *bp) 2666 { 2667 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 2668 int i, rc, ulp_base_vec, ulp_msix; 2669 2670 ulp_msix = bnxt_get_ulp_msix_num(bp); 2671 ulp_base_vec = bnxt_get_ulp_msix_base(bp); 2672 for (i = 0; i < bp->cp_nr_rings; i++) { 2673 struct bnxt_napi *bnapi = bp->bnapi[i]; 2674 struct bnxt_cp_ring_info *cpr; 2675 struct bnxt_ring_struct *ring; 2676 2677 if (!bnapi) 2678 continue; 2679 2680 cpr = &bnapi->cp_ring; 2681 cpr->bnapi = bnapi; 2682 ring = &cpr->cp_ring_struct; 2683 2684 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 2685 if (rc) 2686 return rc; 2687 2688 if (ulp_msix && i >= ulp_base_vec) 2689 ring->map_idx = i + ulp_msix; 2690 else 2691 ring->map_idx = i; 2692 2693 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 2694 continue; 2695 2696 if (i < bp->rx_nr_rings) { 2697 struct bnxt_cp_ring_info *cpr2 = 2698 bnxt_alloc_cp_sub_ring(bp); 2699 2700 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2; 2701 if (!cpr2) 2702 return -ENOMEM; 2703 cpr2->bnapi = bnapi; 2704 } 2705 if ((sh && i < bp->tx_nr_rings) || 2706 (!sh && i >= bp->rx_nr_rings)) { 2707 struct bnxt_cp_ring_info *cpr2 = 2708 bnxt_alloc_cp_sub_ring(bp); 2709 2710 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2; 2711 if (!cpr2) 2712 return -ENOMEM; 2713 cpr2->bnapi = bnapi; 2714 } 2715 } 2716 return 0; 2717 } 2718 2719 static void bnxt_init_ring_struct(struct bnxt *bp) 2720 { 2721 int i; 2722 2723 for (i = 0; i < bp->cp_nr_rings; i++) { 2724 struct bnxt_napi *bnapi = bp->bnapi[i]; 2725 struct bnxt_ring_mem_info *rmem; 2726 struct bnxt_cp_ring_info *cpr; 2727 struct bnxt_rx_ring_info *rxr; 2728 struct bnxt_tx_ring_info *txr; 2729 struct bnxt_ring_struct *ring; 2730 2731 if (!bnapi) 2732 continue; 2733 2734 cpr = &bnapi->cp_ring; 2735 ring = &cpr->cp_ring_struct; 2736 rmem = &ring->ring_mem; 2737 rmem->nr_pages = bp->cp_nr_pages; 2738 rmem->page_size = HW_CMPD_RING_SIZE; 2739 rmem->pg_arr = (void **)cpr->cp_desc_ring; 2740 rmem->dma_arr = cpr->cp_desc_mapping; 2741 rmem->vmem_size = 0; 2742 2743 rxr = bnapi->rx_ring; 2744 if (!rxr) 2745 goto skip_rx; 2746 2747 ring = &rxr->rx_ring_struct; 2748 rmem = &ring->ring_mem; 2749 rmem->nr_pages = bp->rx_nr_pages; 2750 rmem->page_size = HW_RXBD_RING_SIZE; 2751 rmem->pg_arr = (void **)rxr->rx_desc_ring; 2752 rmem->dma_arr = rxr->rx_desc_mapping; 2753 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 2754 rmem->vmem = (void **)&rxr->rx_buf_ring; 2755 2756 ring = &rxr->rx_agg_ring_struct; 2757 rmem = &ring->ring_mem; 2758 rmem->nr_pages = bp->rx_agg_nr_pages; 2759 rmem->page_size = HW_RXBD_RING_SIZE; 2760 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 2761 rmem->dma_arr = rxr->rx_agg_desc_mapping; 2762 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 2763 rmem->vmem = (void **)&rxr->rx_agg_ring; 2764 2765 skip_rx: 2766 txr = bnapi->tx_ring; 2767 if (!txr) 2768 
continue; 2769 2770 ring = &txr->tx_ring_struct; 2771 rmem = &ring->ring_mem; 2772 rmem->nr_pages = bp->tx_nr_pages; 2773 rmem->page_size = HW_RXBD_RING_SIZE; 2774 rmem->pg_arr = (void **)txr->tx_desc_ring; 2775 rmem->dma_arr = txr->tx_desc_mapping; 2776 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 2777 rmem->vmem = (void **)&txr->tx_buf_ring; 2778 } 2779 } 2780 2781 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 2782 { 2783 int i; 2784 u32 prod; 2785 struct rx_bd **rx_buf_ring; 2786 2787 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 2788 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 2789 int j; 2790 struct rx_bd *rxbd; 2791 2792 rxbd = rx_buf_ring[i]; 2793 if (!rxbd) 2794 continue; 2795 2796 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 2797 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 2798 rxbd->rx_bd_opaque = prod; 2799 } 2800 } 2801 } 2802 2803 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 2804 { 2805 struct net_device *dev = bp->dev; 2806 struct bnxt_rx_ring_info *rxr; 2807 struct bnxt_ring_struct *ring; 2808 u32 prod, type; 2809 int i; 2810 2811 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 2812 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 2813 2814 if (NET_IP_ALIGN == 2) 2815 type |= RX_BD_FLAGS_SOP; 2816 2817 rxr = &bp->rx_ring[ring_nr]; 2818 ring = &rxr->rx_ring_struct; 2819 bnxt_init_rxbd_pages(ring, type); 2820 2821 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 2822 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); 2823 if (IS_ERR(rxr->xdp_prog)) { 2824 int rc = PTR_ERR(rxr->xdp_prog); 2825 2826 rxr->xdp_prog = NULL; 2827 return rc; 2828 } 2829 } 2830 prod = rxr->rx_prod; 2831 for (i = 0; i < bp->rx_ring_size; i++) { 2832 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { 2833 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 2834 ring_nr, i, bp->rx_ring_size); 2835 break; 2836 } 2837 prod = NEXT_RX(prod); 2838 } 2839 rxr->rx_prod = prod; 2840 ring->fw_ring_id = INVALID_HW_RING_ID; 2841 2842 ring = &rxr->rx_agg_ring_struct; 2843 ring->fw_ring_id = INVALID_HW_RING_ID; 2844 2845 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 2846 return 0; 2847 2848 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 2849 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 2850 2851 bnxt_init_rxbd_pages(ring, type); 2852 2853 prod = rxr->rx_agg_prod; 2854 for (i = 0; i < bp->rx_agg_ring_size; i++) { 2855 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { 2856 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 2857 ring_nr, i, bp->rx_ring_size); 2858 break; 2859 } 2860 prod = NEXT_RX_AGG(prod); 2861 } 2862 rxr->rx_agg_prod = prod; 2863 2864 if (bp->flags & BNXT_FLAG_TPA) { 2865 if (rxr->rx_tpa) { 2866 u8 *data; 2867 dma_addr_t mapping; 2868 2869 for (i = 0; i < MAX_TPA; i++) { 2870 data = __bnxt_alloc_rx_data(bp, &mapping, 2871 GFP_KERNEL); 2872 if (!data) 2873 return -ENOMEM; 2874 2875 rxr->rx_tpa[i].data = data; 2876 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 2877 rxr->rx_tpa[i].mapping = mapping; 2878 } 2879 } else { 2880 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); 2881 return -ENOMEM; 2882 } 2883 } 2884 2885 return 0; 2886 } 2887 2888 static void bnxt_init_cp_rings(struct bnxt *bp) 2889 { 2890 int i, j; 2891 2892 for (i = 0; i < bp->cp_nr_rings; i++) { 2893 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 2894 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 2895 2896 ring->fw_ring_id = INVALID_HW_RING_ID; 2897 cpr->rx_ring_coal.coal_ticks = 
bp->rx_coal.coal_ticks; 2898 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 2899 for (j = 0; j < 2; j++) { 2900 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 2901 2902 if (!cpr2) 2903 continue; 2904 2905 ring = &cpr2->cp_ring_struct; 2906 ring->fw_ring_id = INVALID_HW_RING_ID; 2907 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 2908 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 2909 } 2910 } 2911 } 2912 2913 static int bnxt_init_rx_rings(struct bnxt *bp) 2914 { 2915 int i, rc = 0; 2916 2917 if (BNXT_RX_PAGE_MODE(bp)) { 2918 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 2919 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 2920 } else { 2921 bp->rx_offset = BNXT_RX_OFFSET; 2922 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 2923 } 2924 2925 for (i = 0; i < bp->rx_nr_rings; i++) { 2926 rc = bnxt_init_one_rx_ring(bp, i); 2927 if (rc) 2928 break; 2929 } 2930 2931 return rc; 2932 } 2933 2934 static int bnxt_init_tx_rings(struct bnxt *bp) 2935 { 2936 u16 i; 2937 2938 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 2939 MAX_SKB_FRAGS + 1); 2940 2941 for (i = 0; i < bp->tx_nr_rings; i++) { 2942 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2943 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 2944 2945 ring->fw_ring_id = INVALID_HW_RING_ID; 2946 } 2947 2948 return 0; 2949 } 2950 2951 static void bnxt_free_ring_grps(struct bnxt *bp) 2952 { 2953 kfree(bp->grp_info); 2954 bp->grp_info = NULL; 2955 } 2956 2957 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 2958 { 2959 int i; 2960 2961 if (irq_re_init) { 2962 bp->grp_info = kcalloc(bp->cp_nr_rings, 2963 sizeof(struct bnxt_ring_grp_info), 2964 GFP_KERNEL); 2965 if (!bp->grp_info) 2966 return -ENOMEM; 2967 } 2968 for (i = 0; i < bp->cp_nr_rings; i++) { 2969 if (irq_re_init) 2970 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 2971 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 2972 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 2973 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 2974 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 2975 } 2976 return 0; 2977 } 2978 2979 static void bnxt_free_vnics(struct bnxt *bp) 2980 { 2981 kfree(bp->vnic_info); 2982 bp->vnic_info = NULL; 2983 bp->nr_vnics = 0; 2984 } 2985 2986 static int bnxt_alloc_vnics(struct bnxt *bp) 2987 { 2988 int num_vnics = 1; 2989 2990 #ifdef CONFIG_RFS_ACCEL 2991 if (bp->flags & BNXT_FLAG_RFS) 2992 num_vnics += bp->rx_nr_rings; 2993 #endif 2994 2995 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 2996 num_vnics++; 2997 2998 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 2999 GFP_KERNEL); 3000 if (!bp->vnic_info) 3001 return -ENOMEM; 3002 3003 bp->nr_vnics = num_vnics; 3004 return 0; 3005 } 3006 3007 static void bnxt_init_vnics(struct bnxt *bp) 3008 { 3009 int i; 3010 3011 for (i = 0; i < bp->nr_vnics; i++) { 3012 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3013 int j; 3014 3015 vnic->fw_vnic_id = INVALID_HW_RING_ID; 3016 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 3017 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 3018 3019 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 3020 3021 if (bp->vnic_info[i].rss_hash_key) { 3022 if (i == 0) 3023 prandom_bytes(vnic->rss_hash_key, 3024 HW_HASH_KEY_SIZE); 3025 else 3026 memcpy(vnic->rss_hash_key, 3027 bp->vnic_info[0].rss_hash_key, 3028 HW_HASH_KEY_SIZE); 3029 } 3030 } 3031 } 3032 3033 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 3034 { 3035 int pages; 3036 3037 pages = ring_size / desc_per_pg; 3038 3039 if (!pages) 3040 return 1; 3041 3042 pages++; 
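	/* Round the page count up to the next power of 2; the ring masks
	 * derived from it (nr_pages * DESC_CNT - 1) depend on this.
	 */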
3043 3044 while (pages & (pages - 1)) 3045 pages++; 3046 3047 return pages; 3048 } 3049 3050 void bnxt_set_tpa_flags(struct bnxt *bp) 3051 { 3052 bp->flags &= ~BNXT_FLAG_TPA; 3053 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 3054 return; 3055 if (bp->dev->features & NETIF_F_LRO) 3056 bp->flags |= BNXT_FLAG_LRO; 3057 else if (bp->dev->features & NETIF_F_GRO_HW) 3058 bp->flags |= BNXT_FLAG_GRO; 3059 } 3060 3061 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 3062 * be set on entry. 3063 */ 3064 void bnxt_set_ring_params(struct bnxt *bp) 3065 { 3066 u32 ring_size, rx_size, rx_space; 3067 u32 agg_factor = 0, agg_ring_size = 0; 3068 3069 /* 8 for CRC and VLAN */ 3070 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 3071 3072 rx_space = rx_size + NET_SKB_PAD + 3073 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3074 3075 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 3076 ring_size = bp->rx_ring_size; 3077 bp->rx_agg_ring_size = 0; 3078 bp->rx_agg_nr_pages = 0; 3079 3080 if (bp->flags & BNXT_FLAG_TPA) 3081 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 3082 3083 bp->flags &= ~BNXT_FLAG_JUMBO; 3084 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 3085 u32 jumbo_factor; 3086 3087 bp->flags |= BNXT_FLAG_JUMBO; 3088 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 3089 if (jumbo_factor > agg_factor) 3090 agg_factor = jumbo_factor; 3091 } 3092 agg_ring_size = ring_size * agg_factor; 3093 3094 if (agg_ring_size) { 3095 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 3096 RX_DESC_CNT); 3097 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 3098 u32 tmp = agg_ring_size; 3099 3100 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 3101 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 3102 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 3103 tmp, agg_ring_size); 3104 } 3105 bp->rx_agg_ring_size = agg_ring_size; 3106 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 3107 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 3108 rx_space = rx_size + NET_SKB_PAD + 3109 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3110 } 3111 3112 bp->rx_buf_use_size = rx_size; 3113 bp->rx_buf_size = rx_space; 3114 3115 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 3116 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 3117 3118 ring_size = bp->tx_ring_size; 3119 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 3120 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 3121 3122 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 3123 bp->cp_ring_size = ring_size; 3124 3125 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 3126 if (bp->cp_nr_pages > MAX_CP_PAGES) { 3127 bp->cp_nr_pages = MAX_CP_PAGES; 3128 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 3129 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 3130 ring_size, bp->cp_ring_size); 3131 } 3132 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 3133 bp->cp_ring_mask = bp->cp_bit - 1; 3134 } 3135 3136 /* Changing allocation mode of RX rings. 3137 * TODO: Update when extending xdp_rxq_info to support allocation modes. 
3138 */ 3139 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 3140 { 3141 if (page_mode) { 3142 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 3143 return -EOPNOTSUPP; 3144 bp->dev->max_mtu = 3145 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 3146 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 3147 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 3148 bp->rx_dir = DMA_BIDIRECTIONAL; 3149 bp->rx_skb_func = bnxt_rx_page_skb; 3150 /* Disable LRO or GRO_HW */ 3151 netdev_update_features(bp->dev); 3152 } else { 3153 bp->dev->max_mtu = bp->max_mtu; 3154 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 3155 bp->rx_dir = DMA_FROM_DEVICE; 3156 bp->rx_skb_func = bnxt_rx_skb; 3157 } 3158 return 0; 3159 } 3160 3161 static void bnxt_free_vnic_attributes(struct bnxt *bp) 3162 { 3163 int i; 3164 struct bnxt_vnic_info *vnic; 3165 struct pci_dev *pdev = bp->pdev; 3166 3167 if (!bp->vnic_info) 3168 return; 3169 3170 for (i = 0; i < bp->nr_vnics; i++) { 3171 vnic = &bp->vnic_info[i]; 3172 3173 kfree(vnic->fw_grp_ids); 3174 vnic->fw_grp_ids = NULL; 3175 3176 kfree(vnic->uc_list); 3177 vnic->uc_list = NULL; 3178 3179 if (vnic->mc_list) { 3180 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 3181 vnic->mc_list, vnic->mc_list_mapping); 3182 vnic->mc_list = NULL; 3183 } 3184 3185 if (vnic->rss_table) { 3186 dma_free_coherent(&pdev->dev, PAGE_SIZE, 3187 vnic->rss_table, 3188 vnic->rss_table_dma_addr); 3189 vnic->rss_table = NULL; 3190 } 3191 3192 vnic->rss_hash_key = NULL; 3193 vnic->flags = 0; 3194 } 3195 } 3196 3197 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 3198 { 3199 int i, rc = 0, size; 3200 struct bnxt_vnic_info *vnic; 3201 struct pci_dev *pdev = bp->pdev; 3202 int max_rings; 3203 3204 for (i = 0; i < bp->nr_vnics; i++) { 3205 vnic = &bp->vnic_info[i]; 3206 3207 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 3208 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 3209 3210 if (mem_size > 0) { 3211 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 3212 if (!vnic->uc_list) { 3213 rc = -ENOMEM; 3214 goto out; 3215 } 3216 } 3217 } 3218 3219 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 3220 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 3221 vnic->mc_list = 3222 dma_alloc_coherent(&pdev->dev, 3223 vnic->mc_list_size, 3224 &vnic->mc_list_mapping, 3225 GFP_KERNEL); 3226 if (!vnic->mc_list) { 3227 rc = -ENOMEM; 3228 goto out; 3229 } 3230 } 3231 3232 if (bp->flags & BNXT_FLAG_CHIP_P5) 3233 goto vnic_skip_grps; 3234 3235 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3236 max_rings = bp->rx_nr_rings; 3237 else 3238 max_rings = 1; 3239 3240 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 3241 if (!vnic->fw_grp_ids) { 3242 rc = -ENOMEM; 3243 goto out; 3244 } 3245 vnic_skip_grps: 3246 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 3247 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 3248 continue; 3249 3250 /* Allocate rss table and hash key */ 3251 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3252 &vnic->rss_table_dma_addr, 3253 GFP_KERNEL); 3254 if (!vnic->rss_table) { 3255 rc = -ENOMEM; 3256 goto out; 3257 } 3258 3259 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 3260 3261 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 3262 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 3263 } 3264 return 0; 3265 3266 out: 3267 return rc; 3268 } 3269 3270 static void bnxt_free_hwrm_resources(struct bnxt *bp) 3271 { 3272 struct pci_dev *pdev = bp->pdev; 3273 3274 if (bp->hwrm_cmd_resp_addr) { 3275 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3276 
bp->hwrm_cmd_resp_dma_addr); 3277 bp->hwrm_cmd_resp_addr = NULL; 3278 } 3279 } 3280 3281 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 3282 { 3283 struct pci_dev *pdev = bp->pdev; 3284 3285 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 3286 &bp->hwrm_cmd_resp_dma_addr, 3287 GFP_KERNEL); 3288 if (!bp->hwrm_cmd_resp_addr) 3289 return -ENOMEM; 3290 3291 return 0; 3292 } 3293 3294 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) 3295 { 3296 if (bp->hwrm_short_cmd_req_addr) { 3297 struct pci_dev *pdev = bp->pdev; 3298 3299 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3300 bp->hwrm_short_cmd_req_addr, 3301 bp->hwrm_short_cmd_req_dma_addr); 3302 bp->hwrm_short_cmd_req_addr = NULL; 3303 } 3304 } 3305 3306 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) 3307 { 3308 struct pci_dev *pdev = bp->pdev; 3309 3310 bp->hwrm_short_cmd_req_addr = 3311 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, 3312 &bp->hwrm_short_cmd_req_dma_addr, 3313 GFP_KERNEL); 3314 if (!bp->hwrm_short_cmd_req_addr) 3315 return -ENOMEM; 3316 3317 return 0; 3318 } 3319 3320 static void bnxt_free_stats(struct bnxt *bp) 3321 { 3322 u32 size, i; 3323 struct pci_dev *pdev = bp->pdev; 3324 3325 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3326 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 3327 3328 if (bp->hw_rx_port_stats) { 3329 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 3330 bp->hw_rx_port_stats, 3331 bp->hw_rx_port_stats_map); 3332 bp->hw_rx_port_stats = NULL; 3333 } 3334 3335 if (bp->hw_tx_port_stats_ext) { 3336 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), 3337 bp->hw_tx_port_stats_ext, 3338 bp->hw_tx_port_stats_ext_map); 3339 bp->hw_tx_port_stats_ext = NULL; 3340 } 3341 3342 if (bp->hw_rx_port_stats_ext) { 3343 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), 3344 bp->hw_rx_port_stats_ext, 3345 bp->hw_rx_port_stats_ext_map); 3346 bp->hw_rx_port_stats_ext = NULL; 3347 } 3348 3349 if (!bp->bnapi) 3350 return; 3351 3352 size = sizeof(struct ctx_hw_stats); 3353 3354 for (i = 0; i < bp->cp_nr_rings; i++) { 3355 struct bnxt_napi *bnapi = bp->bnapi[i]; 3356 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3357 3358 if (cpr->hw_stats) { 3359 dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 3360 cpr->hw_stats_map); 3361 cpr->hw_stats = NULL; 3362 } 3363 } 3364 } 3365 3366 static int bnxt_alloc_stats(struct bnxt *bp) 3367 { 3368 u32 size, i; 3369 struct pci_dev *pdev = bp->pdev; 3370 3371 size = sizeof(struct ctx_hw_stats); 3372 3373 for (i = 0; i < bp->cp_nr_rings; i++) { 3374 struct bnxt_napi *bnapi = bp->bnapi[i]; 3375 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3376 3377 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 3378 &cpr->hw_stats_map, 3379 GFP_KERNEL); 3380 if (!cpr->hw_stats) 3381 return -ENOMEM; 3382 3383 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 3384 } 3385 3386 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { 3387 bp->hw_port_stats_size = sizeof(struct rx_port_stats) + 3388 sizeof(struct tx_port_stats) + 1024; 3389 3390 bp->hw_rx_port_stats = 3391 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, 3392 &bp->hw_rx_port_stats_map, 3393 GFP_KERNEL); 3394 if (!bp->hw_rx_port_stats) 3395 return -ENOMEM; 3396 3397 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 3398 512; 3399 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + 3400 sizeof(struct rx_port_stats) + 512; 3401 bp->flags |= BNXT_FLAG_PORT_STATS; 3402 3403 /* Display extended statistics only if FW supports it */ 3404 if (bp->hwrm_spec_code 
< 0x10804 || 3405 bp->hwrm_spec_code == 0x10900) 3406 return 0; 3407 3408 bp->hw_rx_port_stats_ext = 3409 dma_zalloc_coherent(&pdev->dev, 3410 sizeof(struct rx_port_stats_ext), 3411 &bp->hw_rx_port_stats_ext_map, 3412 GFP_KERNEL); 3413 if (!bp->hw_rx_port_stats_ext) 3414 return 0; 3415 3416 if (bp->hwrm_spec_code >= 0x10902) { 3417 bp->hw_tx_port_stats_ext = 3418 dma_zalloc_coherent(&pdev->dev, 3419 sizeof(struct tx_port_stats_ext), 3420 &bp->hw_tx_port_stats_ext_map, 3421 GFP_KERNEL); 3422 } 3423 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3424 } 3425 return 0; 3426 } 3427 3428 static void bnxt_clear_ring_indices(struct bnxt *bp) 3429 { 3430 int i; 3431 3432 if (!bp->bnapi) 3433 return; 3434 3435 for (i = 0; i < bp->cp_nr_rings; i++) { 3436 struct bnxt_napi *bnapi = bp->bnapi[i]; 3437 struct bnxt_cp_ring_info *cpr; 3438 struct bnxt_rx_ring_info *rxr; 3439 struct bnxt_tx_ring_info *txr; 3440 3441 if (!bnapi) 3442 continue; 3443 3444 cpr = &bnapi->cp_ring; 3445 cpr->cp_raw_cons = 0; 3446 3447 txr = bnapi->tx_ring; 3448 if (txr) { 3449 txr->tx_prod = 0; 3450 txr->tx_cons = 0; 3451 } 3452 3453 rxr = bnapi->rx_ring; 3454 if (rxr) { 3455 rxr->rx_prod = 0; 3456 rxr->rx_agg_prod = 0; 3457 rxr->rx_sw_agg_prod = 0; 3458 rxr->rx_next_cons = 0; 3459 } 3460 } 3461 } 3462 3463 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 3464 { 3465 #ifdef CONFIG_RFS_ACCEL 3466 int i; 3467 3468 /* Under rtnl_lock and all our NAPIs have been disabled. It's 3469 * safe to delete the hash table. 3470 */ 3471 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 3472 struct hlist_head *head; 3473 struct hlist_node *tmp; 3474 struct bnxt_ntuple_filter *fltr; 3475 3476 head = &bp->ntp_fltr_hash_tbl[i]; 3477 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 3478 hlist_del(&fltr->hash); 3479 kfree(fltr); 3480 } 3481 } 3482 if (irq_reinit) { 3483 kfree(bp->ntp_fltr_bmap); 3484 bp->ntp_fltr_bmap = NULL; 3485 } 3486 bp->ntp_fltr_count = 0; 3487 #endif 3488 } 3489 3490 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 3491 { 3492 #ifdef CONFIG_RFS_ACCEL 3493 int i, rc = 0; 3494 3495 if (!(bp->flags & BNXT_FLAG_RFS)) 3496 return 0; 3497 3498 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 3499 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 3500 3501 bp->ntp_fltr_count = 0; 3502 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 3503 sizeof(long), 3504 GFP_KERNEL); 3505 3506 if (!bp->ntp_fltr_bmap) 3507 rc = -ENOMEM; 3508 3509 return rc; 3510 #else 3511 return 0; 3512 #endif 3513 } 3514 3515 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 3516 { 3517 bnxt_free_vnic_attributes(bp); 3518 bnxt_free_tx_rings(bp); 3519 bnxt_free_rx_rings(bp); 3520 bnxt_free_cp_rings(bp); 3521 bnxt_free_ntp_fltrs(bp, irq_re_init); 3522 if (irq_re_init) { 3523 bnxt_free_stats(bp); 3524 bnxt_free_ring_grps(bp); 3525 bnxt_free_vnics(bp); 3526 kfree(bp->tx_ring_map); 3527 bp->tx_ring_map = NULL; 3528 kfree(bp->tx_ring); 3529 bp->tx_ring = NULL; 3530 kfree(bp->rx_ring); 3531 bp->rx_ring = NULL; 3532 kfree(bp->bnapi); 3533 bp->bnapi = NULL; 3534 } else { 3535 bnxt_clear_ring_indices(bp); 3536 } 3537 } 3538 3539 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 3540 { 3541 int i, j, rc, size, arr_size; 3542 void *bnapi; 3543 3544 if (irq_re_init) { 3545 /* Allocate bnapi mem pointer array and mem block for 3546 * all queues 3547 */ 3548 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 3549 bp->cp_nr_rings); 3550 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 3551 bnapi = kzalloc(arr_size + size * 
bp->cp_nr_rings, GFP_KERNEL); 3552 if (!bnapi) 3553 return -ENOMEM; 3554 3555 bp->bnapi = bnapi; 3556 bnapi += arr_size; 3557 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 3558 bp->bnapi[i] = bnapi; 3559 bp->bnapi[i]->index = i; 3560 bp->bnapi[i]->bp = bp; 3561 if (bp->flags & BNXT_FLAG_CHIP_P5) { 3562 struct bnxt_cp_ring_info *cpr = 3563 &bp->bnapi[i]->cp_ring; 3564 3565 cpr->cp_ring_struct.ring_mem.flags = 3566 BNXT_RMEM_RING_PTE_FLAG; 3567 } 3568 } 3569 3570 bp->rx_ring = kcalloc(bp->rx_nr_rings, 3571 sizeof(struct bnxt_rx_ring_info), 3572 GFP_KERNEL); 3573 if (!bp->rx_ring) 3574 return -ENOMEM; 3575 3576 for (i = 0; i < bp->rx_nr_rings; i++) { 3577 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3578 3579 if (bp->flags & BNXT_FLAG_CHIP_P5) { 3580 rxr->rx_ring_struct.ring_mem.flags = 3581 BNXT_RMEM_RING_PTE_FLAG; 3582 rxr->rx_agg_ring_struct.ring_mem.flags = 3583 BNXT_RMEM_RING_PTE_FLAG; 3584 } 3585 rxr->bnapi = bp->bnapi[i]; 3586 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 3587 } 3588 3589 bp->tx_ring = kcalloc(bp->tx_nr_rings, 3590 sizeof(struct bnxt_tx_ring_info), 3591 GFP_KERNEL); 3592 if (!bp->tx_ring) 3593 return -ENOMEM; 3594 3595 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 3596 GFP_KERNEL); 3597 3598 if (!bp->tx_ring_map) 3599 return -ENOMEM; 3600 3601 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 3602 j = 0; 3603 else 3604 j = bp->rx_nr_rings; 3605 3606 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 3607 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3608 3609 if (bp->flags & BNXT_FLAG_CHIP_P5) 3610 txr->tx_ring_struct.ring_mem.flags = 3611 BNXT_RMEM_RING_PTE_FLAG; 3612 txr->bnapi = bp->bnapi[j]; 3613 bp->bnapi[j]->tx_ring = txr; 3614 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 3615 if (i >= bp->tx_nr_rings_xdp) { 3616 txr->txq_index = i - bp->tx_nr_rings_xdp; 3617 bp->bnapi[j]->tx_int = bnxt_tx_int; 3618 } else { 3619 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 3620 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 3621 } 3622 } 3623 3624 rc = bnxt_alloc_stats(bp); 3625 if (rc) 3626 goto alloc_mem_err; 3627 3628 rc = bnxt_alloc_ntp_fltrs(bp); 3629 if (rc) 3630 goto alloc_mem_err; 3631 3632 rc = bnxt_alloc_vnics(bp); 3633 if (rc) 3634 goto alloc_mem_err; 3635 } 3636 3637 bnxt_init_ring_struct(bp); 3638 3639 rc = bnxt_alloc_rx_rings(bp); 3640 if (rc) 3641 goto alloc_mem_err; 3642 3643 rc = bnxt_alloc_tx_rings(bp); 3644 if (rc) 3645 goto alloc_mem_err; 3646 3647 rc = bnxt_alloc_cp_rings(bp); 3648 if (rc) 3649 goto alloc_mem_err; 3650 3651 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 3652 BNXT_VNIC_UCAST_FLAG; 3653 rc = bnxt_alloc_vnic_attributes(bp); 3654 if (rc) 3655 goto alloc_mem_err; 3656 return 0; 3657 3658 alloc_mem_err: 3659 bnxt_free_mem(bp, true); 3660 return rc; 3661 } 3662 3663 static void bnxt_disable_int(struct bnxt *bp) 3664 { 3665 int i; 3666 3667 if (!bp->bnapi) 3668 return; 3669 3670 for (i = 0; i < bp->cp_nr_rings; i++) { 3671 struct bnxt_napi *bnapi = bp->bnapi[i]; 3672 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3673 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3674 3675 if (ring->fw_ring_id != INVALID_HW_RING_ID) 3676 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 3677 } 3678 } 3679 3680 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 3681 { 3682 struct bnxt_napi *bnapi = bp->bnapi[n]; 3683 struct bnxt_cp_ring_info *cpr; 3684 3685 cpr = &bnapi->cp_ring; 3686 return cpr->cp_ring_struct.map_idx; 3687 } 3688 3689 static void bnxt_disable_int_sync(struct bnxt *bp) 3690 { 3691 int i; 3692 3693 
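	/* Raising intr_sem makes the shared-interrupt handler (bnxt_inta)
	 * return without scheduling NAPI; synchronize_irq() below then
	 * waits for any handlers that are already running to finish.
	 */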
atomic_inc(&bp->intr_sem); 3694 3695 bnxt_disable_int(bp); 3696 for (i = 0; i < bp->cp_nr_rings; i++) { 3697 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 3698 3699 synchronize_irq(bp->irq_tbl[map_idx].vector); 3700 } 3701 } 3702 3703 static void bnxt_enable_int(struct bnxt *bp) 3704 { 3705 int i; 3706 3707 atomic_set(&bp->intr_sem, 0); 3708 for (i = 0; i < bp->cp_nr_rings; i++) { 3709 struct bnxt_napi *bnapi = bp->bnapi[i]; 3710 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3711 3712 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 3713 } 3714 } 3715 3716 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 3717 u16 cmpl_ring, u16 target_id) 3718 { 3719 struct input *req = request; 3720 3721 req->req_type = cpu_to_le16(req_type); 3722 req->cmpl_ring = cpu_to_le16(cmpl_ring); 3723 req->target_id = cpu_to_le16(target_id); 3724 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 3725 } 3726 3727 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 3728 int timeout, bool silent) 3729 { 3730 int i, intr_process, rc, tmo_count; 3731 struct input *req = msg; 3732 u32 *data = msg; 3733 __le32 *resp_len; 3734 u8 *valid; 3735 u16 cp_ring_id, len = 0; 3736 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 3737 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 3738 struct hwrm_short_input short_input = {0}; 3739 3740 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); 3741 memset(resp, 0, PAGE_SIZE); 3742 cp_ring_id = le16_to_cpu(req->cmpl_ring); 3743 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; 3744 3745 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { 3746 if (msg_len > bp->hwrm_max_ext_req_len || 3747 !bp->hwrm_short_cmd_req_addr) 3748 return -EINVAL; 3749 } 3750 3751 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || 3752 msg_len > BNXT_HWRM_MAX_REQ_LEN) { 3753 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 3754 u16 max_msg_len; 3755 3756 /* Set boundary for maximum extended request length for short 3757 * cmd format. If passed up from device use the max supported 3758 * internal req length. 3759 */ 3760 max_msg_len = bp->hwrm_max_ext_req_len; 3761 3762 memcpy(short_cmd_req, req, msg_len); 3763 if (msg_len < max_msg_len) 3764 memset(short_cmd_req + msg_len, 0, 3765 max_msg_len - msg_len); 3766 3767 short_input.req_type = req->req_type; 3768 short_input.signature = 3769 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); 3770 short_input.size = cpu_to_le16(msg_len); 3771 short_input.req_addr = 3772 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); 3773 3774 data = (u32 *)&short_input; 3775 msg_len = sizeof(short_input); 3776 3777 /* Sync memory write before updating doorbell */ 3778 wmb(); 3779 3780 max_req_len = BNXT_HWRM_SHORT_REQ_LEN; 3781 } 3782 3783 /* Write request msg to hwrm channel */ 3784 __iowrite32_copy(bp->bar0, data, msg_len / 4); 3785 3786 for (i = msg_len; i < max_req_len; i += 4) 3787 writel(0, bp->bar0 + i); 3788 3789 /* currently supports only one outstanding message */ 3790 if (intr_process) 3791 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 3792 3793 /* Ring channel doorbell */ 3794 writel(1, bp->bar0 + 0x100); 3795 3796 if (!timeout) 3797 timeout = DFLT_HWRM_CMD_TIMEOUT; 3798 /* convert timeout to usec */ 3799 timeout *= 1000; 3800 3801 i = 0; 3802 /* Short timeout for the first few iterations: 3803 * number of loops = number of loops for short timeout + 3804 * number of loops for standard timeout. 
3805 */ 3806 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 3807 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 3808 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 3809 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; 3810 if (intr_process) { 3811 /* Wait until hwrm response cmpl interrupt is processed */ 3812 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && 3813 i++ < tmo_count) { 3814 /* on first few passes, just barely sleep */ 3815 if (i < HWRM_SHORT_TIMEOUT_COUNTER) 3816 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 3817 HWRM_SHORT_MAX_TIMEOUT); 3818 else 3819 usleep_range(HWRM_MIN_TIMEOUT, 3820 HWRM_MAX_TIMEOUT); 3821 } 3822 3823 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { 3824 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 3825 le16_to_cpu(req->req_type)); 3826 return -1; 3827 } 3828 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 3829 HWRM_RESP_LEN_SFT; 3830 valid = bp->hwrm_cmd_resp_addr + len - 1; 3831 } else { 3832 int j; 3833 3834 /* Check if response len is updated */ 3835 for (i = 0; i < tmo_count; i++) { 3836 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 3837 HWRM_RESP_LEN_SFT; 3838 if (len) 3839 break; 3840 /* on first few passes, just barely sleep */ 3841 if (i < DFLT_HWRM_CMD_TIMEOUT) 3842 usleep_range(HWRM_SHORT_MIN_TIMEOUT, 3843 HWRM_SHORT_MAX_TIMEOUT); 3844 else 3845 usleep_range(HWRM_MIN_TIMEOUT, 3846 HWRM_MAX_TIMEOUT); 3847 } 3848 3849 if (i >= tmo_count) { 3850 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 3851 HWRM_TOTAL_TIMEOUT(i), 3852 le16_to_cpu(req->req_type), 3853 le16_to_cpu(req->seq_id), len); 3854 return -1; 3855 } 3856 3857 /* Last byte of resp contains valid bit */ 3858 valid = bp->hwrm_cmd_resp_addr + len - 1; 3859 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 3860 /* make sure we read from updated DMA memory */ 3861 dma_rmb(); 3862 if (*valid) 3863 break; 3864 udelay(1); 3865 } 3866 3867 if (j >= HWRM_VALID_BIT_DELAY_USEC) { 3868 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 3869 HWRM_TOTAL_TIMEOUT(i), 3870 le16_to_cpu(req->req_type), 3871 le16_to_cpu(req->seq_id), len, *valid); 3872 return -1; 3873 } 3874 } 3875 3876 /* Zero valid bit for compatibility. Valid bit in an older spec 3877 * may become a new field in a newer spec. We must make sure that 3878 * a new field not implemented by old spec will read zero. 
3879 */ 3880 *valid = 0; 3881 rc = le16_to_cpu(resp->error_code); 3882 if (rc && !silent) 3883 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 3884 le16_to_cpu(resp->req_type), 3885 le16_to_cpu(resp->seq_id), rc); 3886 return rc; 3887 } 3888 3889 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3890 { 3891 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 3892 } 3893 3894 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 3895 int timeout) 3896 { 3897 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 3898 } 3899 3900 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3901 { 3902 int rc; 3903 3904 mutex_lock(&bp->hwrm_cmd_lock); 3905 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 3906 mutex_unlock(&bp->hwrm_cmd_lock); 3907 return rc; 3908 } 3909 3910 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 3911 int timeout) 3912 { 3913 int rc; 3914 3915 mutex_lock(&bp->hwrm_cmd_lock); 3916 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 3917 mutex_unlock(&bp->hwrm_cmd_lock); 3918 return rc; 3919 } 3920 3921 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, 3922 int bmap_size) 3923 { 3924 struct hwrm_func_drv_rgtr_input req = {0}; 3925 DECLARE_BITMAP(async_events_bmap, 256); 3926 u32 *events = (u32 *)async_events_bmap; 3927 int i; 3928 3929 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3930 3931 req.enables = 3932 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 3933 3934 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 3935 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) 3936 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 3937 3938 if (bmap && bmap_size) { 3939 for (i = 0; i < bmap_size; i++) { 3940 if (test_bit(i, bmap)) 3941 __set_bit(i, async_events_bmap); 3942 } 3943 } 3944 3945 for (i = 0; i < 8; i++) 3946 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 3947 3948 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3949 } 3950 3951 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) 3952 { 3953 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; 3954 struct hwrm_func_drv_rgtr_input req = {0}; 3955 int rc; 3956 3957 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3958 3959 req.enables = 3960 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 3961 FUNC_DRV_RGTR_REQ_ENABLES_VER); 3962 3963 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 3964 req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE); 3965 req.ver_maj_8b = DRV_VER_MAJ; 3966 req.ver_min_8b = DRV_VER_MIN; 3967 req.ver_upd_8b = DRV_VER_UPD; 3968 req.ver_maj = cpu_to_le16(DRV_VER_MAJ); 3969 req.ver_min = cpu_to_le16(DRV_VER_MIN); 3970 req.ver_upd = cpu_to_le16(DRV_VER_UPD); 3971 3972 if (BNXT_PF(bp)) { 3973 u32 data[8]; 3974 int i; 3975 3976 memset(data, 0, sizeof(data)); 3977 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 3978 u16 cmd = bnxt_vf_req_snif[i]; 3979 unsigned int bit, idx; 3980 3981 idx = cmd / 32; 3982 bit = cmd % 32; 3983 data[idx] |= 1 << bit; 3984 } 3985 3986 for (i = 0; i < 8; i++) 3987 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 3988 3989 req.enables |= 3990 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 3991 } 3992 3993 mutex_lock(&bp->hwrm_cmd_lock); 3994 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3995 if (rc) 3996 rc = -EIO; 3997 else if (resp->flags & 3998 
cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 3999 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 4000 mutex_unlock(&bp->hwrm_cmd_lock); 4001 return rc; 4002 } 4003 4004 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 4005 { 4006 struct hwrm_func_drv_unrgtr_input req = {0}; 4007 4008 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 4009 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4010 } 4011 4012 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 4013 { 4014 u32 rc = 0; 4015 struct hwrm_tunnel_dst_port_free_input req = {0}; 4016 4017 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 4018 req.tunnel_type = tunnel_type; 4019 4020 switch (tunnel_type) { 4021 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 4022 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; 4023 break; 4024 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 4025 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; 4026 break; 4027 default: 4028 break; 4029 } 4030 4031 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4032 if (rc) 4033 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 4034 rc); 4035 return rc; 4036 } 4037 4038 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 4039 u8 tunnel_type) 4040 { 4041 u32 rc = 0; 4042 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 4043 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4044 4045 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 4046 4047 req.tunnel_type = tunnel_type; 4048 req.tunnel_dst_port_val = port; 4049 4050 mutex_lock(&bp->hwrm_cmd_lock); 4051 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4052 if (rc) { 4053 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", 4054 rc); 4055 goto err_out; 4056 } 4057 4058 switch (tunnel_type) { 4059 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 4060 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 4061 break; 4062 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 4063 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 4064 break; 4065 default: 4066 break; 4067 } 4068 4069 err_out: 4070 mutex_unlock(&bp->hwrm_cmd_lock); 4071 return rc; 4072 } 4073 4074 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 4075 { 4076 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 4077 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4078 4079 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 4080 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4081 4082 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 4083 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 4084 req.mask = cpu_to_le32(vnic->rx_mask); 4085 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4086 } 4087 4088 #ifdef CONFIG_RFS_ACCEL 4089 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 4090 struct bnxt_ntuple_filter *fltr) 4091 { 4092 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 4093 4094 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 4095 req.ntuple_filter_id = fltr->filter_id; 4096 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4097 } 4098 4099 #define BNXT_NTP_FLTR_FLAGS \ 4100 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 4101 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 4102 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 4103 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 4104 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 4105 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 4106 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 4107 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 4108 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 4109 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 4110 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 4111 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 4112 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 4113 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 4114 4115 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 4116 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 4117 4118 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 4119 struct bnxt_ntuple_filter *fltr) 4120 { 4121 int rc = 0; 4122 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 4123 struct hwrm_cfa_ntuple_filter_alloc_output *resp = 4124 bp->hwrm_cmd_resp_addr; 4125 struct flow_keys *keys = &fltr->fkeys; 4126 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; 4127 4128 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 4129 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 4130 4131 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 4132 4133 req.ethertype = htons(ETH_P_IP); 4134 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 4135 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 4136 req.ip_protocol = keys->basic.ip_proto; 4137 4138 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 4139 int i; 4140 4141 req.ethertype = htons(ETH_P_IPV6); 4142 req.ip_addr_type = 4143 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 4144 *(struct in6_addr *)&req.src_ipaddr[0] = 4145 keys->addrs.v6addrs.src; 4146 *(struct in6_addr *)&req.dst_ipaddr[0] = 4147 keys->addrs.v6addrs.dst; 4148 for (i = 0; i < 4; i++) { 
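			/* All-ones masks below: the ntuple filter matches the
			 * exact 128-bit IPv6 source and destination addresses.
			 */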
4149 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4150 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 4151 } 4152 } else { 4153 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 4154 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4155 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 4156 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 4157 } 4158 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 4159 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 4160 req.tunnel_type = 4161 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 4162 } 4163 4164 req.src_port = keys->ports.src; 4165 req.src_port_mask = cpu_to_be16(0xffff); 4166 req.dst_port = keys->ports.dst; 4167 req.dst_port_mask = cpu_to_be16(0xffff); 4168 4169 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 4170 mutex_lock(&bp->hwrm_cmd_lock); 4171 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4172 if (!rc) 4173 fltr->filter_id = resp->ntuple_filter_id; 4174 mutex_unlock(&bp->hwrm_cmd_lock); 4175 return rc; 4176 } 4177 #endif 4178 4179 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 4180 u8 *mac_addr) 4181 { 4182 u32 rc = 0; 4183 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 4184 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4185 4186 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 4187 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 4188 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 4189 req.flags |= 4190 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 4191 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 4192 req.enables = 4193 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 4194 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 4195 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 4196 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 4197 req.l2_addr_mask[0] = 0xff; 4198 req.l2_addr_mask[1] = 0xff; 4199 req.l2_addr_mask[2] = 0xff; 4200 req.l2_addr_mask[3] = 0xff; 4201 req.l2_addr_mask[4] = 0xff; 4202 req.l2_addr_mask[5] = 0xff; 4203 4204 mutex_lock(&bp->hwrm_cmd_lock); 4205 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4206 if (!rc) 4207 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 4208 resp->l2_filter_id; 4209 mutex_unlock(&bp->hwrm_cmd_lock); 4210 return rc; 4211 } 4212 4213 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 4214 { 4215 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 4216 int rc = 0; 4217 4218 /* Any associated ntuple filters will also be cleared by firmware. 
*/ 4219 mutex_lock(&bp->hwrm_cmd_lock); 4220 for (i = 0; i < num_of_vnics; i++) { 4221 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4222 4223 for (j = 0; j < vnic->uc_filter_count; j++) { 4224 struct hwrm_cfa_l2_filter_free_input req = {0}; 4225 4226 bnxt_hwrm_cmd_hdr_init(bp, &req, 4227 HWRM_CFA_L2_FILTER_FREE, -1, -1); 4228 4229 req.l2_filter_id = vnic->fw_l2_filter_id[j]; 4230 4231 rc = _hwrm_send_message(bp, &req, sizeof(req), 4232 HWRM_CMD_TIMEOUT); 4233 } 4234 vnic->uc_filter_count = 0; 4235 } 4236 mutex_unlock(&bp->hwrm_cmd_lock); 4237 4238 return rc; 4239 } 4240 4241 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 4242 { 4243 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4244 struct hwrm_vnic_tpa_cfg_input req = {0}; 4245 4246 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 4247 return 0; 4248 4249 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 4250 4251 if (tpa_flags) { 4252 u16 mss = bp->dev->mtu - 40; 4253 u32 nsegs, n, segs = 0, flags; 4254 4255 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 4256 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 4257 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 4258 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 4259 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 4260 if (tpa_flags & BNXT_FLAG_GRO) 4261 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 4262 4263 req.flags = cpu_to_le32(flags); 4264 4265 req.enables = 4266 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 4267 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 4268 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 4269 4270 /* Number of segs are log2 units, and first packet is not 4271 * included as part of this units. 4272 */ 4273 if (mss <= BNXT_RX_PAGE_SIZE) { 4274 n = BNXT_RX_PAGE_SIZE / mss; 4275 nsegs = (MAX_SKB_FRAGS - 1) * n; 4276 } else { 4277 n = mss / BNXT_RX_PAGE_SIZE; 4278 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 4279 n++; 4280 nsegs = (MAX_SKB_FRAGS - n) / n; 4281 } 4282 4283 segs = ilog2(nsegs); 4284 req.max_agg_segs = cpu_to_le16(segs); 4285 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); 4286 4287 req.min_agg_len = cpu_to_le32(512); 4288 } 4289 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4290 4291 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4292 } 4293 4294 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 4295 { 4296 struct bnxt_ring_grp_info *grp_info; 4297 4298 grp_info = &bp->grp_info[ring->grp_idx]; 4299 return grp_info->cp_fw_ring_id; 4300 } 4301 4302 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 4303 { 4304 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4305 struct bnxt_napi *bnapi = rxr->bnapi; 4306 struct bnxt_cp_ring_info *cpr; 4307 4308 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL]; 4309 return cpr->cp_ring_struct.fw_ring_id; 4310 } else { 4311 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 4312 } 4313 } 4314 4315 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 4316 { 4317 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4318 struct bnxt_napi *bnapi = txr->bnapi; 4319 struct bnxt_cp_ring_info *cpr; 4320 4321 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL]; 4322 return cpr->cp_ring_struct.fw_ring_id; 4323 } else { 4324 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 4325 } 4326 } 4327 4328 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 4329 { 4330 u32 i, j, max_rings; 4331 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4332 struct hwrm_vnic_rss_cfg_input req = {0}; 4333 4334 if ((bp->flags & BNXT_FLAG_CHIP_P5) || 4335 
vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 4336 return 0; 4337 4338 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 4339 if (set_rss) { 4340 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 4341 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 4342 if (vnic->flags & BNXT_VNIC_RSS_FLAG) { 4343 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4344 max_rings = bp->rx_nr_rings - 1; 4345 else 4346 max_rings = bp->rx_nr_rings; 4347 } else { 4348 max_rings = 1; 4349 } 4350 4351 /* Fill the RSS indirection table with ring group ids */ 4352 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { 4353 if (j == max_rings) 4354 j = 0; 4355 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 4356 } 4357 4358 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 4359 req.hash_key_tbl_addr = 4360 cpu_to_le64(vnic->rss_hash_key_dma_addr); 4361 } 4362 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 4363 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4364 } 4365 4366 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) 4367 { 4368 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4369 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings; 4370 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 4371 struct hwrm_vnic_rss_cfg_input req = {0}; 4372 4373 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 4374 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4375 if (!set_rss) { 4376 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4377 return 0; 4378 } 4379 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 4380 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 4381 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 4382 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 4383 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); 4384 for (i = 0, k = 0; i < nr_ctxs; i++) { 4385 __le16 *ring_tbl = vnic->rss_table; 4386 int rc; 4387 4388 req.ring_table_pair_index = i; 4389 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 4390 for (j = 0; j < 64; j++) { 4391 u16 ring_id; 4392 4393 ring_id = rxr->rx_ring_struct.fw_ring_id; 4394 *ring_tbl++ = cpu_to_le16(ring_id); 4395 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 4396 *ring_tbl++ = cpu_to_le16(ring_id); 4397 rxr++; 4398 k++; 4399 if (k == max_rings) { 4400 k = 0; 4401 rxr = &bp->rx_ring[0]; 4402 } 4403 } 4404 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4405 if (rc) 4406 return -EIO; 4407 } 4408 return 0; 4409 } 4410 4411 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 4412 { 4413 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4414 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 4415 4416 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 4417 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 4418 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 4419 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 4420 req.enables = 4421 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 4422 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 4423 /* thresholds not implemented in firmware yet */ 4424 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 4425 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 4426 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 4427 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4428 } 4429 4430 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 4431 u16 ctx_idx) 4432 { 4433 struct 
hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 4434 4435 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 4436 req.rss_cos_lb_ctx_id = 4437 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 4438 4439 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4440 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 4441 } 4442 4443 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 4444 { 4445 int i, j; 4446 4447 for (i = 0; i < bp->nr_vnics; i++) { 4448 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4449 4450 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 4451 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 4452 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 4453 } 4454 } 4455 bp->rsscos_nr_ctxs = 0; 4456 } 4457 4458 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 4459 { 4460 int rc; 4461 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 4462 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 4463 bp->hwrm_cmd_resp_addr; 4464 4465 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 4466 -1); 4467 4468 mutex_lock(&bp->hwrm_cmd_lock); 4469 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4470 if (!rc) 4471 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 4472 le16_to_cpu(resp->rss_cos_lb_ctx_id); 4473 mutex_unlock(&bp->hwrm_cmd_lock); 4474 4475 return rc; 4476 } 4477 4478 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 4479 { 4480 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 4481 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 4482 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 4483 } 4484 4485 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 4486 { 4487 unsigned int ring = 0, grp_idx; 4488 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4489 struct hwrm_vnic_cfg_input req = {0}; 4490 u16 def_vlan = 0; 4491 4492 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 4493 4494 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4495 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 4496 4497 req.default_rx_ring_id = 4498 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 4499 req.default_cmpl_ring_id = 4500 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 4501 req.enables = 4502 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 4503 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 4504 goto vnic_mru; 4505 } 4506 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 4507 /* Only RSS support for now TBD: COS & LB */ 4508 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 4509 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 4510 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 4511 VNIC_CFG_REQ_ENABLES_MRU); 4512 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 4513 req.rss_rule = 4514 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 4515 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 4516 VNIC_CFG_REQ_ENABLES_MRU); 4517 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 4518 } else { 4519 req.rss_rule = cpu_to_le16(0xffff); 4520 } 4521 4522 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 4523 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 4524 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 4525 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 4526 } else { 4527 req.cos_rule = cpu_to_le16(0xffff); 4528 } 4529 4530 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4531 ring = 0; 4532 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 4533 ring = vnic_id - 1; 4534 else if ((vnic_id == 1) && 
BNXT_CHIP_TYPE_NITRO_A0(bp)) 4535 ring = bp->rx_nr_rings - 1; 4536 4537 grp_idx = bp->rx_ring[ring].bnapi->index; 4538 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 4539 req.lb_rule = cpu_to_le16(0xffff); 4540 vnic_mru: 4541 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 4542 VLAN_HLEN); 4543 4544 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4545 #ifdef CONFIG_BNXT_SRIOV 4546 if (BNXT_VF(bp)) 4547 def_vlan = bp->vf.vlan; 4548 #endif 4549 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 4550 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 4551 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 4552 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 4553 4554 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4555 } 4556 4557 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 4558 { 4559 u32 rc = 0; 4560 4561 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 4562 struct hwrm_vnic_free_input req = {0}; 4563 4564 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 4565 req.vnic_id = 4566 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 4567 4568 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4569 if (rc) 4570 return rc; 4571 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 4572 } 4573 return rc; 4574 } 4575 4576 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 4577 { 4578 u16 i; 4579 4580 for (i = 0; i < bp->nr_vnics; i++) 4581 bnxt_hwrm_vnic_free_one(bp, i); 4582 } 4583 4584 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 4585 unsigned int start_rx_ring_idx, 4586 unsigned int nr_rings) 4587 { 4588 int rc = 0; 4589 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 4590 struct hwrm_vnic_alloc_input req = {0}; 4591 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4592 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4593 4594 if (bp->flags & BNXT_FLAG_CHIP_P5) 4595 goto vnic_no_ring_grps; 4596 4597 /* map ring groups to this vnic */ 4598 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 4599 grp_idx = bp->rx_ring[i].bnapi->index; 4600 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 4601 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 4602 j, nr_rings); 4603 break; 4604 } 4605 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 4606 } 4607 4608 vnic_no_ring_grps: 4609 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 4610 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 4611 if (vnic_id == 0) 4612 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 4613 4614 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 4615 4616 mutex_lock(&bp->hwrm_cmd_lock); 4617 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4618 if (!rc) 4619 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 4620 mutex_unlock(&bp->hwrm_cmd_lock); 4621 return rc; 4622 } 4623 4624 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 4625 { 4626 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4627 struct hwrm_vnic_qcaps_input req = {0}; 4628 int rc; 4629 4630 if (bp->hwrm_spec_code < 0x10600) 4631 return 0; 4632 4633 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 4634 mutex_lock(&bp->hwrm_cmd_lock); 4635 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4636 if (!rc) { 4637 u32 flags = le32_to_cpu(resp->flags); 4638 4639 if (!(bp->flags & BNXT_FLAG_CHIP_P5) && 4640 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 4641 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 
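		/* Note (added): RSS_DFLT_CR_CAP appears to mean the firmware
		 * can apply the default VNIC's RSS ring table to RFS vnics
		 * (see RSS_DFLT_CR_MODE in bnxt_hwrm_vnic_cfg()), so no extra
		 * RSS contexts are reserved for them in
		 * __bnxt_hwrm_reserve_pf_rings().
		 */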
4642 if (flags & 4643 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 4644 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 4645 } 4646 mutex_unlock(&bp->hwrm_cmd_lock); 4647 return rc; 4648 } 4649 4650 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 4651 { 4652 u16 i; 4653 u32 rc = 0; 4654 4655 if (bp->flags & BNXT_FLAG_CHIP_P5) 4656 return 0; 4657 4658 mutex_lock(&bp->hwrm_cmd_lock); 4659 for (i = 0; i < bp->rx_nr_rings; i++) { 4660 struct hwrm_ring_grp_alloc_input req = {0}; 4661 struct hwrm_ring_grp_alloc_output *resp = 4662 bp->hwrm_cmd_resp_addr; 4663 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 4664 4665 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 4666 4667 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 4668 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 4669 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 4670 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 4671 4672 rc = _hwrm_send_message(bp, &req, sizeof(req), 4673 HWRM_CMD_TIMEOUT); 4674 if (rc) 4675 break; 4676 4677 bp->grp_info[grp_idx].fw_grp_id = 4678 le32_to_cpu(resp->ring_group_id); 4679 } 4680 mutex_unlock(&bp->hwrm_cmd_lock); 4681 return rc; 4682 } 4683 4684 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) 4685 { 4686 u16 i; 4687 u32 rc = 0; 4688 struct hwrm_ring_grp_free_input req = {0}; 4689 4690 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) 4691 return 0; 4692 4693 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 4694 4695 mutex_lock(&bp->hwrm_cmd_lock); 4696 for (i = 0; i < bp->cp_nr_rings; i++) { 4697 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 4698 continue; 4699 req.ring_group_id = 4700 cpu_to_le32(bp->grp_info[i].fw_grp_id); 4701 4702 rc = _hwrm_send_message(bp, &req, sizeof(req), 4703 HWRM_CMD_TIMEOUT); 4704 if (rc) 4705 break; 4706 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4707 } 4708 mutex_unlock(&bp->hwrm_cmd_lock); 4709 return rc; 4710 } 4711 4712 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 4713 struct bnxt_ring_struct *ring, 4714 u32 ring_type, u32 map_index) 4715 { 4716 int rc = 0, err = 0; 4717 struct hwrm_ring_alloc_input req = {0}; 4718 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4719 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 4720 struct bnxt_ring_grp_info *grp_info; 4721 u16 ring_id; 4722 4723 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 4724 4725 req.enables = 0; 4726 if (rmem->nr_pages > 1) { 4727 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 4728 /* Page size is in log2 units */ 4729 req.page_size = BNXT_PAGE_SHIFT; 4730 req.page_tbl_depth = 1; 4731 } else { 4732 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 4733 } 4734 req.fbo = 0; 4735 /* Association of ring index with doorbell index and MSIX number */ 4736 req.logical_id = cpu_to_le16(map_index); 4737 4738 switch (ring_type) { 4739 case HWRM_RING_ALLOC_TX: { 4740 struct bnxt_tx_ring_info *txr; 4741 4742 txr = container_of(ring, struct bnxt_tx_ring_info, 4743 tx_ring_struct); 4744 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 4745 /* Association of transmit ring with completion ring */ 4746 grp_info = &bp->grp_info[ring->grp_idx]; 4747 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 4748 req.length = cpu_to_le32(bp->tx_ring_mask + 1); 4749 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 4750 req.queue_id = cpu_to_le16(ring->queue_id); 4751 break; 4752 } 4753 case HWRM_RING_ALLOC_RX: 4754 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 4755 req.length = 
cpu_to_le32(bp->rx_ring_mask + 1); 4756 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4757 u16 flags = 0; 4758 4759 /* Association of rx ring with stats context */ 4760 grp_info = &bp->grp_info[ring->grp_idx]; 4761 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 4762 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 4763 req.enables |= cpu_to_le32( 4764 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 4765 if (NET_IP_ALIGN == 2) 4766 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; 4767 req.flags = cpu_to_le16(flags); 4768 } 4769 break; 4770 case HWRM_RING_ALLOC_AGG: 4771 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4772 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 4773 /* Association of agg ring with rx ring */ 4774 grp_info = &bp->grp_info[ring->grp_idx]; 4775 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 4776 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 4777 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 4778 req.enables |= cpu_to_le32( 4779 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | 4780 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); 4781 } else { 4782 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 4783 } 4784 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 4785 break; 4786 case HWRM_RING_ALLOC_CMPL: 4787 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 4788 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 4789 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4790 /* Association of cp ring with nq */ 4791 grp_info = &bp->grp_info[map_index]; 4792 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 4793 req.cq_handle = cpu_to_le64(ring->handle); 4794 req.enables |= cpu_to_le32( 4795 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 4796 } else if (bp->flags & BNXT_FLAG_USING_MSIX) { 4797 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 4798 } 4799 break; 4800 case HWRM_RING_ALLOC_NQ: 4801 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 4802 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 4803 if (bp->flags & BNXT_FLAG_USING_MSIX) 4804 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 4805 break; 4806 default: 4807 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 4808 ring_type); 4809 return -1; 4810 } 4811 4812 mutex_lock(&bp->hwrm_cmd_lock); 4813 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4814 err = le16_to_cpu(resp->error_code); 4815 ring_id = le16_to_cpu(resp->ring_id); 4816 mutex_unlock(&bp->hwrm_cmd_lock); 4817 4818 if (rc || err) { 4819 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. 
rc:%x err:%x\n", 4820 ring_type, rc, err); 4821 return -EIO; 4822 } 4823 ring->fw_ring_id = ring_id; 4824 return rc; 4825 } 4826 4827 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 4828 { 4829 int rc; 4830 4831 if (BNXT_PF(bp)) { 4832 struct hwrm_func_cfg_input req = {0}; 4833 4834 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4835 req.fid = cpu_to_le16(0xffff); 4836 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4837 req.async_event_cr = cpu_to_le16(idx); 4838 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4839 } else { 4840 struct hwrm_func_vf_cfg_input req = {0}; 4841 4842 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4843 req.enables = 4844 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4845 req.async_event_cr = cpu_to_le16(idx); 4846 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4847 } 4848 return rc; 4849 } 4850 4851 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 4852 u32 map_idx, u32 xid) 4853 { 4854 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4855 if (BNXT_PF(bp)) 4856 db->doorbell = bp->bar1 + 0x10000; 4857 else 4858 db->doorbell = bp->bar1 + 0x4000; 4859 switch (ring_type) { 4860 case HWRM_RING_ALLOC_TX: 4861 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 4862 break; 4863 case HWRM_RING_ALLOC_RX: 4864 case HWRM_RING_ALLOC_AGG: 4865 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 4866 break; 4867 case HWRM_RING_ALLOC_CMPL: 4868 db->db_key64 = DBR_PATH_L2; 4869 break; 4870 case HWRM_RING_ALLOC_NQ: 4871 db->db_key64 = DBR_PATH_L2; 4872 break; 4873 } 4874 db->db_key64 |= (u64)xid << DBR_XID_SFT; 4875 } else { 4876 db->doorbell = bp->bar1 + map_idx * 0x80; 4877 switch (ring_type) { 4878 case HWRM_RING_ALLOC_TX: 4879 db->db_key32 = DB_KEY_TX; 4880 break; 4881 case HWRM_RING_ALLOC_RX: 4882 case HWRM_RING_ALLOC_AGG: 4883 db->db_key32 = DB_KEY_RX; 4884 break; 4885 case HWRM_RING_ALLOC_CMPL: 4886 db->db_key32 = DB_KEY_CP; 4887 break; 4888 } 4889 } 4890 } 4891 4892 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 4893 { 4894 int i, rc = 0; 4895 u32 type; 4896 4897 if (bp->flags & BNXT_FLAG_CHIP_P5) 4898 type = HWRM_RING_ALLOC_NQ; 4899 else 4900 type = HWRM_RING_ALLOC_CMPL; 4901 for (i = 0; i < bp->cp_nr_rings; i++) { 4902 struct bnxt_napi *bnapi = bp->bnapi[i]; 4903 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4904 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4905 u32 map_idx = ring->map_idx; 4906 4907 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 4908 if (rc) 4909 goto err_out; 4910 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 4911 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4912 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 4913 4914 if (!i) { 4915 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 4916 if (rc) 4917 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 4918 } 4919 } 4920 4921 type = HWRM_RING_ALLOC_TX; 4922 for (i = 0; i < bp->tx_nr_rings; i++) { 4923 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4924 struct bnxt_ring_struct *ring; 4925 u32 map_idx; 4926 4927 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4928 struct bnxt_napi *bnapi = txr->bnapi; 4929 struct bnxt_cp_ring_info *cpr, *cpr2; 4930 u32 type2 = HWRM_RING_ALLOC_CMPL; 4931 4932 cpr = &bnapi->cp_ring; 4933 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL]; 4934 ring = &cpr2->cp_ring_struct; 4935 ring->handle = BNXT_TX_HDL; 4936 map_idx = bnapi->index; 4937 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 4938 if (rc) 4939 goto err_out; 
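			/* On P5 chips each TX ring has its own completion
			 * ring (BNXT_TX_HDL) bound to the NQ above; record its
			 * doorbell before allocating the TX ring itself.
			 */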
4940 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 4941 ring->fw_ring_id); 4942 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 4943 } 4944 ring = &txr->tx_ring_struct; 4945 map_idx = i; 4946 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 4947 if (rc) 4948 goto err_out; 4949 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); 4950 } 4951 4952 type = HWRM_RING_ALLOC_RX; 4953 for (i = 0; i < bp->rx_nr_rings; i++) { 4954 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4955 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 4956 struct bnxt_napi *bnapi = rxr->bnapi; 4957 u32 map_idx = bnapi->index; 4958 4959 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 4960 if (rc) 4961 goto err_out; 4962 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 4963 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 4964 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 4965 if (bp->flags & BNXT_FLAG_CHIP_P5) { 4966 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4967 u32 type2 = HWRM_RING_ALLOC_CMPL; 4968 struct bnxt_cp_ring_info *cpr2; 4969 4970 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL]; 4971 ring = &cpr2->cp_ring_struct; 4972 ring->handle = BNXT_RX_HDL; 4973 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); 4974 if (rc) 4975 goto err_out; 4976 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, 4977 ring->fw_ring_id); 4978 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); 4979 } 4980 } 4981 4982 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 4983 type = HWRM_RING_ALLOC_AGG; 4984 for (i = 0; i < bp->rx_nr_rings; i++) { 4985 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4986 struct bnxt_ring_struct *ring = 4987 &rxr->rx_agg_ring_struct; 4988 u32 grp_idx = ring->grp_idx; 4989 u32 map_idx = grp_idx + bp->rx_nr_rings; 4990 4991 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 4992 if (rc) 4993 goto err_out; 4994 4995 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 4996 ring->fw_ring_id); 4997 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 4998 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 4999 } 5000 } 5001 err_out: 5002 return rc; 5003 } 5004 5005 static int hwrm_ring_free_send_msg(struct bnxt *bp, 5006 struct bnxt_ring_struct *ring, 5007 u32 ring_type, int cmpl_ring_id) 5008 { 5009 int rc; 5010 struct hwrm_ring_free_input req = {0}; 5011 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 5012 u16 error_code; 5013 5014 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 5015 req.ring_type = ring_type; 5016 req.ring_id = cpu_to_le16(ring->fw_ring_id); 5017 5018 mutex_lock(&bp->hwrm_cmd_lock); 5019 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5020 error_code = le16_to_cpu(resp->error_code); 5021 mutex_unlock(&bp->hwrm_cmd_lock); 5022 5023 if (rc || error_code) { 5024 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", 5025 ring_type, rc, error_code); 5026 return -EIO; 5027 } 5028 return 0; 5029 } 5030 5031 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 5032 { 5033 u32 type; 5034 int i; 5035 5036 if (!bp->bnapi) 5037 return; 5038 5039 for (i = 0; i < bp->tx_nr_rings; i++) { 5040 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5041 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 5042 u32 cmpl_ring_id; 5043 5044 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); 5045 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5046 hwrm_ring_free_send_msg(bp, ring, 5047 RING_FREE_REQ_RING_TYPE_TX, 5048 close_path ? 
cmpl_ring_id : 5049 INVALID_HW_RING_ID); 5050 ring->fw_ring_id = INVALID_HW_RING_ID; 5051 } 5052 } 5053 5054 for (i = 0; i < bp->rx_nr_rings; i++) { 5055 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5056 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5057 u32 grp_idx = rxr->bnapi->index; 5058 u32 cmpl_ring_id; 5059 5060 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5061 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5062 hwrm_ring_free_send_msg(bp, ring, 5063 RING_FREE_REQ_RING_TYPE_RX, 5064 close_path ? cmpl_ring_id : 5065 INVALID_HW_RING_ID); 5066 ring->fw_ring_id = INVALID_HW_RING_ID; 5067 bp->grp_info[grp_idx].rx_fw_ring_id = 5068 INVALID_HW_RING_ID; 5069 } 5070 } 5071 5072 if (bp->flags & BNXT_FLAG_CHIP_P5) 5073 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 5074 else 5075 type = RING_FREE_REQ_RING_TYPE_RX; 5076 for (i = 0; i < bp->rx_nr_rings; i++) { 5077 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5078 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 5079 u32 grp_idx = rxr->bnapi->index; 5080 u32 cmpl_ring_id; 5081 5082 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 5083 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5084 hwrm_ring_free_send_msg(bp, ring, type, 5085 close_path ? cmpl_ring_id : 5086 INVALID_HW_RING_ID); 5087 ring->fw_ring_id = INVALID_HW_RING_ID; 5088 bp->grp_info[grp_idx].agg_fw_ring_id = 5089 INVALID_HW_RING_ID; 5090 } 5091 } 5092 5093 /* The completion rings are about to be freed. After that the 5094 * IRQ doorbell will not work anymore. So we need to disable 5095 * IRQ here. 5096 */ 5097 bnxt_disable_int_sync(bp); 5098 5099 if (bp->flags & BNXT_FLAG_CHIP_P5) 5100 type = RING_FREE_REQ_RING_TYPE_NQ; 5101 else 5102 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 5103 for (i = 0; i < bp->cp_nr_rings; i++) { 5104 struct bnxt_napi *bnapi = bp->bnapi[i]; 5105 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5106 struct bnxt_ring_struct *ring; 5107 int j; 5108 5109 for (j = 0; j < 2; j++) { 5110 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; 5111 5112 if (cpr2) { 5113 ring = &cpr2->cp_ring_struct; 5114 if (ring->fw_ring_id == INVALID_HW_RING_ID) 5115 continue; 5116 hwrm_ring_free_send_msg(bp, ring, 5117 RING_FREE_REQ_RING_TYPE_L2_CMPL, 5118 INVALID_HW_RING_ID); 5119 ring->fw_ring_id = INVALID_HW_RING_ID; 5120 } 5121 } 5122 ring = &cpr->cp_ring_struct; 5123 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5124 hwrm_ring_free_send_msg(bp, ring, type, 5125 INVALID_HW_RING_ID); 5126 ring->fw_ring_id = INVALID_HW_RING_ID; 5127 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 5128 } 5129 } 5130 } 5131 5132 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5133 bool shared); 5134 5135 static int bnxt_hwrm_get_rings(struct bnxt *bp) 5136 { 5137 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5138 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5139 struct hwrm_func_qcfg_input req = {0}; 5140 int rc; 5141 5142 if (bp->hwrm_spec_code < 0x10601) 5143 return 0; 5144 5145 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5146 req.fid = cpu_to_le16(0xffff); 5147 mutex_lock(&bp->hwrm_cmd_lock); 5148 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5149 if (rc) { 5150 mutex_unlock(&bp->hwrm_cmd_lock); 5151 return -EIO; 5152 } 5153 5154 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5155 if (BNXT_NEW_RM(bp)) { 5156 u16 cp, stats; 5157 5158 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 5159 hw_resc->resv_hw_ring_grps = 5160 le32_to_cpu(resp->alloc_hw_ring_grps); 5161 hw_resc->resv_vnics = 
le16_to_cpu(resp->alloc_vnics); 5162 cp = le16_to_cpu(resp->alloc_cmpl_rings); 5163 stats = le16_to_cpu(resp->alloc_stat_ctx); 5164 cp = min_t(u16, cp, stats); 5165 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5166 int rx = hw_resc->resv_rx_rings; 5167 int tx = hw_resc->resv_tx_rings; 5168 5169 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5170 rx >>= 1; 5171 if (cp < (rx + tx)) { 5172 bnxt_trim_rings(bp, &rx, &tx, cp, false); 5173 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5174 rx <<= 1; 5175 hw_resc->resv_rx_rings = rx; 5176 hw_resc->resv_tx_rings = tx; 5177 } 5178 cp = le16_to_cpu(resp->alloc_msix); 5179 hw_resc->resv_hw_ring_grps = rx; 5180 } 5181 hw_resc->resv_cp_rings = cp; 5182 } 5183 mutex_unlock(&bp->hwrm_cmd_lock); 5184 return 0; 5185 } 5186 5187 /* Caller must hold bp->hwrm_cmd_lock */ 5188 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 5189 { 5190 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5191 struct hwrm_func_qcfg_input req = {0}; 5192 int rc; 5193 5194 if (bp->hwrm_spec_code < 0x10601) 5195 return 0; 5196 5197 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5198 req.fid = cpu_to_le16(fid); 5199 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5200 if (!rc) 5201 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 5202 5203 return rc; 5204 } 5205 5206 static bool bnxt_rfs_supported(struct bnxt *bp); 5207 5208 static void 5209 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, 5210 int tx_rings, int rx_rings, int ring_grps, 5211 int cp_rings, int vnics) 5212 { 5213 u32 enables = 0; 5214 5215 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); 5216 req->fid = cpu_to_le16(0xffff); 5217 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5218 req->num_tx_rings = cpu_to_le16(tx_rings); 5219 if (BNXT_NEW_RM(bp)) { 5220 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 5221 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5222 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 5223 enables |= tx_rings + ring_grps ? 5224 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 5225 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5226 enables |= rx_rings ? 5227 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5228 } else { 5229 enables |= cp_rings ? 5230 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 5231 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5232 enables |= ring_grps ? 5233 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 5234 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5235 } 5236 enables |= vnics ? 
FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 5237 5238 req->num_rx_rings = cpu_to_le16(rx_rings); 5239 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5240 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5241 req->num_msix = cpu_to_le16(cp_rings); 5242 req->num_rsscos_ctxs = 5243 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5244 } else { 5245 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5246 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5247 req->num_rsscos_ctxs = cpu_to_le16(1); 5248 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 5249 bnxt_rfs_supported(bp)) 5250 req->num_rsscos_ctxs = 5251 cpu_to_le16(ring_grps + 1); 5252 } 5253 req->num_stat_ctxs = req->num_cmpl_rings; 5254 req->num_vnics = cpu_to_le16(vnics); 5255 } 5256 req->enables = cpu_to_le32(enables); 5257 } 5258 5259 static void 5260 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, 5261 struct hwrm_func_vf_cfg_input *req, int tx_rings, 5262 int rx_rings, int ring_grps, int cp_rings, 5263 int vnics) 5264 { 5265 u32 enables = 0; 5266 5267 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); 5268 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5269 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 5270 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5271 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5272 enables |= tx_rings + ring_grps ? 5273 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 5274 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5275 } else { 5276 enables |= cp_rings ? 5277 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 5278 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 5279 enables |= ring_grps ? 5280 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 5281 } 5282 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 5283 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 5284 5285 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 5286 req->num_tx_rings = cpu_to_le16(tx_rings); 5287 req->num_rx_rings = cpu_to_le16(rx_rings); 5288 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5289 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 5290 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 5291 } else { 5292 req->num_cmpl_rings = cpu_to_le16(cp_rings); 5293 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 5294 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 5295 } 5296 req->num_stat_ctxs = req->num_cmpl_rings; 5297 req->num_vnics = cpu_to_le16(vnics); 5298 5299 req->enables = cpu_to_le32(enables); 5300 } 5301 5302 static int 5303 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5304 int ring_grps, int cp_rings, int vnics) 5305 { 5306 struct hwrm_func_cfg_input req = {0}; 5307 int rc; 5308 5309 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5310 cp_rings, vnics); 5311 if (!req.enables) 5312 return 0; 5313 5314 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5315 if (rc) 5316 return -ENOMEM; 5317 5318 if (bp->hwrm_spec_code < 0x10601) 5319 bp->hw_resc.resv_tx_rings = tx_rings; 5320 5321 rc = bnxt_hwrm_get_rings(bp); 5322 return rc; 5323 } 5324 5325 static int 5326 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5327 int ring_grps, int cp_rings, int vnics) 5328 { 5329 struct hwrm_func_vf_cfg_input req = {0}; 5330 int rc; 5331 5332 if (!BNXT_NEW_RM(bp)) { 5333 bp->hw_resc.resv_tx_rings = tx_rings; 5334 return 0; 5335 } 5336 5337 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5338 cp_rings, vnics); 5339 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5340 if (rc) 5341 return -ENOMEM; 5342 5343 rc = 
bnxt_hwrm_get_rings(bp); 5344 return rc; 5345 } 5346 5347 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 5348 int cp, int vnic) 5349 { 5350 if (BNXT_PF(bp)) 5351 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic); 5352 else 5353 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); 5354 } 5355 5356 static int bnxt_cp_rings_in_use(struct bnxt *bp) 5357 { 5358 int cp = bp->cp_nr_rings; 5359 int ulp_msix, ulp_base; 5360 5361 ulp_msix = bnxt_get_ulp_msix_num(bp); 5362 if (ulp_msix) { 5363 ulp_base = bnxt_get_ulp_msix_base(bp); 5364 cp += ulp_msix; 5365 if ((ulp_base + ulp_msix) > cp) 5366 cp = ulp_base + ulp_msix; 5367 } 5368 return cp; 5369 } 5370 5371 static bool bnxt_need_reserve_rings(struct bnxt *bp) 5372 { 5373 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5374 int cp = bnxt_cp_rings_in_use(bp); 5375 int rx = bp->rx_nr_rings; 5376 int vnic = 1, grp = rx; 5377 5378 if (bp->hwrm_spec_code < 0x10601) 5379 return false; 5380 5381 if (hw_resc->resv_tx_rings != bp->tx_nr_rings) 5382 return true; 5383 5384 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5385 vnic = rx + 1; 5386 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5387 rx <<= 1; 5388 if (BNXT_NEW_RM(bp) && 5389 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 5390 hw_resc->resv_vnics != vnic || 5391 (hw_resc->resv_hw_ring_grps != grp && 5392 !(bp->flags & BNXT_FLAG_CHIP_P5)))) 5393 return true; 5394 return false; 5395 } 5396 5397 static int __bnxt_reserve_rings(struct bnxt *bp) 5398 { 5399 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5400 int cp = bnxt_cp_rings_in_use(bp); 5401 int tx = bp->tx_nr_rings; 5402 int rx = bp->rx_nr_rings; 5403 int grp, rx_rings, rc; 5404 bool sh = false; 5405 int vnic = 1; 5406 5407 if (!bnxt_need_reserve_rings(bp)) 5408 return 0; 5409 5410 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5411 sh = true; 5412 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) 5413 vnic = rx + 1; 5414 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5415 rx <<= 1; 5416 grp = bp->rx_nr_rings; 5417 5418 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic); 5419 if (rc) 5420 return rc; 5421 5422 tx = hw_resc->resv_tx_rings; 5423 if (BNXT_NEW_RM(bp)) { 5424 rx = hw_resc->resv_rx_rings; 5425 cp = hw_resc->resv_cp_rings; 5426 grp = hw_resc->resv_hw_ring_grps; 5427 vnic = hw_resc->resv_vnics; 5428 } 5429 5430 rx_rings = rx; 5431 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5432 if (rx >= 2) { 5433 rx_rings = rx >> 1; 5434 } else { 5435 if (netif_running(bp->dev)) 5436 return -ENOMEM; 5437 5438 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 5439 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 5440 bp->dev->hw_features &= ~NETIF_F_LRO; 5441 bp->dev->features &= ~NETIF_F_LRO; 5442 bnxt_set_ring_params(bp); 5443 } 5444 } 5445 rx_rings = min_t(int, rx_rings, grp); 5446 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 5447 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5448 rx = rx_rings << 1; 5449 cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; 5450 bp->tx_nr_rings = tx; 5451 bp->rx_nr_rings = rx_rings; 5452 bp->cp_nr_rings = cp; 5453 5454 if (!tx || !rx || !cp || !grp || !vnic) 5455 return -ENOMEM; 5456 5457 return rc; 5458 } 5459 5460 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5461 int ring_grps, int cp_rings, int vnics) 5462 { 5463 struct hwrm_func_vf_cfg_input req = {0}; 5464 u32 flags; 5465 int rc; 5466 5467 if (!BNXT_NEW_RM(bp)) 5468 return 0; 5469 5470 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5471 cp_rings, vnics); 5472 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 5473 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 5474 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 5475 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 5476 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 5477 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 5478 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 5479 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 5480 5481 req.flags = cpu_to_le32(flags); 5482 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5483 if (rc) 5484 return -ENOMEM; 5485 return 0; 5486 } 5487 5488 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5489 int ring_grps, int cp_rings, int vnics) 5490 { 5491 struct hwrm_func_cfg_input req = {0}; 5492 u32 flags; 5493 int rc; 5494 5495 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 5496 cp_rings, vnics); 5497 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 5498 if (BNXT_NEW_RM(bp)) { 5499 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 5500 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 5501 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 5502 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 5503 if (bp->flags & BNXT_FLAG_CHIP_P5) 5504 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 5505 else 5506 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 5507 } 5508 5509 req.flags = cpu_to_le32(flags); 5510 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5511 if (rc) 5512 return -ENOMEM; 5513 return 0; 5514 } 5515 5516 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 5517 int ring_grps, int cp_rings, int vnics) 5518 { 5519 if (bp->hwrm_spec_code < 0x10801) 5520 return 0; 5521 5522 if (BNXT_PF(bp)) 5523 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 5524 ring_grps, cp_rings, vnics); 5525 5526 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 5527 cp_rings, vnics); 5528 } 5529 5530 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 5531 { 5532 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5533 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 5534 struct hwrm_ring_aggint_qcaps_input req = {0}; 5535 int rc; 5536 5537 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 5538 coal_cap->num_cmpl_dma_aggr_max = 63; 5539 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 5540 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 5541 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 5542 coal_cap->int_lat_tmr_min_max = 65535; 5543 coal_cap->int_lat_tmr_max_max = 65535; 5544 coal_cap->num_cmpl_aggr_int_max = 65535; 5545 coal_cap->timer_units = 80; 5546 5547 if (bp->hwrm_spec_code < 0x10902) 5548 return; 5549 5550 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); 5551 mutex_lock(&bp->hwrm_cmd_lock); 5552 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5553 if (!rc) { 5554 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 5555 coal_cap->nq_params = 
le32_to_cpu(resp->nq_params);
		coal_cap->num_cmpl_dma_aggr_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
		coal_cap->num_cmpl_dma_aggr_during_int_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
		coal_cap->cmpl_aggr_dma_tmr_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
		coal_cap->int_lat_tmr_min_max =
			le16_to_cpu(resp->int_lat_tmr_min_max);
		coal_cap->int_lat_tmr_max_max =
			le16_to_cpu(resp->int_lat_tmr_max_max);
		coal_cap->num_cmpl_aggr_int_max =
			le16_to_cpu(resp->num_cmpl_aggr_int_max);
		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;

	return usec * 1000 / coal_cap->timer_units;
}

static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
	struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 cmpl_params = coal_cap->cmpl_params;
	u16 val, tmr, max, flags = 0;

	max = hw_coal->bufs_per_record * 128;
	if (hw_coal->budget)
		max = hw_coal->bufs_per_record * hw_coal->budget;
	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);

	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
	req->num_cmpl_aggr_int = cpu_to_le16(val);

	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
	req->num_cmpl_dma_aggr = cpu_to_le16(val);

	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
		      coal_cap->num_cmpl_dma_aggr_during_int_max);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
	req->int_lat_tmr_max = cpu_to_le16(tmr);

	/* min timer set to 1/2 of interrupt timer */
	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
		val = tmr / 2;
		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
		req->int_lat_tmr_min = cpu_to_le16(val);
		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	}

	/* buf timer set to 1/4 of interrupt timer */
	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);

	if (cmpl_params &
	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
		val = clamp_t(u16, tmr, 1,
			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
		/* use the clamped value, not the unclamped timer */
		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
		req->enables |=
			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
	}

	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}

/* Caller holds bp->hwrm_cmd_lock */
static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
				   struct bnxt_coal *hw_coal)
{
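	/* Only the minimum interrupt latency timer is programmed for the NQ
	 * (at half of the ring's coal_ticks); bail out early if the firmware
	 * does not advertise INT_LAT_TMR_MIN support for NQs.
	 */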
5644 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; 5645 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5646 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 5647 u32 nq_params = coal_cap->nq_params; 5648 u16 tmr; 5649 5650 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 5651 return 0; 5652 5653 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, 5654 -1, -1); 5655 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 5656 req.flags = 5657 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 5658 5659 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 5660 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 5661 req.int_lat_tmr_min = cpu_to_le16(tmr); 5662 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 5663 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5664 } 5665 5666 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 5667 { 5668 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; 5669 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5670 struct bnxt_coal coal; 5671 5672 /* Tick values in micro seconds. 5673 * 1 coal_buf x bufs_per_record = 1 completion record. 5674 */ 5675 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 5676 5677 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 5678 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 5679 5680 if (!bnapi->rx_ring) 5681 return -ENODEV; 5682 5683 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 5684 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 5685 5686 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); 5687 5688 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 5689 5690 return hwrm_send_message(bp, &req_rx, sizeof(req_rx), 5691 HWRM_CMD_TIMEOUT); 5692 } 5693 5694 int bnxt_hwrm_set_coal(struct bnxt *bp) 5695 { 5696 int i, rc = 0; 5697 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 5698 req_tx = {0}, *req; 5699 5700 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 5701 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 5702 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 5703 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 5704 5705 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); 5706 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); 5707 5708 mutex_lock(&bp->hwrm_cmd_lock); 5709 for (i = 0; i < bp->cp_nr_rings; i++) { 5710 struct bnxt_napi *bnapi = bp->bnapi[i]; 5711 struct bnxt_coal *hw_coal; 5712 u16 ring_id; 5713 5714 req = &req_rx; 5715 if (!bnapi->rx_ring) { 5716 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 5717 req = &req_tx; 5718 } else { 5719 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 5720 } 5721 req->ring_id = cpu_to_le16(ring_id); 5722 5723 rc = _hwrm_send_message(bp, req, sizeof(*req), 5724 HWRM_CMD_TIMEOUT); 5725 if (rc) 5726 break; 5727 5728 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) 5729 continue; 5730 5731 if (bnapi->rx_ring && bnapi->tx_ring) { 5732 req = &req_tx; 5733 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); 5734 req->ring_id = cpu_to_le16(ring_id); 5735 rc = _hwrm_send_message(bp, req, sizeof(*req), 5736 HWRM_CMD_TIMEOUT); 5737 if (rc) 5738 break; 5739 } 5740 if (bnapi->rx_ring) 5741 hw_coal = &bp->rx_coal; 5742 else 5743 hw_coal = &bp->tx_coal; 5744 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 5745 } 5746 mutex_unlock(&bp->hwrm_cmd_lock); 5747 return rc; 5748 } 5749 5750 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 5751 { 5752 int rc = 0, i; 5753 struct hwrm_stat_ctx_free_input req = {0}; 5754 5755 if (!bp->bnapi) 5756 return 0; 5757 5758 
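	/* Nitro A0 never allocates per-ring stats contexts (see
	 * bnxt_hwrm_stat_ctx_alloc() below), so there is nothing to free.
	 */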
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5759 return 0; 5760 5761 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 5762 5763 mutex_lock(&bp->hwrm_cmd_lock); 5764 for (i = 0; i < bp->cp_nr_rings; i++) { 5765 struct bnxt_napi *bnapi = bp->bnapi[i]; 5766 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5767 5768 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 5769 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 5770 5771 rc = _hwrm_send_message(bp, &req, sizeof(req), 5772 HWRM_CMD_TIMEOUT); 5773 if (rc) 5774 break; 5775 5776 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 5777 } 5778 } 5779 mutex_unlock(&bp->hwrm_cmd_lock); 5780 return rc; 5781 } 5782 5783 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 5784 { 5785 int rc = 0, i; 5786 struct hwrm_stat_ctx_alloc_input req = {0}; 5787 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 5788 5789 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5790 return 0; 5791 5792 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 5793 5794 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 5795 5796 mutex_lock(&bp->hwrm_cmd_lock); 5797 for (i = 0; i < bp->cp_nr_rings; i++) { 5798 struct bnxt_napi *bnapi = bp->bnapi[i]; 5799 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5800 5801 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 5802 5803 rc = _hwrm_send_message(bp, &req, sizeof(req), 5804 HWRM_CMD_TIMEOUT); 5805 if (rc) 5806 break; 5807 5808 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 5809 5810 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 5811 } 5812 mutex_unlock(&bp->hwrm_cmd_lock); 5813 return rc; 5814 } 5815 5816 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 5817 { 5818 struct hwrm_func_qcfg_input req = {0}; 5819 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5820 u16 flags; 5821 int rc; 5822 5823 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 5824 req.fid = cpu_to_le16(0xffff); 5825 mutex_lock(&bp->hwrm_cmd_lock); 5826 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5827 if (rc) 5828 goto func_qcfg_exit; 5829 5830 #ifdef CONFIG_BNXT_SRIOV 5831 if (BNXT_VF(bp)) { 5832 struct bnxt_vf_info *vf = &bp->vf; 5833 5834 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 5835 } 5836 #endif 5837 flags = le16_to_cpu(resp->flags); 5838 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 5839 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 5840 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 5841 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 5842 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 5843 } 5844 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 5845 bp->flags |= BNXT_FLAG_MULTI_HOST; 5846 5847 switch (resp->port_partition_type) { 5848 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 5849 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 5850 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 5851 bp->port_partition_type = resp->port_partition_type; 5852 break; 5853 } 5854 if (bp->hwrm_spec_code < 0x10707 || 5855 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 5856 bp->br_mode = BRIDGE_MODE_VEB; 5857 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 5858 bp->br_mode = BRIDGE_MODE_VEPA; 5859 else 5860 bp->br_mode = BRIDGE_MODE_UNDEF; 5861 5862 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 5863 if (!bp->max_mtu) 5864 bp->max_mtu = BNXT_MAX_MTU; 5865 5866 func_qcfg_exit: 5867 mutex_unlock(&bp->hwrm_cmd_lock); 5868 return rc; 5869 } 5870 5871 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 5872 { 5873 struct 
hwrm_func_backing_store_qcaps_input req = {0}; 5874 struct hwrm_func_backing_store_qcaps_output *resp = 5875 bp->hwrm_cmd_resp_addr; 5876 int rc; 5877 5878 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) 5879 return 0; 5880 5881 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); 5882 mutex_lock(&bp->hwrm_cmd_lock); 5883 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5884 if (!rc) { 5885 struct bnxt_ctx_pg_info *ctx_pg; 5886 struct bnxt_ctx_mem_info *ctx; 5887 int i; 5888 5889 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 5890 if (!ctx) { 5891 rc = -ENOMEM; 5892 goto ctx_err; 5893 } 5894 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL); 5895 if (!ctx_pg) { 5896 kfree(ctx); 5897 rc = -ENOMEM; 5898 goto ctx_err; 5899 } 5900 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++) 5901 ctx->tqm_mem[i] = ctx_pg; 5902 5903 bp->ctx = ctx; 5904 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries); 5905 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 5906 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 5907 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size); 5908 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 5909 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries); 5910 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size); 5911 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 5912 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries); 5913 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size); 5914 ctx->vnic_max_vnic_entries = 5915 le16_to_cpu(resp->vnic_max_vnic_entries); 5916 ctx->vnic_max_ring_table_entries = 5917 le16_to_cpu(resp->vnic_max_ring_table_entries); 5918 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size); 5919 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries); 5920 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size); 5921 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size); 5922 ctx->tqm_min_entries_per_ring = 5923 le32_to_cpu(resp->tqm_min_entries_per_ring); 5924 ctx->tqm_max_entries_per_ring = 5925 le32_to_cpu(resp->tqm_max_entries_per_ring); 5926 ctx->tqm_entries_multiple = resp->tqm_entries_multiple; 5927 if (!ctx->tqm_entries_multiple) 5928 ctx->tqm_entries_multiple = 1; 5929 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); 5930 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); 5931 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); 5932 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); 5933 } else { 5934 rc = 0; 5935 } 5936 ctx_err: 5937 mutex_unlock(&bp->hwrm_cmd_lock); 5938 return rc; 5939 } 5940 5941 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 5942 __le64 *pg_dir) 5943 { 5944 u8 pg_size = 0; 5945 5946 if (BNXT_PAGE_SHIFT == 13) 5947 pg_size = 1 << 4; 5948 else if (BNXT_PAGE_SIZE == 16) 5949 pg_size = 2 << 4; 5950 5951 *pg_attr = pg_size; 5952 if (rmem->nr_pages > 1) { 5953 *pg_attr |= 1; 5954 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 5955 } else { 5956 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 5957 } 5958 } 5959 5960 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 5961 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 5962 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 5963 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 5964 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 5965 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 5966 5967 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 5968 { 5969 struct hwrm_func_backing_store_cfg_input req = 
{0}; 5970 struct bnxt_ctx_mem_info *ctx = bp->ctx; 5971 struct bnxt_ctx_pg_info *ctx_pg; 5972 __le32 *num_entries; 5973 __le64 *pg_dir; 5974 u8 *pg_attr; 5975 int i, rc; 5976 u32 ena; 5977 5978 if (!ctx) 5979 return 0; 5980 5981 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); 5982 req.enables = cpu_to_le32(enables); 5983 5984 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 5985 ctx_pg = &ctx->qp_mem; 5986 req.qp_num_entries = cpu_to_le32(ctx_pg->entries); 5987 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); 5988 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); 5989 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); 5990 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 5991 &req.qpc_pg_size_qpc_lvl, 5992 &req.qpc_page_dir); 5993 } 5994 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 5995 ctx_pg = &ctx->srq_mem; 5996 req.srq_num_entries = cpu_to_le32(ctx_pg->entries); 5997 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); 5998 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); 5999 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6000 &req.srq_pg_size_srq_lvl, 6001 &req.srq_page_dir); 6002 } 6003 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 6004 ctx_pg = &ctx->cq_mem; 6005 req.cq_num_entries = cpu_to_le32(ctx_pg->entries); 6006 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); 6007 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); 6008 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, 6009 &req.cq_page_dir); 6010 } 6011 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 6012 ctx_pg = &ctx->vnic_mem; 6013 req.vnic_num_vnic_entries = 6014 cpu_to_le16(ctx->vnic_max_vnic_entries); 6015 req.vnic_num_ring_table_entries = 6016 cpu_to_le16(ctx->vnic_max_ring_table_entries); 6017 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); 6018 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6019 &req.vnic_pg_size_vnic_lvl, 6020 &req.vnic_page_dir); 6021 } 6022 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 6023 ctx_pg = &ctx->stat_mem; 6024 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); 6025 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); 6026 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 6027 &req.stat_pg_size_stat_lvl, 6028 &req.stat_page_dir); 6029 } 6030 for (i = 0, num_entries = &req.tqm_sp_num_entries, 6031 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, 6032 pg_dir = &req.tqm_sp_page_dir, 6033 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; 6034 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 6035 if (!(enables & ena)) 6036 continue; 6037 6038 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); 6039 ctx_pg = ctx->tqm_mem[i]; 6040 *num_entries = cpu_to_le32(ctx_pg->entries); 6041 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 6042 } 6043 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6044 if (rc) 6045 rc = -EIO; 6046 return rc; 6047 } 6048 6049 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 6050 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size) 6051 { 6052 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 6053 6054 if (!mem_size) 6055 return 0; 6056 6057 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6058 if (rmem->nr_pages > MAX_CTX_PAGES) { 6059 rmem->nr_pages = 0; 6060 return -EINVAL; 6061 } 6062 rmem->page_size = BNXT_PAGE_SIZE; 6063 rmem->pg_arr = ctx_pg->ctx_pg_arr; 6064 rmem->dma_arr = ctx_pg->ctx_dma_arr; 6065 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 6066 return bnxt_alloc_ring(bp, rmem); 6067 } 6068 6069 
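/* Release all backing store context memory: the per-queue TQM rings first, then the stat, vnic, cq, srq and qp rings, and clear the INITED flag so the memory can be set up again on the next init. */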
static void bnxt_free_ctx_mem(struct bnxt *bp) 6070 { 6071 struct bnxt_ctx_mem_info *ctx = bp->ctx; 6072 int i; 6073 6074 if (!ctx) 6075 return; 6076 6077 if (ctx->tqm_mem[0]) { 6078 for (i = 0; i < bp->max_q + 1; i++) 6079 bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem); 6080 kfree(ctx->tqm_mem[0]); 6081 ctx->tqm_mem[0] = NULL; 6082 } 6083 6084 bnxt_free_ring(bp, &ctx->stat_mem.ring_mem); 6085 bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem); 6086 bnxt_free_ring(bp, &ctx->cq_mem.ring_mem); 6087 bnxt_free_ring(bp, &ctx->srq_mem.ring_mem); 6088 bnxt_free_ring(bp, &ctx->qp_mem.ring_mem); 6089 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 6090 } 6091 6092 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 6093 { 6094 struct bnxt_ctx_pg_info *ctx_pg; 6095 struct bnxt_ctx_mem_info *ctx; 6096 u32 mem_size, ena, entries; 6097 int i, rc; 6098 6099 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 6100 if (rc) { 6101 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 6102 rc); 6103 return rc; 6104 } 6105 ctx = bp->ctx; 6106 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 6107 return 0; 6108 6109 ctx_pg = &ctx->qp_mem; 6110 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 6111 mem_size = ctx->qp_entry_size * ctx_pg->entries; 6112 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); 6113 if (rc) 6114 return rc; 6115 6116 ctx_pg = &ctx->srq_mem; 6117 ctx_pg->entries = ctx->srq_max_l2_entries; 6118 mem_size = ctx->srq_entry_size * ctx_pg->entries; 6119 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); 6120 if (rc) 6121 return rc; 6122 6123 ctx_pg = &ctx->cq_mem; 6124 ctx_pg->entries = ctx->cq_max_l2_entries; 6125 mem_size = ctx->cq_entry_size * ctx_pg->entries; 6126 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); 6127 if (rc) 6128 return rc; 6129 6130 ctx_pg = &ctx->vnic_mem; 6131 ctx_pg->entries = ctx->vnic_max_vnic_entries + 6132 ctx->vnic_max_ring_table_entries; 6133 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 6134 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); 6135 if (rc) 6136 return rc; 6137 6138 ctx_pg = &ctx->stat_mem; 6139 ctx_pg->entries = ctx->stat_max_entries; 6140 mem_size = ctx->stat_entry_size * ctx_pg->entries; 6141 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); 6142 if (rc) 6143 return rc; 6144 6145 entries = ctx->qp_max_l2_entries; 6146 entries = roundup(entries, ctx->tqm_entries_multiple); 6147 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, 6148 ctx->tqm_max_entries_per_ring); 6149 for (i = 0, ena = 0; i < bp->max_q + 1; i++) { 6150 ctx_pg = ctx->tqm_mem[i]; 6151 ctx_pg->entries = entries; 6152 mem_size = ctx->tqm_entry_size * entries; 6153 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); 6154 if (rc) 6155 return rc; 6156 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 6157 } 6158 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 6159 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 6160 if (rc) 6161 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 6162 rc); 6163 else 6164 ctx->flags |= BNXT_CTX_FLAG_INITED; 6165 6166 return 0; 6167 } 6168 6169 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 6170 { 6171 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6172 struct hwrm_func_resource_qcaps_input req = {0}; 6173 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6174 int rc; 6175 6176 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); 6177 req.fid = cpu_to_le16(0xffff); 6178 6179 mutex_lock(&bp->hwrm_cmd_lock); 6180 rc = _hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); 6181 if (rc) { 6182 rc = -EIO; 6183 goto hwrm_func_resc_qcaps_exit; 6184 } 6185 6186 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 6187 if (!all) 6188 goto hwrm_func_resc_qcaps_exit; 6189 6190 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 6191 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6192 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 6193 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6194 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 6195 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6196 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 6197 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6198 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 6199 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 6200 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 6201 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6202 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 6203 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6204 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 6205 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6206 6207 if (bp->flags & BNXT_FLAG_CHIP_P5) { 6208 u16 max_msix = le16_to_cpu(resp->max_msix); 6209 6210 hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix); 6211 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 6212 } 6213 6214 if (BNXT_PF(bp)) { 6215 struct bnxt_pf_info *pf = &bp->pf; 6216 6217 pf->vf_resv_strategy = 6218 le16_to_cpu(resp->vf_reservation_strategy); 6219 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 6220 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 6221 } 6222 hwrm_func_resc_qcaps_exit: 6223 mutex_unlock(&bp->hwrm_cmd_lock); 6224 return rc; 6225 } 6226 6227 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 6228 { 6229 int rc = 0; 6230 struct hwrm_func_qcaps_input req = {0}; 6231 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6232 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 6233 u32 flags; 6234 6235 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 6236 req.fid = cpu_to_le16(0xffff); 6237 6238 mutex_lock(&bp->hwrm_cmd_lock); 6239 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6240 if (rc) 6241 goto hwrm_func_qcaps_exit; 6242 6243 flags = le32_to_cpu(resp->flags); 6244 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 6245 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 6246 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 6247 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 6248 6249 bp->tx_push_thresh = 0; 6250 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) 6251 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 6252 6253 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 6254 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 6255 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 6256 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 6257 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 6258 if (!hw_resc->max_hw_ring_grps) 6259 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 6260 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 6261 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 6262 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 6263 6264 if (BNXT_PF(bp)) { 6265 struct bnxt_pf_info *pf = &bp->pf; 6266 6267 pf->fw_fid = le16_to_cpu(resp->fid); 6268 pf->port_id = le16_to_cpu(resp->port_id); 6269 bp->dev->dev_port = 
pf->port_id; 6270 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 6271 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 6272 pf->max_vfs = le16_to_cpu(resp->max_vfs); 6273 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 6274 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 6275 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 6276 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 6277 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 6278 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 6279 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 6280 bp->flags |= BNXT_FLAG_WOL_CAP; 6281 } else { 6282 #ifdef CONFIG_BNXT_SRIOV 6283 struct bnxt_vf_info *vf = &bp->vf; 6284 6285 vf->fw_fid = le16_to_cpu(resp->fid); 6286 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 6287 #endif 6288 } 6289 6290 hwrm_func_qcaps_exit: 6291 mutex_unlock(&bp->hwrm_cmd_lock); 6292 return rc; 6293 } 6294 6295 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 6296 { 6297 int rc; 6298 6299 rc = __bnxt_hwrm_func_qcaps(bp); 6300 if (rc) 6301 return rc; 6302 if (bp->hwrm_spec_code >= 0x10803) { 6303 rc = bnxt_alloc_ctx_mem(bp); 6304 if (rc) 6305 return rc; 6306 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 6307 if (!rc) 6308 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 6309 } 6310 return 0; 6311 } 6312 6313 static int bnxt_hwrm_func_reset(struct bnxt *bp) 6314 { 6315 struct hwrm_func_reset_input req = {0}; 6316 6317 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 6318 req.enables = 0; 6319 6320 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 6321 } 6322 6323 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 6324 { 6325 int rc = 0; 6326 struct hwrm_queue_qportcfg_input req = {0}; 6327 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 6328 u8 i, j, *qptr; 6329 bool no_rdma; 6330 6331 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 6332 6333 mutex_lock(&bp->hwrm_cmd_lock); 6334 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6335 if (rc) 6336 goto qportcfg_exit; 6337 6338 if (!resp->max_configurable_queues) { 6339 rc = -EINVAL; 6340 goto qportcfg_exit; 6341 } 6342 bp->max_tc = resp->max_configurable_queues; 6343 bp->max_lltc = resp->max_configurable_lossless_queues; 6344 if (bp->max_tc > BNXT_MAX_QUEUE) 6345 bp->max_tc = BNXT_MAX_QUEUE; 6346 6347 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 6348 qptr = &resp->queue_id0; 6349 for (i = 0, j = 0; i < bp->max_tc; i++) { 6350 bp->q_info[j].queue_id = *qptr; 6351 bp->q_ids[i] = *qptr++; 6352 bp->q_info[j].queue_profile = *qptr++; 6353 bp->tc_to_qidx[j] = j; 6354 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 6355 (no_rdma && BNXT_PF(bp))) 6356 j++; 6357 } 6358 bp->max_q = bp->max_tc; 6359 bp->max_tc = max_t(u8, j, 1); 6360 6361 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 6362 bp->max_tc = 1; 6363 6364 if (bp->max_lltc > bp->max_tc) 6365 bp->max_lltc = bp->max_tc; 6366 6367 qportcfg_exit: 6368 mutex_unlock(&bp->hwrm_cmd_lock); 6369 return rc; 6370 } 6371 6372 static int bnxt_hwrm_ver_get(struct bnxt *bp) 6373 { 6374 int rc; 6375 struct hwrm_ver_get_input req = {0}; 6376 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 6377 u32 dev_caps_cfg; 6378 6379 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 6380 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 6381 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 6382 req.hwrm_intf_min = HWRM_VERSION_MINOR; 6383 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 6384 
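/* Besides version negotiation, VER_GET returns the firmware's default command timeout and maximum request lengths; both are picked up from the response below. */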
mutex_lock(&bp->hwrm_cmd_lock); 6385 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6386 if (rc) 6387 goto hwrm_ver_get_exit; 6388 6389 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 6390 6391 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 6392 resp->hwrm_intf_min_8b << 8 | 6393 resp->hwrm_intf_upd_8b; 6394 if (resp->hwrm_intf_maj_8b < 1) { 6395 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 6396 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 6397 resp->hwrm_intf_upd_8b); 6398 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 6399 } 6400 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", 6401 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, 6402 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); 6403 6404 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 6405 if (!bp->hwrm_cmd_timeout) 6406 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 6407 6408 if (resp->hwrm_intf_maj_8b >= 1) { 6409 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 6410 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 6411 } 6412 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 6413 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 6414 6415 bp->chip_num = le16_to_cpu(resp->chip_num); 6416 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 6417 !resp->chip_metal) 6418 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 6419 6420 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 6421 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 6422 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 6423 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 6424 6425 hwrm_ver_get_exit: 6426 mutex_unlock(&bp->hwrm_cmd_lock); 6427 return rc; 6428 } 6429 6430 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 6431 { 6432 struct hwrm_fw_set_time_input req = {0}; 6433 struct tm tm; 6434 time64_t now = ktime_get_real_seconds(); 6435 6436 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 6437 bp->hwrm_spec_code < 0x10400) 6438 return -EOPNOTSUPP; 6439 6440 time64_to_tm(now, 0, &tm); 6441 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 6442 req.year = cpu_to_le16(1900 + tm.tm_year); 6443 req.month = 1 + tm.tm_mon; 6444 req.day = tm.tm_mday; 6445 req.hour = tm.tm_hour; 6446 req.minute = tm.tm_min; 6447 req.second = tm.tm_sec; 6448 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6449 } 6450 6451 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 6452 { 6453 int rc; 6454 struct bnxt_pf_info *pf = &bp->pf; 6455 struct hwrm_port_qstats_input req = {0}; 6456 6457 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 6458 return 0; 6459 6460 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 6461 req.port_id = cpu_to_le16(pf->port_id); 6462 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 6463 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 6464 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6465 return rc; 6466 } 6467 6468 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) 6469 { 6470 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; 6471 struct hwrm_port_qstats_ext_input req = {0}; 6472 struct bnxt_pf_info *pf = &bp->pf; 6473 int rc; 6474 6475 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 6476 return 0; 6477 6478 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); 6479 req.port_id = cpu_to_le16(pf->port_id); 6480 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 6481 req.rx_stat_host_addr = 
cpu_to_le64(bp->hw_rx_port_stats_ext_map); 6482 req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext)); 6483 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); 6484 mutex_lock(&bp->hwrm_cmd_lock); 6485 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6486 if (!rc) { 6487 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; 6488 bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8; 6489 } else { 6490 bp->fw_rx_stats_ext_size = 0; 6491 bp->fw_tx_stats_ext_size = 0; 6492 } 6493 mutex_unlock(&bp->hwrm_cmd_lock); 6494 return rc; 6495 } 6496 6497 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 6498 { 6499 if (bp->vxlan_port_cnt) { 6500 bnxt_hwrm_tunnel_dst_port_free( 6501 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 6502 } 6503 bp->vxlan_port_cnt = 0; 6504 if (bp->nge_port_cnt) { 6505 bnxt_hwrm_tunnel_dst_port_free( 6506 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 6507 } 6508 bp->nge_port_cnt = 0; 6509 } 6510 6511 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 6512 { 6513 int rc, i; 6514 u32 tpa_flags = 0; 6515 6516 if (set_tpa) 6517 tpa_flags = bp->flags & BNXT_FLAG_TPA; 6518 for (i = 0; i < bp->nr_vnics; i++) { 6519 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 6520 if (rc) { 6521 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 6522 i, rc); 6523 return rc; 6524 } 6525 } 6526 return 0; 6527 } 6528 6529 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 6530 { 6531 int i; 6532 6533 for (i = 0; i < bp->nr_vnics; i++) 6534 bnxt_hwrm_vnic_set_rss(bp, i, false); 6535 } 6536 6537 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 6538 bool irq_re_init) 6539 { 6540 if (bp->vnic_info) { 6541 bnxt_hwrm_clear_vnic_filter(bp); 6542 /* clear all RSS setting before free vnic ctx */ 6543 bnxt_hwrm_clear_vnic_rss(bp); 6544 bnxt_hwrm_vnic_ctx_free(bp); 6545 /* before free the vnic, undo the vnic tpa settings */ 6546 if (bp->flags & BNXT_FLAG_TPA) 6547 bnxt_set_tpa(bp, false); 6548 bnxt_hwrm_vnic_free(bp); 6549 } 6550 bnxt_hwrm_ring_free(bp, close_path); 6551 bnxt_hwrm_ring_grp_free(bp); 6552 if (irq_re_init) { 6553 bnxt_hwrm_stat_ctx_free(bp); 6554 bnxt_hwrm_free_tunnel_ports(bp); 6555 } 6556 } 6557 6558 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 6559 { 6560 struct hwrm_func_cfg_input req = {0}; 6561 int rc; 6562 6563 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 6564 req.fid = cpu_to_le16(0xffff); 6565 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 6566 if (br_mode == BRIDGE_MODE_VEB) 6567 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 6568 else if (br_mode == BRIDGE_MODE_VEPA) 6569 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 6570 else 6571 return -EINVAL; 6572 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6573 if (rc) 6574 rc = -EIO; 6575 return rc; 6576 } 6577 6578 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 6579 { 6580 struct hwrm_func_cfg_input req = {0}; 6581 int rc; 6582 6583 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 6584 return 0; 6585 6586 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 6587 req.fid = cpu_to_le16(0xffff); 6588 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 6589 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 6590 if (size == 128) 6591 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 6592 6593 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6594 if (rc) 6595 rc = -EIO; 6596 return rc; 6597 } 6598 6599 
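/* Bring up a vnic on pre-P5 chips: allocate the RSS/COS context(s), configure the vnic and its default ring group, program the RSS hash, and enable header-data split when aggregation rings are in use. */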
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 6600 { 6601 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 6602 int rc; 6603 6604 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 6605 goto skip_rss_ctx; 6606 6607 /* allocate context for vnic */ 6608 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 6609 if (rc) { 6610 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 6611 vnic_id, rc); 6612 goto vnic_setup_err; 6613 } 6614 bp->rsscos_nr_ctxs++; 6615 6616 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 6617 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 6618 if (rc) { 6619 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 6620 vnic_id, rc); 6621 goto vnic_setup_err; 6622 } 6623 bp->rsscos_nr_ctxs++; 6624 } 6625 6626 skip_rss_ctx: 6627 /* configure default vnic, ring grp */ 6628 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 6629 if (rc) { 6630 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 6631 vnic_id, rc); 6632 goto vnic_setup_err; 6633 } 6634 6635 /* Enable RSS hashing on vnic */ 6636 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 6637 if (rc) { 6638 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 6639 vnic_id, rc); 6640 goto vnic_setup_err; 6641 } 6642 6643 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 6644 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 6645 if (rc) { 6646 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 6647 vnic_id, rc); 6648 } 6649 } 6650 6651 vnic_setup_err: 6652 return rc; 6653 } 6654 6655 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) 6656 { 6657 int rc, i, nr_ctxs; 6658 6659 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); 6660 for (i = 0; i < nr_ctxs; i++) { 6661 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); 6662 if (rc) { 6663 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 6664 vnic_id, i, rc); 6665 break; 6666 } 6667 bp->rsscos_nr_ctxs++; 6668 } 6669 if (i < nr_ctxs) 6670 return -ENOMEM; 6671 6672 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); 6673 if (rc) { 6674 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 6675 vnic_id, rc); 6676 return rc; 6677 } 6678 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 6679 if (rc) { 6680 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 6681 vnic_id, rc); 6682 return rc; 6683 } 6684 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 6685 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 6686 if (rc) { 6687 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 6688 vnic_id, rc); 6689 } 6690 } 6691 return rc; 6692 } 6693 6694 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 6695 { 6696 if (bp->flags & BNXT_FLAG_CHIP_P5) 6697 return __bnxt_setup_vnic_p5(bp, vnic_id); 6698 else 6699 return __bnxt_setup_vnic(bp, vnic_id); 6700 } 6701 6702 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 6703 { 6704 #ifdef CONFIG_RFS_ACCEL 6705 int i, rc = 0; 6706 6707 for (i = 0; i < bp->rx_nr_rings; i++) { 6708 struct bnxt_vnic_info *vnic; 6709 u16 vnic_id = i + 1; 6710 u16 ring_id = i; 6711 6712 if (vnic_id >= bp->nr_vnics) 6713 break; 6714 6715 vnic = &bp->vnic_info[vnic_id]; 6716 vnic->flags |= BNXT_VNIC_RFS_FLAG; 6717 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 6718 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 6719 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 6720 if (rc) { 6721 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 6722 vnic_id, rc); 6723 break; 6724 } 6725 rc = bnxt_setup_vnic(bp, vnic_id); 6726 if (rc) 6727 break; 6728 } 6729 return rc; 6730 #else 6731 return 0; 6732 #endif 6733 } 6734 6735 /* Allow PF and VF with default VLAN to be in 
promiscuous mode */ 6736 static bool bnxt_promisc_ok(struct bnxt *bp) 6737 { 6738 #ifdef CONFIG_BNXT_SRIOV 6739 if (BNXT_VF(bp) && !bp->vf.vlan) 6740 return false; 6741 #endif 6742 return true; 6743 } 6744 6745 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 6746 { 6747 unsigned int rc = 0; 6748 6749 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 6750 if (rc) { 6751 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 6752 rc); 6753 return rc; 6754 } 6755 6756 rc = bnxt_hwrm_vnic_cfg(bp, 1); 6757 if (rc) { 6758 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 6759 rc); 6760 return rc; 6761 } 6762 return rc; 6763 } 6764 6765 static int bnxt_cfg_rx_mode(struct bnxt *); 6766 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 6767 6768 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 6769 { 6770 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6771 int rc = 0; 6772 unsigned int rx_nr_rings = bp->rx_nr_rings; 6773 6774 if (irq_re_init) { 6775 rc = bnxt_hwrm_stat_ctx_alloc(bp); 6776 if (rc) { 6777 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 6778 rc); 6779 goto err_out; 6780 } 6781 } 6782 6783 rc = bnxt_hwrm_ring_alloc(bp); 6784 if (rc) { 6785 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 6786 goto err_out; 6787 } 6788 6789 rc = bnxt_hwrm_ring_grp_alloc(bp); 6790 if (rc) { 6791 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 6792 goto err_out; 6793 } 6794 6795 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6796 rx_nr_rings--; 6797 6798 /* default vnic 0 */ 6799 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 6800 if (rc) { 6801 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 6802 goto err_out; 6803 } 6804 6805 rc = bnxt_setup_vnic(bp, 0); 6806 if (rc) 6807 goto err_out; 6808 6809 if (bp->flags & BNXT_FLAG_RFS) { 6810 rc = bnxt_alloc_rfs_vnics(bp); 6811 if (rc) 6812 goto err_out; 6813 } 6814 6815 if (bp->flags & BNXT_FLAG_TPA) { 6816 rc = bnxt_set_tpa(bp, true); 6817 if (rc) 6818 goto err_out; 6819 } 6820 6821 if (BNXT_VF(bp)) 6822 bnxt_update_vf_mac(bp); 6823 6824 /* Filter for default vnic 0 */ 6825 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 6826 if (rc) { 6827 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 6828 goto err_out; 6829 } 6830 vnic->uc_filter_count = 1; 6831 6832 vnic->rx_mask = 0; 6833 if (bp->dev->flags & IFF_BROADCAST) 6834 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 6835 6836 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 6837 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 6838 6839 if (bp->dev->flags & IFF_ALLMULTI) { 6840 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 6841 vnic->mc_list_count = 0; 6842 } else { 6843 u32 mask = 0; 6844 6845 bnxt_mc_list_updated(bp, &mask); 6846 vnic->rx_mask |= mask; 6847 } 6848 6849 rc = bnxt_cfg_rx_mode(bp); 6850 if (rc) 6851 goto err_out; 6852 6853 rc = bnxt_hwrm_set_coal(bp); 6854 if (rc) 6855 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 6856 rc); 6857 6858 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 6859 rc = bnxt_setup_nitroa0_vnic(bp); 6860 if (rc) 6861 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 6862 rc); 6863 } 6864 6865 if (BNXT_VF(bp)) { 6866 bnxt_hwrm_func_qcfg(bp); 6867 netdev_update_features(bp->dev); 6868 } 6869 6870 return 0; 6871 6872 err_out: 6873 bnxt_hwrm_resource_free(bp, 0, true); 6874 6875 return rc; 6876 } 6877 6878 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 6879 { 6880 
bnxt_hwrm_resource_free(bp, 1, irq_re_init); 6881 return 0; 6882 } 6883 6884 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 6885 { 6886 bnxt_init_cp_rings(bp); 6887 bnxt_init_rx_rings(bp); 6888 bnxt_init_tx_rings(bp); 6889 bnxt_init_ring_grps(bp, irq_re_init); 6890 bnxt_init_vnics(bp); 6891 6892 return bnxt_init_chip(bp, irq_re_init); 6893 } 6894 6895 static int bnxt_set_real_num_queues(struct bnxt *bp) 6896 { 6897 int rc; 6898 struct net_device *dev = bp->dev; 6899 6900 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 6901 bp->tx_nr_rings_xdp); 6902 if (rc) 6903 return rc; 6904 6905 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 6906 if (rc) 6907 return rc; 6908 6909 #ifdef CONFIG_RFS_ACCEL 6910 if (bp->flags & BNXT_FLAG_RFS) 6911 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 6912 #endif 6913 6914 return rc; 6915 } 6916 6917 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 6918 bool shared) 6919 { 6920 int _rx = *rx, _tx = *tx; 6921 6922 if (shared) { 6923 *rx = min_t(int, _rx, max); 6924 *tx = min_t(int, _tx, max); 6925 } else { 6926 if (max < 2) 6927 return -ENOMEM; 6928 6929 while (_rx + _tx > max) { 6930 if (_rx > _tx && _rx > 1) 6931 _rx--; 6932 else if (_tx > 1) 6933 _tx--; 6934 } 6935 *rx = _rx; 6936 *tx = _tx; 6937 } 6938 return 0; 6939 } 6940 6941 static void bnxt_setup_msix(struct bnxt *bp) 6942 { 6943 const int len = sizeof(bp->irq_tbl[0].name); 6944 struct net_device *dev = bp->dev; 6945 int tcs, i; 6946 6947 tcs = netdev_get_num_tc(dev); 6948 if (tcs > 1) { 6949 int i, off, count; 6950 6951 for (i = 0; i < tcs; i++) { 6952 count = bp->tx_nr_rings_per_tc; 6953 off = i * count; 6954 netdev_set_tc_queue(dev, i, count, off); 6955 } 6956 } 6957 6958 for (i = 0; i < bp->cp_nr_rings; i++) { 6959 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 6960 char *attr; 6961 6962 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 6963 attr = "TxRx"; 6964 else if (i < bp->rx_nr_rings) 6965 attr = "rx"; 6966 else 6967 attr = "tx"; 6968 6969 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 6970 attr, i); 6971 bp->irq_tbl[map_idx].handler = bnxt_msix; 6972 } 6973 } 6974 6975 static void bnxt_setup_inta(struct bnxt *bp) 6976 { 6977 const int len = sizeof(bp->irq_tbl[0].name); 6978 6979 if (netdev_get_num_tc(bp->dev)) 6980 netdev_reset_tc(bp->dev); 6981 6982 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 6983 0); 6984 bp->irq_tbl[0].handler = bnxt_inta; 6985 } 6986 6987 static int bnxt_setup_int_mode(struct bnxt *bp) 6988 { 6989 int rc; 6990 6991 if (bp->flags & BNXT_FLAG_USING_MSIX) 6992 bnxt_setup_msix(bp); 6993 else 6994 bnxt_setup_inta(bp); 6995 6996 rc = bnxt_set_real_num_queues(bp); 6997 return rc; 6998 } 6999 7000 #ifdef CONFIG_RFS_ACCEL 7001 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 7002 { 7003 return bp->hw_resc.max_rsscos_ctxs; 7004 } 7005 7006 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 7007 { 7008 return bp->hw_resc.max_vnics; 7009 } 7010 #endif 7011 7012 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 7013 { 7014 return bp->hw_resc.max_stat_ctxs; 7015 } 7016 7017 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) 7018 { 7019 bp->hw_resc.max_stat_ctxs = max; 7020 } 7021 7022 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 7023 { 7024 return bp->hw_resc.max_cp_rings; 7025 } 7026 7027 unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 7028 { 7029 return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp); 7030 } 7031 
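/* The usable IRQ count is bounded by both the function's interrupt allocation (max_irqs) and its completion ring limit. */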
7032 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 7033 { 7034 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7035 7036 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 7037 } 7038 7039 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 7040 { 7041 bp->hw_resc.max_irqs = max_irqs; 7042 } 7043 7044 int bnxt_get_avail_msix(struct bnxt *bp, int num) 7045 { 7046 int max_cp = bnxt_get_max_func_cp_rings(bp); 7047 int max_irq = bnxt_get_max_func_irqs(bp); 7048 int total_req = bp->cp_nr_rings + num; 7049 int max_idx, avail_msix; 7050 7051 max_idx = min_t(int, bp->total_irqs, max_cp); 7052 avail_msix = max_idx - bp->cp_nr_rings; 7053 if (!BNXT_NEW_RM(bp) || avail_msix >= num) 7054 return avail_msix; 7055 7056 if (max_irq < total_req) { 7057 num = max_irq - bp->cp_nr_rings; 7058 if (num <= 0) 7059 return 0; 7060 } 7061 return num; 7062 } 7063 7064 static int bnxt_get_num_msix(struct bnxt *bp) 7065 { 7066 if (!BNXT_NEW_RM(bp)) 7067 return bnxt_get_max_func_irqs(bp); 7068 7069 return bnxt_cp_rings_in_use(bp); 7070 } 7071 7072 static int bnxt_init_msix(struct bnxt *bp) 7073 { 7074 int i, total_vecs, max, rc = 0, min = 1, ulp_msix; 7075 struct msix_entry *msix_ent; 7076 7077 total_vecs = bnxt_get_num_msix(bp); 7078 max = bnxt_get_max_func_irqs(bp); 7079 if (total_vecs > max) 7080 total_vecs = max; 7081 7082 if (!total_vecs) 7083 return 0; 7084 7085 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 7086 if (!msix_ent) 7087 return -ENOMEM; 7088 7089 for (i = 0; i < total_vecs; i++) { 7090 msix_ent[i].entry = i; 7091 msix_ent[i].vector = 0; 7092 } 7093 7094 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 7095 min = 2; 7096 7097 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 7098 ulp_msix = bnxt_get_ulp_msix_num(bp); 7099 if (total_vecs < 0 || total_vecs < ulp_msix) { 7100 rc = -ENODEV; 7101 goto msix_setup_exit; 7102 } 7103 7104 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 7105 if (bp->irq_tbl) { 7106 for (i = 0; i < total_vecs; i++) 7107 bp->irq_tbl[i].vector = msix_ent[i].vector; 7108 7109 bp->total_irqs = total_vecs; 7110 /* Trim rings based upon num of vectors allocated */ 7111 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 7112 total_vecs - ulp_msix, min == 1); 7113 if (rc) 7114 goto msix_setup_exit; 7115 7116 bp->cp_nr_rings = (min == 1) ? 
7117 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 7118 bp->tx_nr_rings + bp->rx_nr_rings; 7119 7120 } else { 7121 rc = -ENOMEM; 7122 goto msix_setup_exit; 7123 } 7124 bp->flags |= BNXT_FLAG_USING_MSIX; 7125 kfree(msix_ent); 7126 return 0; 7127 7128 msix_setup_exit: 7129 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 7130 kfree(bp->irq_tbl); 7131 bp->irq_tbl = NULL; 7132 pci_disable_msix(bp->pdev); 7133 kfree(msix_ent); 7134 return rc; 7135 } 7136 7137 static int bnxt_init_inta(struct bnxt *bp) 7138 { 7139 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 7140 if (!bp->irq_tbl) 7141 return -ENOMEM; 7142 7143 bp->total_irqs = 1; 7144 bp->rx_nr_rings = 1; 7145 bp->tx_nr_rings = 1; 7146 bp->cp_nr_rings = 1; 7147 bp->flags |= BNXT_FLAG_SHARED_RINGS; 7148 bp->irq_tbl[0].vector = bp->pdev->irq; 7149 return 0; 7150 } 7151 7152 static int bnxt_init_int_mode(struct bnxt *bp) 7153 { 7154 int rc = 0; 7155 7156 if (bp->flags & BNXT_FLAG_MSIX_CAP) 7157 rc = bnxt_init_msix(bp); 7158 7159 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 7160 /* fallback to INTA */ 7161 rc = bnxt_init_inta(bp); 7162 } 7163 return rc; 7164 } 7165 7166 static void bnxt_clear_int_mode(struct bnxt *bp) 7167 { 7168 if (bp->flags & BNXT_FLAG_USING_MSIX) 7169 pci_disable_msix(bp->pdev); 7170 7171 kfree(bp->irq_tbl); 7172 bp->irq_tbl = NULL; 7173 bp->flags &= ~BNXT_FLAG_USING_MSIX; 7174 } 7175 7176 int bnxt_reserve_rings(struct bnxt *bp) 7177 { 7178 int tcs = netdev_get_num_tc(bp->dev); 7179 int rc; 7180 7181 if (!bnxt_need_reserve_rings(bp)) 7182 return 0; 7183 7184 rc = __bnxt_reserve_rings(bp); 7185 if (rc) { 7186 netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc); 7187 return rc; 7188 } 7189 if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) { 7190 bnxt_ulp_irq_stop(bp); 7191 bnxt_clear_int_mode(bp); 7192 rc = bnxt_init_int_mode(bp); 7193 bnxt_ulp_irq_restart(bp, rc); 7194 if (rc) 7195 return rc; 7196 } 7197 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { 7198 netdev_err(bp->dev, "tx ring reservation failure\n"); 7199 netdev_reset_tc(bp->dev); 7200 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 7201 return -ENOMEM; 7202 } 7203 bp->num_stat_ctxs = bp->cp_nr_rings; 7204 return 0; 7205 } 7206 7207 static void bnxt_free_irq(struct bnxt *bp) 7208 { 7209 struct bnxt_irq *irq; 7210 int i; 7211 7212 #ifdef CONFIG_RFS_ACCEL 7213 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 7214 bp->dev->rx_cpu_rmap = NULL; 7215 #endif 7216 if (!bp->irq_tbl || !bp->bnapi) 7217 return; 7218 7219 for (i = 0; i < bp->cp_nr_rings; i++) { 7220 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 7221 7222 irq = &bp->irq_tbl[map_idx]; 7223 if (irq->requested) { 7224 if (irq->have_cpumask) { 7225 irq_set_affinity_hint(irq->vector, NULL); 7226 free_cpumask_var(irq->cpu_mask); 7227 irq->have_cpumask = 0; 7228 } 7229 free_irq(irq->vector, bp->bnapi[i]); 7230 } 7231 7232 irq->requested = 0; 7233 } 7234 } 7235 7236 static int bnxt_request_irq(struct bnxt *bp) 7237 { 7238 int i, j, rc = 0; 7239 unsigned long flags = 0; 7240 #ifdef CONFIG_RFS_ACCEL 7241 struct cpu_rmap *rmap; 7242 #endif 7243 7244 rc = bnxt_setup_int_mode(bp); 7245 if (rc) { 7246 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 7247 rc); 7248 return rc; 7249 } 7250 #ifdef CONFIG_RFS_ACCEL 7251 rmap = bp->dev->rx_cpu_rmap; 7252 #endif 7253 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 7254 flags = IRQF_SHARED; 7255 7256 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 7257 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 7258 struct bnxt_irq *irq = 
&bp->irq_tbl[map_idx]; 7259 7260 #ifdef CONFIG_RFS_ACCEL 7261 if (rmap && bp->bnapi[i]->rx_ring) { 7262 rc = irq_cpu_rmap_add(rmap, irq->vector); 7263 if (rc) 7264 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 7265 j); 7266 j++; 7267 } 7268 #endif 7269 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 7270 bp->bnapi[i]); 7271 if (rc) 7272 break; 7273 7274 irq->requested = 1; 7275 7276 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 7277 int numa_node = dev_to_node(&bp->pdev->dev); 7278 7279 irq->have_cpumask = 1; 7280 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 7281 irq->cpu_mask); 7282 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 7283 if (rc) { 7284 netdev_warn(bp->dev, 7285 "Set affinity failed, IRQ = %d\n", 7286 irq->vector); 7287 break; 7288 } 7289 } 7290 } 7291 return rc; 7292 } 7293 7294 static void bnxt_del_napi(struct bnxt *bp) 7295 { 7296 int i; 7297 7298 if (!bp->bnapi) 7299 return; 7300 7301 for (i = 0; i < bp->cp_nr_rings; i++) { 7302 struct bnxt_napi *bnapi = bp->bnapi[i]; 7303 7304 napi_hash_del(&bnapi->napi); 7305 netif_napi_del(&bnapi->napi); 7306 } 7307 /* We called napi_hash_del() before netif_napi_del(), we need 7308 * to respect an RCU grace period before freeing napi structures. 7309 */ 7310 synchronize_net(); 7311 } 7312 7313 static void bnxt_init_napi(struct bnxt *bp) 7314 { 7315 int i; 7316 unsigned int cp_nr_rings = bp->cp_nr_rings; 7317 struct bnxt_napi *bnapi; 7318 7319 if (bp->flags & BNXT_FLAG_USING_MSIX) { 7320 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 7321 7322 if (bp->flags & BNXT_FLAG_CHIP_P5) 7323 poll_fn = bnxt_poll_p5; 7324 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 7325 cp_nr_rings--; 7326 for (i = 0; i < cp_nr_rings; i++) { 7327 bnapi = bp->bnapi[i]; 7328 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64); 7329 } 7330 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7331 bnapi = bp->bnapi[cp_nr_rings]; 7332 netif_napi_add(bp->dev, &bnapi->napi, 7333 bnxt_poll_nitroa0, 64); 7334 } 7335 } else { 7336 bnapi = bp->bnapi[0]; 7337 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 7338 } 7339 } 7340 7341 static void bnxt_disable_napi(struct bnxt *bp) 7342 { 7343 int i; 7344 7345 if (!bp->bnapi) 7346 return; 7347 7348 for (i = 0; i < bp->cp_nr_rings; i++) { 7349 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 7350 7351 if (bp->bnapi[i]->rx_ring) 7352 cancel_work_sync(&cpr->dim.work); 7353 7354 napi_disable(&bp->bnapi[i]->napi); 7355 } 7356 } 7357 7358 static void bnxt_enable_napi(struct bnxt *bp) 7359 { 7360 int i; 7361 7362 for (i = 0; i < bp->cp_nr_rings; i++) { 7363 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 7364 bp->bnapi[i]->in_reset = false; 7365 7366 if (bp->bnapi[i]->rx_ring) { 7367 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 7368 cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; 7369 } 7370 napi_enable(&bp->bnapi[i]->napi); 7371 } 7372 } 7373 7374 void bnxt_tx_disable(struct bnxt *bp) 7375 { 7376 int i; 7377 struct bnxt_tx_ring_info *txr; 7378 7379 if (bp->tx_ring) { 7380 for (i = 0; i < bp->tx_nr_rings; i++) { 7381 txr = &bp->tx_ring[i]; 7382 txr->dev_state = BNXT_DEV_STATE_CLOSING; 7383 } 7384 } 7385 /* Stop all TX queues */ 7386 netif_tx_disable(bp->dev); 7387 netif_carrier_off(bp->dev); 7388 } 7389 7390 void bnxt_tx_enable(struct bnxt *bp) 7391 { 7392 int i; 7393 struct bnxt_tx_ring_info *txr; 7394 7395 for (i = 0; i < bp->tx_nr_rings; i++) { 7396 txr = &bp->tx_ring[i]; 7397 txr->dev_state = 0; 7398 } 7399 netif_tx_wake_all_queues(bp->dev); 7400 if (bp->link_info.link_up) 
7401 netif_carrier_on(bp->dev); 7402 } 7403 7404 static void bnxt_report_link(struct bnxt *bp) 7405 { 7406 if (bp->link_info.link_up) { 7407 const char *duplex; 7408 const char *flow_ctrl; 7409 u32 speed; 7410 u16 fec; 7411 7412 netif_carrier_on(bp->dev); 7413 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 7414 duplex = "full"; 7415 else 7416 duplex = "half"; 7417 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 7418 flow_ctrl = "ON - receive & transmit"; 7419 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 7420 flow_ctrl = "ON - transmit"; 7421 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 7422 flow_ctrl = "ON - receive"; 7423 else 7424 flow_ctrl = "none"; 7425 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 7426 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", 7427 speed, duplex, flow_ctrl); 7428 if (bp->flags & BNXT_FLAG_EEE_CAP) 7429 netdev_info(bp->dev, "EEE is %s\n", 7430 bp->eee.eee_active ? "active" : 7431 "not active"); 7432 fec = bp->link_info.fec_cfg; 7433 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 7434 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", 7435 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 7436 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : 7437 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None"); 7438 } else { 7439 netif_carrier_off(bp->dev); 7440 netdev_err(bp->dev, "NIC Link is Down\n"); 7441 } 7442 } 7443 7444 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 7445 { 7446 int rc = 0; 7447 struct hwrm_port_phy_qcaps_input req = {0}; 7448 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 7449 struct bnxt_link_info *link_info = &bp->link_info; 7450 7451 if (bp->hwrm_spec_code < 0x10201) 7452 return 0; 7453 7454 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 7455 7456 mutex_lock(&bp->hwrm_cmd_lock); 7457 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7458 if (rc) 7459 goto hwrm_phy_qcaps_exit; 7460 7461 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 7462 struct ethtool_eee *eee = &bp->eee; 7463 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 7464 7465 bp->flags |= BNXT_FLAG_EEE_CAP; 7466 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 7467 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 7468 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 7469 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 7470 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 7471 } 7472 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) { 7473 if (bp->test_info) 7474 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK; 7475 } 7476 if (resp->supported_speeds_auto_mode) 7477 link_info->support_auto_speeds = 7478 le16_to_cpu(resp->supported_speeds_auto_mode); 7479 7480 bp->port_count = resp->port_cnt; 7481 7482 hwrm_phy_qcaps_exit: 7483 mutex_unlock(&bp->hwrm_cmd_lock); 7484 return rc; 7485 } 7486 7487 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 7488 { 7489 int rc = 0; 7490 struct bnxt_link_info *link_info = &bp->link_info; 7491 struct hwrm_port_phy_qcfg_input req = {0}; 7492 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 7493 u8 link_up = link_info->link_up; 7494 u16 diff; 7495 7496 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 7497 7498 mutex_lock(&bp->hwrm_cmd_lock); 7499 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7500 if (rc) { 7501 mutex_unlock(&bp->hwrm_cmd_lock); 7502 return rc; 7503 } 7504 7505 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 
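/* Cache the raw PHY query response before unpacking the fields the driver tracks. */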
7506 link_info->phy_link_status = resp->link; 7507 link_info->duplex = resp->duplex_cfg; 7508 if (bp->hwrm_spec_code >= 0x10800) 7509 link_info->duplex = resp->duplex_state; 7510 link_info->pause = resp->pause; 7511 link_info->auto_mode = resp->auto_mode; 7512 link_info->auto_pause_setting = resp->auto_pause; 7513 link_info->lp_pause = resp->link_partner_adv_pause; 7514 link_info->force_pause_setting = resp->force_pause; 7515 link_info->duplex_setting = resp->duplex_cfg; 7516 if (link_info->phy_link_status == BNXT_LINK_LINK) 7517 link_info->link_speed = le16_to_cpu(resp->link_speed); 7518 else 7519 link_info->link_speed = 0; 7520 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 7521 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 7522 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 7523 link_info->lp_auto_link_speeds = 7524 le16_to_cpu(resp->link_partner_adv_speeds); 7525 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 7526 link_info->phy_ver[0] = resp->phy_maj; 7527 link_info->phy_ver[1] = resp->phy_min; 7528 link_info->phy_ver[2] = resp->phy_bld; 7529 link_info->media_type = resp->media_type; 7530 link_info->phy_type = resp->phy_type; 7531 link_info->transceiver = resp->xcvr_pkg_type; 7532 link_info->phy_addr = resp->eee_config_phy_addr & 7533 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 7534 link_info->module_status = resp->module_status; 7535 7536 if (bp->flags & BNXT_FLAG_EEE_CAP) { 7537 struct ethtool_eee *eee = &bp->eee; 7538 u16 fw_speeds; 7539 7540 eee->eee_active = 0; 7541 if (resp->eee_config_phy_addr & 7542 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 7543 eee->eee_active = 1; 7544 fw_speeds = le16_to_cpu( 7545 resp->link_partner_adv_eee_link_speed_mask); 7546 eee->lp_advertised = 7547 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 7548 } 7549 7550 /* Pull initial EEE config */ 7551 if (!chng_link_state) { 7552 if (resp->eee_config_phy_addr & 7553 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 7554 eee->eee_enabled = 1; 7555 7556 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 7557 eee->advertised = 7558 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 7559 7560 if (resp->eee_config_phy_addr & 7561 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 7562 __le32 tmr; 7563 7564 eee->tx_lpi_enabled = 1; 7565 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 7566 eee->tx_lpi_timer = le32_to_cpu(tmr) & 7567 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 7568 } 7569 } 7570 } 7571 7572 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 7573 if (bp->hwrm_spec_code >= 0x10504) 7574 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 7575 7576 /* TODO: need to add more logic to report VF link */ 7577 if (chng_link_state) { 7578 if (link_info->phy_link_status == BNXT_LINK_LINK) 7579 link_info->link_up = 1; 7580 else 7581 link_info->link_up = 0; 7582 if (link_up != link_info->link_up) 7583 bnxt_report_link(bp); 7584 } else { 7585 /* always link down if not required to update link state */ 7586 link_info->link_up = 0; 7587 } 7588 mutex_unlock(&bp->hwrm_cmd_lock); 7589 7590 if (!BNXT_SINGLE_PF(bp)) 7591 return 0; 7592 7593 diff = link_info->support_auto_speeds ^ link_info->advertising; 7594 if ((link_info->support_auto_speeds | diff) != 7595 link_info->support_auto_speeds) { 7596 /* An advertised speed is no longer supported, so we need to 7597 * update the advertisement settings. Caller holds RTNL 7598 * so we can modify link settings.
7599 */ 7600 link_info->advertising = link_info->support_auto_speeds; 7601 if (link_info->autoneg & BNXT_AUTONEG_SPEED) 7602 bnxt_hwrm_set_link_setting(bp, true, false); 7603 } 7604 return 0; 7605 } 7606 7607 static void bnxt_get_port_module_status(struct bnxt *bp) 7608 { 7609 struct bnxt_link_info *link_info = &bp->link_info; 7610 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 7611 u8 module_status; 7612 7613 if (bnxt_update_link(bp, true)) 7614 return; 7615 7616 module_status = link_info->module_status; 7617 switch (module_status) { 7618 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 7619 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 7620 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 7621 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 7622 bp->pf.port_id); 7623 if (bp->hwrm_spec_code >= 0x10201) { 7624 netdev_warn(bp->dev, "Module part number %s\n", 7625 resp->phy_vendor_partnumber); 7626 } 7627 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 7628 netdev_warn(bp->dev, "TX is disabled\n"); 7629 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 7630 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 7631 } 7632 } 7633 7634 static void 7635 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 7636 { 7637 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 7638 if (bp->hwrm_spec_code >= 0x10201) 7639 req->auto_pause = 7640 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 7641 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 7642 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 7643 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 7644 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 7645 req->enables |= 7646 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 7647 } else { 7648 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 7649 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 7650 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 7651 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 7652 req->enables |= 7653 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 7654 if (bp->hwrm_spec_code >= 0x10201) { 7655 req->auto_pause = req->force_pause; 7656 req->enables |= cpu_to_le32( 7657 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 7658 } 7659 } 7660 } 7661 7662 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 7663 struct hwrm_port_phy_cfg_input *req) 7664 { 7665 u8 autoneg = bp->link_info.autoneg; 7666 u16 fw_link_speed = bp->link_info.req_link_speed; 7667 u16 advertising = bp->link_info.advertising; 7668 7669 if (autoneg & BNXT_AUTONEG_SPEED) { 7670 req->auto_mode |= 7671 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 7672 7673 req->enables |= cpu_to_le32( 7674 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 7675 req->auto_link_speed_mask = cpu_to_le16(advertising); 7676 7677 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 7678 req->flags |= 7679 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 7680 } else { 7681 req->force_link_speed = cpu_to_le16(fw_link_speed); 7682 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 7683 } 7684 7685 /* tell chimp that the setting takes effect immediately */ 7686 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 7687 } 7688 7689 int bnxt_hwrm_set_pause(struct bnxt *bp) 7690 { 7691 struct hwrm_port_phy_cfg_input req = {0}; 7692 int rc; 7693 7694 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 7695 bnxt_hwrm_set_pause_common(bp, &req); 7696 7697 if ((bp->link_info.autoneg & 
BNXT_AUTONEG_FLOW_CTRL) || 7698 bp->link_info.force_link_chng) 7699 bnxt_hwrm_set_link_common(bp, &req); 7700 7701 mutex_lock(&bp->hwrm_cmd_lock); 7702 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7703 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 7704 /* since changing of pause setting doesn't trigger any link 7705 * change event, the driver needs to update the current pause 7706 * result upon successfully return of the phy_cfg command 7707 */ 7708 bp->link_info.pause = 7709 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 7710 bp->link_info.auto_pause_setting = 0; 7711 if (!bp->link_info.force_link_chng) 7712 bnxt_report_link(bp); 7713 } 7714 bp->link_info.force_link_chng = false; 7715 mutex_unlock(&bp->hwrm_cmd_lock); 7716 return rc; 7717 } 7718 7719 static void bnxt_hwrm_set_eee(struct bnxt *bp, 7720 struct hwrm_port_phy_cfg_input *req) 7721 { 7722 struct ethtool_eee *eee = &bp->eee; 7723 7724 if (eee->eee_enabled) { 7725 u16 eee_speeds; 7726 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 7727 7728 if (eee->tx_lpi_enabled) 7729 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 7730 else 7731 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 7732 7733 req->flags |= cpu_to_le32(flags); 7734 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 7735 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 7736 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 7737 } else { 7738 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 7739 } 7740 } 7741 7742 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 7743 { 7744 struct hwrm_port_phy_cfg_input req = {0}; 7745 7746 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 7747 if (set_pause) 7748 bnxt_hwrm_set_pause_common(bp, &req); 7749 7750 bnxt_hwrm_set_link_common(bp, &req); 7751 7752 if (set_eee) 7753 bnxt_hwrm_set_eee(bp, &req); 7754 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7755 } 7756 7757 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 7758 { 7759 struct hwrm_port_phy_cfg_input req = {0}; 7760 7761 if (!BNXT_SINGLE_PF(bp)) 7762 return 0; 7763 7764 if (pci_num_vf(bp->pdev)) 7765 return 0; 7766 7767 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 7768 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 7769 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7770 } 7771 7772 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 7773 { 7774 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; 7775 struct hwrm_func_drv_if_change_input req = {0}; 7776 bool resc_reinit = false; 7777 int rc; 7778 7779 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 7780 return 0; 7781 7782 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); 7783 if (up) 7784 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 7785 mutex_lock(&bp->hwrm_cmd_lock); 7786 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7787 if (!rc && (resp->flags & 7788 cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE))) 7789 resc_reinit = true; 7790 mutex_unlock(&bp->hwrm_cmd_lock); 7791 7792 if (up && resc_reinit && BNXT_NEW_RM(bp)) { 7793 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7794 7795 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 7796 hw_resc->resv_cp_rings = 0; 7797 hw_resc->resv_tx_rings = 0; 7798 hw_resc->resv_rx_rings = 0; 7799 hw_resc->resv_hw_ring_grps = 0; 7800 hw_resc->resv_vnics = 0; 7801 bp->tx_nr_rings = 0; 7802 bp->rx_nr_rings = 0; 7803 } 7804 
return rc; 7805 } 7806 7807 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 7808 { 7809 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 7810 struct hwrm_port_led_qcaps_input req = {0}; 7811 struct bnxt_pf_info *pf = &bp->pf; 7812 int rc; 7813 7814 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 7815 return 0; 7816 7817 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 7818 req.port_id = cpu_to_le16(pf->port_id); 7819 mutex_lock(&bp->hwrm_cmd_lock); 7820 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7821 if (rc) { 7822 mutex_unlock(&bp->hwrm_cmd_lock); 7823 return rc; 7824 } 7825 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 7826 int i; 7827 7828 bp->num_leds = resp->num_leds; 7829 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 7830 bp->num_leds); 7831 for (i = 0; i < bp->num_leds; i++) { 7832 struct bnxt_led_info *led = &bp->leds[i]; 7833 __le16 caps = led->led_state_caps; 7834 7835 if (!led->led_group_id || 7836 !BNXT_LED_ALT_BLINK_CAP(caps)) { 7837 bp->num_leds = 0; 7838 break; 7839 } 7840 } 7841 } 7842 mutex_unlock(&bp->hwrm_cmd_lock); 7843 return 0; 7844 } 7845 7846 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 7847 { 7848 struct hwrm_wol_filter_alloc_input req = {0}; 7849 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 7850 int rc; 7851 7852 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 7853 req.port_id = cpu_to_le16(bp->pf.port_id); 7854 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 7855 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 7856 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 7857 mutex_lock(&bp->hwrm_cmd_lock); 7858 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7859 if (!rc) 7860 bp->wol_filter_id = resp->wol_filter_id; 7861 mutex_unlock(&bp->hwrm_cmd_lock); 7862 return rc; 7863 } 7864 7865 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 7866 { 7867 struct hwrm_wol_filter_free_input req = {0}; 7868 int rc; 7869 7870 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 7871 req.port_id = cpu_to_le16(bp->pf.port_id); 7872 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 7873 req.wol_filter_id = bp->wol_filter_id; 7874 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7875 return rc; 7876 } 7877 7878 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 7879 { 7880 struct hwrm_wol_filter_qcfg_input req = {0}; 7881 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 7882 u16 next_handle = 0; 7883 int rc; 7884 7885 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 7886 req.port_id = cpu_to_le16(bp->pf.port_id); 7887 req.handle = cpu_to_le16(handle); 7888 mutex_lock(&bp->hwrm_cmd_lock); 7889 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 7890 if (!rc) { 7891 next_handle = le16_to_cpu(resp->next_handle); 7892 if (next_handle != 0) { 7893 if (resp->wol_type == 7894 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 7895 bp->wol = 1; 7896 bp->wol_filter_id = resp->wol_filter_id; 7897 } 7898 } 7899 } 7900 mutex_unlock(&bp->hwrm_cmd_lock); 7901 return next_handle; 7902 } 7903 7904 static void bnxt_get_wol_settings(struct bnxt *bp) 7905 { 7906 u16 handle = 0; 7907 7908 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 7909 return; 7910 7911 do { 7912 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 7913 } while (handle && handle != 0xffff); 7914 } 7915 7916 #ifdef CONFIG_BNXT_HWMON 7917 static ssize_t 
bnxt_show_temp(struct device *dev, 7918 struct device_attribute *devattr, char *buf) 7919 { 7920 struct hwrm_temp_monitor_query_input req = {0}; 7921 struct hwrm_temp_monitor_query_output *resp; 7922 struct bnxt *bp = dev_get_drvdata(dev); 7923 u32 temp = 0; 7924 7925 resp = bp->hwrm_cmd_resp_addr; 7926 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); 7927 mutex_lock(&bp->hwrm_cmd_lock); 7928 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) 7929 temp = resp->temp * 1000; /* display millidegree */ 7930 mutex_unlock(&bp->hwrm_cmd_lock); 7931 7932 return sprintf(buf, "%u\n", temp); 7933 } 7934 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); 7935 7936 static struct attribute *bnxt_attrs[] = { 7937 &sensor_dev_attr_temp1_input.dev_attr.attr, 7938 NULL 7939 }; 7940 ATTRIBUTE_GROUPS(bnxt); 7941 7942 static void bnxt_hwmon_close(struct bnxt *bp) 7943 { 7944 if (bp->hwmon_dev) { 7945 hwmon_device_unregister(bp->hwmon_dev); 7946 bp->hwmon_dev = NULL; 7947 } 7948 } 7949 7950 static void bnxt_hwmon_open(struct bnxt *bp) 7951 { 7952 struct pci_dev *pdev = bp->pdev; 7953 7954 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, 7955 DRV_MODULE_NAME, bp, 7956 bnxt_groups); 7957 if (IS_ERR(bp->hwmon_dev)) { 7958 bp->hwmon_dev = NULL; 7959 dev_warn(&pdev->dev, "Cannot register hwmon device\n"); 7960 } 7961 } 7962 #else 7963 static void bnxt_hwmon_close(struct bnxt *bp) 7964 { 7965 } 7966 7967 static void bnxt_hwmon_open(struct bnxt *bp) 7968 { 7969 } 7970 #endif 7971 7972 static bool bnxt_eee_config_ok(struct bnxt *bp) 7973 { 7974 struct ethtool_eee *eee = &bp->eee; 7975 struct bnxt_link_info *link_info = &bp->link_info; 7976 7977 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 7978 return true; 7979 7980 if (eee->eee_enabled) { 7981 u32 advertising = 7982 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 7983 7984 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 7985 eee->eee_enabled = 0; 7986 return false; 7987 } 7988 if (eee->advertised & ~advertising) { 7989 eee->advertised = advertising & eee->supported; 7990 return false; 7991 } 7992 } 7993 return true; 7994 } 7995 7996 static int bnxt_update_phy_setting(struct bnxt *bp) 7997 { 7998 int rc; 7999 bool update_link = false; 8000 bool update_pause = false; 8001 bool update_eee = false; 8002 struct bnxt_link_info *link_info = &bp->link_info; 8003 8004 rc = bnxt_update_link(bp, true); 8005 if (rc) { 8006 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 8007 rc); 8008 return rc; 8009 } 8010 if (!BNXT_SINGLE_PF(bp)) 8011 return 0; 8012 8013 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 8014 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 8015 link_info->req_flow_ctrl) 8016 update_pause = true; 8017 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 8018 link_info->force_pause_setting != link_info->req_flow_ctrl) 8019 update_pause = true; 8020 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 8021 if (BNXT_AUTO_MODE(link_info->auto_mode)) 8022 update_link = true; 8023 if (link_info->req_link_speed != link_info->force_link_speed) 8024 update_link = true; 8025 if (link_info->req_duplex != link_info->duplex_setting) 8026 update_link = true; 8027 } else { 8028 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 8029 update_link = true; 8030 if (link_info->advertising != link_info->auto_link_speeds) 8031 update_link = true; 8032 } 8033 8034 /* The last close may have shutdown the link, so need to call 8035 * PHY_CFG to bring it back up. 
8036 */ 8037 if (!netif_carrier_ok(bp->dev)) 8038 update_link = true; 8039 8040 if (!bnxt_eee_config_ok(bp)) 8041 update_eee = true; 8042 8043 if (update_link) 8044 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 8045 else if (update_pause) 8046 rc = bnxt_hwrm_set_pause(bp); 8047 if (rc) { 8048 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 8049 rc); 8050 return rc; 8051 } 8052 8053 return rc; 8054 } 8055 8056 /* Common routine to pre-map certain register block to different GRC window. 8057 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 8058 * in PF and 3 windows in VF that can be customized to map in different 8059 * register blocks. 8060 */ 8061 static void bnxt_preset_reg_win(struct bnxt *bp) 8062 { 8063 if (BNXT_PF(bp)) { 8064 /* CAG registers map to GRC window #4 */ 8065 writel(BNXT_CAG_REG_BASE, 8066 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 8067 } 8068 } 8069 8070 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 8071 8072 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 8073 { 8074 int rc = 0; 8075 8076 bnxt_preset_reg_win(bp); 8077 netif_carrier_off(bp->dev); 8078 if (irq_re_init) { 8079 /* Reserve rings now if none were reserved at driver probe. */ 8080 rc = bnxt_init_dflt_ring_mode(bp); 8081 if (rc) { 8082 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 8083 return rc; 8084 } 8085 } 8086 rc = bnxt_reserve_rings(bp); 8087 if (rc) 8088 return rc; 8089 if ((bp->flags & BNXT_FLAG_RFS) && 8090 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 8091 /* disable RFS if falling back to INTA */ 8092 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 8093 bp->flags &= ~BNXT_FLAG_RFS; 8094 } 8095 8096 rc = bnxt_alloc_mem(bp, irq_re_init); 8097 if (rc) { 8098 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 8099 goto open_err_free_mem; 8100 } 8101 8102 if (irq_re_init) { 8103 bnxt_init_napi(bp); 8104 rc = bnxt_request_irq(bp); 8105 if (rc) { 8106 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 8107 goto open_err_irq; 8108 } 8109 } 8110 8111 bnxt_enable_napi(bp); 8112 bnxt_debug_dev_init(bp); 8113 8114 rc = bnxt_init_nic(bp, irq_re_init); 8115 if (rc) { 8116 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 8117 goto open_err; 8118 } 8119 8120 if (link_re_init) { 8121 mutex_lock(&bp->link_lock); 8122 rc = bnxt_update_phy_setting(bp); 8123 mutex_unlock(&bp->link_lock); 8124 if (rc) { 8125 netdev_warn(bp->dev, "failed to update phy settings\n"); 8126 if (BNXT_SINGLE_PF(bp)) { 8127 bp->link_info.phy_retry = true; 8128 bp->link_info.phy_retry_expires = 8129 jiffies + 5 * HZ; 8130 } 8131 } 8132 } 8133 8134 if (irq_re_init) 8135 udp_tunnel_get_rx_info(bp->dev); 8136 8137 set_bit(BNXT_STATE_OPEN, &bp->state); 8138 bnxt_enable_int(bp); 8139 /* Enable TX queues */ 8140 bnxt_tx_enable(bp); 8141 mod_timer(&bp->timer, jiffies + bp->current_interval); 8142 /* Poll link status and check for SFP+ module status */ 8143 bnxt_get_port_module_status(bp); 8144 8145 /* VF-reps may need to be re-opened after the PF is re-opened */ 8146 if (BNXT_PF(bp)) 8147 bnxt_vf_reps_open(bp); 8148 return 0; 8149 8150 open_err: 8151 bnxt_debug_dev_exit(bp); 8152 bnxt_disable_napi(bp); 8153 8154 open_err_irq: 8155 bnxt_del_napi(bp); 8156 8157 open_err_free_mem: 8158 bnxt_free_skbs(bp); 8159 bnxt_free_irq(bp); 8160 bnxt_free_mem(bp, true); 8161 return rc; 8162 } 8163 8164 /* rtnl_lock held */ 8165 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 8166 { 8167 int rc = 0; 8168 8169 rc = 
__bnxt_open_nic(bp, irq_re_init, link_re_init); 8170 if (rc) { 8171 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 8172 dev_close(bp->dev); 8173 } 8174 return rc; 8175 } 8176 8177 /* rtnl_lock held, open the NIC halfway by allocating all resources, but 8178 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 8179 * self tests. 8180 */ 8181 int bnxt_half_open_nic(struct bnxt *bp) 8182 { 8183 int rc = 0; 8184 8185 rc = bnxt_alloc_mem(bp, false); 8186 if (rc) { 8187 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 8188 goto half_open_err; 8189 } 8190 rc = bnxt_init_nic(bp, false); 8191 if (rc) { 8192 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 8193 goto half_open_err; 8194 } 8195 return 0; 8196 8197 half_open_err: 8198 bnxt_free_skbs(bp); 8199 bnxt_free_mem(bp, false); 8200 dev_close(bp->dev); 8201 return rc; 8202 } 8203 8204 /* rtnl_lock held, this call can only be made after a previous successful 8205 * call to bnxt_half_open_nic(). 8206 */ 8207 void bnxt_half_close_nic(struct bnxt *bp) 8208 { 8209 bnxt_hwrm_resource_free(bp, false, false); 8210 bnxt_free_skbs(bp); 8211 bnxt_free_mem(bp, false); 8212 } 8213 8214 static int bnxt_open(struct net_device *dev) 8215 { 8216 struct bnxt *bp = netdev_priv(dev); 8217 int rc; 8218 8219 bnxt_hwrm_if_change(bp, true); 8220 rc = __bnxt_open_nic(bp, true, true); 8221 if (rc) 8222 bnxt_hwrm_if_change(bp, false); 8223 8224 bnxt_hwmon_open(bp); 8225 8226 return rc; 8227 } 8228 8229 static bool bnxt_drv_busy(struct bnxt *bp) 8230 { 8231 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 8232 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 8233 } 8234 8235 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 8236 bool link_re_init) 8237 { 8238 /* Close the VF-reps before closing PF */ 8239 if (BNXT_PF(bp)) 8240 bnxt_vf_reps_close(bp); 8241 8242 /* Change device state to avoid TX queue wake-ups */ 8243 bnxt_tx_disable(bp); 8244 8245 clear_bit(BNXT_STATE_OPEN, &bp->state); 8246 smp_mb__after_atomic(); 8247 while (bnxt_drv_busy(bp)) 8248 msleep(20); 8249 8250 /* Flush rings and disable interrupts */ 8251 bnxt_shutdown_nic(bp, irq_re_init); 8252 8253 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 8254 8255 bnxt_debug_dev_exit(bp); 8256 bnxt_disable_napi(bp); 8257 del_timer_sync(&bp->timer); 8258 bnxt_free_skbs(bp); 8259 8260 if (irq_re_init) { 8261 bnxt_free_irq(bp); 8262 bnxt_del_napi(bp); 8263 } 8264 bnxt_free_mem(bp, irq_re_init); 8265 } 8266 8267 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 8268 { 8269 int rc = 0; 8270 8271 #ifdef CONFIG_BNXT_SRIOV 8272 if (bp->sriov_cfg) { 8273 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 8274 !bp->sriov_cfg, 8275 BNXT_SRIOV_CFG_WAIT_TMO); 8276 if (rc) 8277 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 8278 } 8279 #endif 8280 __bnxt_close_nic(bp, irq_re_init, link_re_init); 8281 return rc; 8282 } 8283 8284 static int bnxt_close(struct net_device *dev) 8285 { 8286 struct bnxt *bp = netdev_priv(dev); 8287 8288 bnxt_hwmon_close(bp); 8289 bnxt_close_nic(bp, true, true); 8290 bnxt_hwrm_shutdown_link(bp); 8291 bnxt_hwrm_if_change(bp, false); 8292 return 0; 8293 } 8294 8295 /* rtnl_lock held */ 8296 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 8297 { 8298 switch (cmd) { 8299 case SIOCGMIIPHY: 8300 /* fallthru */ 8301 case SIOCGMIIREG: { 8302 if (!netif_running(dev)) 8303 return -EAGAIN; 8304 8305 return 0; 8306 } 8307 8308 case SIOCSMIIREG: 8309 if
(!netif_running(dev)) 8310 return -EAGAIN; 8311 8312 return 0; 8313 8314 default: 8315 /* do nothing */ 8316 break; 8317 } 8318 return -EOPNOTSUPP; 8319 } 8320 8321 static void 8322 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 8323 { 8324 u32 i; 8325 struct bnxt *bp = netdev_priv(dev); 8326 8327 set_bit(BNXT_STATE_READ_STATS, &bp->state); 8328 /* Make sure bnxt_close_nic() sees that we are reading stats before 8329 * we check the BNXT_STATE_OPEN flag. 8330 */ 8331 smp_mb__after_atomic(); 8332 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 8333 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 8334 return; 8335 } 8336 8337 /* TODO check if we need to synchronize with bnxt_close path */ 8338 for (i = 0; i < bp->cp_nr_rings; i++) { 8339 struct bnxt_napi *bnapi = bp->bnapi[i]; 8340 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8341 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 8342 8343 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 8344 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 8345 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 8346 8347 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 8348 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 8349 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 8350 8351 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 8352 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 8353 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 8354 8355 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 8356 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 8357 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 8358 8359 stats->rx_missed_errors += 8360 le64_to_cpu(hw_stats->rx_discard_pkts); 8361 8362 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 8363 8364 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 8365 } 8366 8367 if (bp->flags & BNXT_FLAG_PORT_STATS) { 8368 struct rx_port_stats *rx = bp->hw_rx_port_stats; 8369 struct tx_port_stats *tx = bp->hw_tx_port_stats; 8370 8371 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 8372 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 8373 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 8374 le64_to_cpu(rx->rx_ovrsz_frames) + 8375 le64_to_cpu(rx->rx_runt_frames); 8376 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 8377 le64_to_cpu(rx->rx_jbr_frames); 8378 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 8379 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 8380 stats->tx_errors = le64_to_cpu(tx->tx_err); 8381 } 8382 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 8383 } 8384 8385 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 8386 { 8387 struct net_device *dev = bp->dev; 8388 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 8389 struct netdev_hw_addr *ha; 8390 u8 *haddr; 8391 int mc_count = 0; 8392 bool update = false; 8393 int off = 0; 8394 8395 netdev_for_each_mc_addr(ha, dev) { 8396 if (mc_count >= BNXT_MAX_MC_ADDRS) { 8397 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 8398 vnic->mc_list_count = 0; 8399 return false; 8400 } 8401 haddr = ha->addr; 8402 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 8403 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 8404 update = true; 8405 } 8406 off += ETH_ALEN; 8407 mc_count++; 8408 } 8409 if (mc_count) 8410 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 8411 8412 if (mc_count != vnic->mc_list_count) { 8413 vnic->mc_list_count = 
mc_count; 8414 update = true; 8415 } 8416 return update; 8417 } 8418 8419 static bool bnxt_uc_list_updated(struct bnxt *bp) 8420 { 8421 struct net_device *dev = bp->dev; 8422 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 8423 struct netdev_hw_addr *ha; 8424 int off = 0; 8425 8426 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 8427 return true; 8428 8429 netdev_for_each_uc_addr(ha, dev) { 8430 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 8431 return true; 8432 8433 off += ETH_ALEN; 8434 } 8435 return false; 8436 } 8437 8438 static void bnxt_set_rx_mode(struct net_device *dev) 8439 { 8440 struct bnxt *bp = netdev_priv(dev); 8441 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 8442 u32 mask = vnic->rx_mask; 8443 bool mc_update = false; 8444 bool uc_update; 8445 8446 if (!netif_running(dev)) 8447 return; 8448 8449 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 8450 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 8451 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 8452 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 8453 8454 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 8455 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 8456 8457 uc_update = bnxt_uc_list_updated(bp); 8458 8459 if (dev->flags & IFF_BROADCAST) 8460 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 8461 if (dev->flags & IFF_ALLMULTI) { 8462 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 8463 vnic->mc_list_count = 0; 8464 } else { 8465 mc_update = bnxt_mc_list_updated(bp, &mask); 8466 } 8467 8468 if (mask != vnic->rx_mask || uc_update || mc_update) { 8469 vnic->rx_mask = mask; 8470 8471 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 8472 bnxt_queue_sp_work(bp); 8473 } 8474 } 8475 8476 static int bnxt_cfg_rx_mode(struct bnxt *bp) 8477 { 8478 struct net_device *dev = bp->dev; 8479 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 8480 struct netdev_hw_addr *ha; 8481 int i, off = 0, rc; 8482 bool uc_update; 8483 8484 netif_addr_lock_bh(dev); 8485 uc_update = bnxt_uc_list_updated(bp); 8486 netif_addr_unlock_bh(dev); 8487 8488 if (!uc_update) 8489 goto skip_uc; 8490 8491 mutex_lock(&bp->hwrm_cmd_lock); 8492 for (i = 1; i < vnic->uc_filter_count; i++) { 8493 struct hwrm_cfa_l2_filter_free_input req = {0}; 8494 8495 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 8496 -1); 8497 8498 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 8499 8500 rc = _hwrm_send_message(bp, &req, sizeof(req), 8501 HWRM_CMD_TIMEOUT); 8502 } 8503 mutex_unlock(&bp->hwrm_cmd_lock); 8504 8505 vnic->uc_filter_count = 1; 8506 8507 netif_addr_lock_bh(dev); 8508 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 8509 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 8510 } else { 8511 netdev_for_each_uc_addr(ha, dev) { 8512 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 8513 off += ETH_ALEN; 8514 vnic->uc_filter_count++; 8515 } 8516 } 8517 netif_addr_unlock_bh(dev); 8518 8519 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 8520 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 8521 if (rc) { 8522 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 8523 rc); 8524 vnic->uc_filter_count = i; 8525 return rc; 8526 } 8527 } 8528 8529 skip_uc: 8530 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 8531 if (rc) 8532 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", 8533 rc); 8534 8535 return rc; 8536 } 8537 8538 static bool bnxt_can_reserve_rings(struct bnxt *bp) 8539 { 8540 #ifdef CONFIG_BNXT_SRIOV 8541 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 8542 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 8543 8544 /* No 
minimum rings were provisioned by the PF. Don't 8545 * reserve rings by default when device is down. 8546 */ 8547 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 8548 return true; 8549 8550 if (!netif_running(bp->dev)) 8551 return false; 8552 } 8553 #endif 8554 return true; 8555 } 8556 8557 /* If the chip and firmware support RFS */ 8558 static bool bnxt_rfs_supported(struct bnxt *bp) 8559 { 8560 if (bp->flags & BNXT_FLAG_CHIP_P5) 8561 return false; 8562 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 8563 return true; 8564 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 8565 return true; 8566 return false; 8567 } 8568 8569 /* If runtime conditions support RFS */ 8570 static bool bnxt_rfs_capable(struct bnxt *bp) 8571 { 8572 #ifdef CONFIG_RFS_ACCEL 8573 int vnics, max_vnics, max_rss_ctxs; 8574 8575 if (bp->flags & BNXT_FLAG_CHIP_P5) 8576 return false; 8577 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) 8578 return false; 8579 8580 vnics = 1 + bp->rx_nr_rings; 8581 max_vnics = bnxt_get_max_func_vnics(bp); 8582 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 8583 8584 /* RSS contexts not a limiting factor */ 8585 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 8586 max_rss_ctxs = max_vnics; 8587 if (vnics > max_vnics || vnics > max_rss_ctxs) { 8588 if (bp->rx_nr_rings > 1) 8589 netdev_warn(bp->dev, 8590 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 8591 min(max_rss_ctxs - 1, max_vnics - 1)); 8592 return false; 8593 } 8594 8595 if (!BNXT_NEW_RM(bp)) 8596 return true; 8597 8598 if (vnics == bp->hw_resc.resv_vnics) 8599 return true; 8600 8601 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics); 8602 if (vnics <= bp->hw_resc.resv_vnics) 8603 return true; 8604 8605 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 8606 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1); 8607 return false; 8608 #else 8609 return false; 8610 #endif 8611 } 8612 8613 static netdev_features_t bnxt_fix_features(struct net_device *dev, 8614 netdev_features_t features) 8615 { 8616 struct bnxt *bp = netdev_priv(dev); 8617 8618 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 8619 features &= ~NETIF_F_NTUPLE; 8620 8621 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 8622 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 8623 8624 if (!(features & NETIF_F_GRO)) 8625 features &= ~NETIF_F_GRO_HW; 8626 8627 if (features & NETIF_F_GRO_HW) 8628 features &= ~NETIF_F_LRO; 8629 8630 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 8631 * turned on or off together.
8632 */ 8633 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 8634 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 8635 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 8636 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 8637 NETIF_F_HW_VLAN_STAG_RX); 8638 else 8639 features |= NETIF_F_HW_VLAN_CTAG_RX | 8640 NETIF_F_HW_VLAN_STAG_RX; 8641 } 8642 #ifdef CONFIG_BNXT_SRIOV 8643 if (BNXT_VF(bp)) { 8644 if (bp->vf.vlan) { 8645 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 8646 NETIF_F_HW_VLAN_STAG_RX); 8647 } 8648 } 8649 #endif 8650 return features; 8651 } 8652 8653 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 8654 { 8655 struct bnxt *bp = netdev_priv(dev); 8656 u32 flags = bp->flags; 8657 u32 changes; 8658 int rc = 0; 8659 bool re_init = false; 8660 bool update_tpa = false; 8661 8662 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 8663 if (features & NETIF_F_GRO_HW) 8664 flags |= BNXT_FLAG_GRO; 8665 else if (features & NETIF_F_LRO) 8666 flags |= BNXT_FLAG_LRO; 8667 8668 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 8669 flags &= ~BNXT_FLAG_TPA; 8670 8671 if (features & NETIF_F_HW_VLAN_CTAG_RX) 8672 flags |= BNXT_FLAG_STRIP_VLAN; 8673 8674 if (features & NETIF_F_NTUPLE) 8675 flags |= BNXT_FLAG_RFS; 8676 8677 changes = flags ^ bp->flags; 8678 if (changes & BNXT_FLAG_TPA) { 8679 update_tpa = true; 8680 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 8681 (flags & BNXT_FLAG_TPA) == 0) 8682 re_init = true; 8683 } 8684 8685 if (changes & ~BNXT_FLAG_TPA) 8686 re_init = true; 8687 8688 if (flags != bp->flags) { 8689 u32 old_flags = bp->flags; 8690 8691 bp->flags = flags; 8692 8693 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 8694 if (update_tpa) 8695 bnxt_set_ring_params(bp); 8696 return rc; 8697 } 8698 8699 if (re_init) { 8700 bnxt_close_nic(bp, false, false); 8701 if (update_tpa) 8702 bnxt_set_ring_params(bp); 8703 8704 return bnxt_open_nic(bp, false, false); 8705 } 8706 if (update_tpa) { 8707 rc = bnxt_set_tpa(bp, 8708 (flags & BNXT_FLAG_TPA) ? 
8709 true : false); 8710 if (rc) 8711 bp->flags = old_flags; 8712 } 8713 } 8714 return rc; 8715 } 8716 8717 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 8718 { 8719 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 8720 int i = bnapi->index; 8721 8722 if (!txr) 8723 return; 8724 8725 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 8726 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 8727 txr->tx_cons); 8728 } 8729 8730 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 8731 { 8732 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 8733 int i = bnapi->index; 8734 8735 if (!rxr) 8736 return; 8737 8738 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 8739 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 8740 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 8741 rxr->rx_sw_agg_prod); 8742 } 8743 8744 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 8745 { 8746 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8747 int i = bnapi->index; 8748 8749 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 8750 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 8751 } 8752 8753 static void bnxt_dbg_dump_states(struct bnxt *bp) 8754 { 8755 int i; 8756 struct bnxt_napi *bnapi; 8757 8758 for (i = 0; i < bp->cp_nr_rings; i++) { 8759 bnapi = bp->bnapi[i]; 8760 if (netif_msg_drv(bp)) { 8761 bnxt_dump_tx_sw_state(bnapi); 8762 bnxt_dump_rx_sw_state(bnapi); 8763 bnxt_dump_cp_sw_state(bnapi); 8764 } 8765 } 8766 } 8767 8768 static void bnxt_reset_task(struct bnxt *bp, bool silent) 8769 { 8770 if (!silent) 8771 bnxt_dbg_dump_states(bp); 8772 if (netif_running(bp->dev)) { 8773 int rc; 8774 8775 if (!silent) 8776 bnxt_ulp_stop(bp); 8777 bnxt_close_nic(bp, false, false); 8778 rc = bnxt_open_nic(bp, false, false); 8779 if (!silent && !rc) 8780 bnxt_ulp_start(bp); 8781 } 8782 } 8783 8784 static void bnxt_tx_timeout(struct net_device *dev) 8785 { 8786 struct bnxt *bp = netdev_priv(dev); 8787 8788 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 8789 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 8790 bnxt_queue_sp_work(bp); 8791 } 8792 8793 static void bnxt_timer(struct timer_list *t) 8794 { 8795 struct bnxt *bp = from_timer(bp, t, timer); 8796 struct net_device *dev = bp->dev; 8797 8798 if (!netif_running(dev)) 8799 return; 8800 8801 if (atomic_read(&bp->intr_sem) != 0) 8802 goto bnxt_restart_timer; 8803 8804 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 8805 bp->stats_coal_ticks) { 8806 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 8807 bnxt_queue_sp_work(bp); 8808 } 8809 8810 if (bnxt_tc_flower_enabled(bp)) { 8811 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); 8812 bnxt_queue_sp_work(bp); 8813 } 8814 8815 if (bp->link_info.phy_retry) { 8816 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 8817 bp->link_info.phy_retry = 0; 8818 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 8819 } else { 8820 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); 8821 bnxt_queue_sp_work(bp); 8822 } 8823 } 8824 bnxt_restart_timer: 8825 mod_timer(&bp->timer, jiffies + bp->current_interval); 8826 } 8827 8828 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 8829 { 8830 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 8831 * set. If the device is being closed, bnxt_close() may be holding 8832 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. 
So we 8833 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 8834 */ 8835 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 8836 rtnl_lock(); 8837 } 8838 8839 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 8840 { 8841 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 8842 rtnl_unlock(); 8843 } 8844 8845 /* Only called from bnxt_sp_task() */ 8846 static void bnxt_reset(struct bnxt *bp, bool silent) 8847 { 8848 bnxt_rtnl_lock_sp(bp); 8849 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 8850 bnxt_reset_task(bp, silent); 8851 bnxt_rtnl_unlock_sp(bp); 8852 } 8853 8854 static void bnxt_cfg_ntp_filters(struct bnxt *); 8855 8856 static void bnxt_sp_task(struct work_struct *work) 8857 { 8858 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 8859 8860 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 8861 smp_mb__after_atomic(); 8862 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 8863 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 8864 return; 8865 } 8866 8867 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 8868 bnxt_cfg_rx_mode(bp); 8869 8870 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 8871 bnxt_cfg_ntp_filters(bp); 8872 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 8873 bnxt_hwrm_exec_fwd_req(bp); 8874 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 8875 bnxt_hwrm_tunnel_dst_port_alloc( 8876 bp, bp->vxlan_port, 8877 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 8878 } 8879 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 8880 bnxt_hwrm_tunnel_dst_port_free( 8881 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 8882 } 8883 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { 8884 bnxt_hwrm_tunnel_dst_port_alloc( 8885 bp, bp->nge_port, 8886 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 8887 } 8888 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { 8889 bnxt_hwrm_tunnel_dst_port_free( 8890 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 8891 } 8892 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 8893 bnxt_hwrm_port_qstats(bp); 8894 bnxt_hwrm_port_qstats_ext(bp); 8895 } 8896 8897 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 8898 int rc; 8899 8900 mutex_lock(&bp->link_lock); 8901 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 8902 &bp->sp_event)) 8903 bnxt_hwrm_phy_qcaps(bp); 8904 8905 rc = bnxt_update_link(bp, true); 8906 mutex_unlock(&bp->link_lock); 8907 if (rc) 8908 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 8909 rc); 8910 } 8911 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 8912 int rc; 8913 8914 mutex_lock(&bp->link_lock); 8915 rc = bnxt_update_phy_setting(bp); 8916 mutex_unlock(&bp->link_lock); 8917 if (rc) { 8918 netdev_warn(bp->dev, "update phy settings retry failed\n"); 8919 } else { 8920 bp->link_info.phy_retry = false; 8921 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 8922 } 8923 } 8924 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 8925 mutex_lock(&bp->link_lock); 8926 bnxt_get_port_module_status(bp); 8927 mutex_unlock(&bp->link_lock); 8928 } 8929 8930 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 8931 bnxt_tc_flow_stats_work(bp); 8932 8933 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 8934 * must be the last functions to be called before exiting. 
8935 */ 8936 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 8937 bnxt_reset(bp, false); 8938 8939 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 8940 bnxt_reset(bp, true); 8941 8942 smp_mb__before_atomic(); 8943 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 8944 } 8945 8946 /* Under rtnl_lock */ 8947 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 8948 int tx_xdp) 8949 { 8950 int max_rx, max_tx, tx_sets = 1; 8951 int tx_rings_needed; 8952 int rx_rings = rx; 8953 int cp, vnics, rc; 8954 8955 if (tcs) 8956 tx_sets = tcs; 8957 8958 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 8959 if (rc) 8960 return rc; 8961 8962 if (max_rx < rx) 8963 return -ENOMEM; 8964 8965 tx_rings_needed = tx * tx_sets + tx_xdp; 8966 if (max_tx < tx_rings_needed) 8967 return -ENOMEM; 8968 8969 vnics = 1; 8970 if (bp->flags & BNXT_FLAG_RFS) 8971 vnics += rx_rings; 8972 8973 if (bp->flags & BNXT_FLAG_AGG_RINGS) 8974 rx_rings <<= 1; 8975 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; 8976 if (BNXT_NEW_RM(bp)) 8977 cp += bnxt_get_ulp_msix_num(bp); 8978 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 8979 vnics); 8980 } 8981 8982 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 8983 { 8984 if (bp->bar2) { 8985 pci_iounmap(pdev, bp->bar2); 8986 bp->bar2 = NULL; 8987 } 8988 8989 if (bp->bar1) { 8990 pci_iounmap(pdev, bp->bar1); 8991 bp->bar1 = NULL; 8992 } 8993 8994 if (bp->bar0) { 8995 pci_iounmap(pdev, bp->bar0); 8996 bp->bar0 = NULL; 8997 } 8998 } 8999 9000 static void bnxt_cleanup_pci(struct bnxt *bp) 9001 { 9002 bnxt_unmap_bars(bp, bp->pdev); 9003 pci_release_regions(bp->pdev); 9004 pci_disable_device(bp->pdev); 9005 } 9006 9007 static void bnxt_init_dflt_coal(struct bnxt *bp) 9008 { 9009 struct bnxt_coal *coal; 9010 9011 /* Tick values in micro seconds. 9012 * 1 coal_buf x bufs_per_record = 1 completion record. 9013 */ 9014 coal = &bp->rx_coal; 9015 coal->coal_ticks = 14; 9016 coal->coal_bufs = 30; 9017 coal->coal_ticks_irq = 1; 9018 coal->coal_bufs_irq = 2; 9019 coal->idle_thresh = 50; 9020 coal->bufs_per_record = 2; 9021 coal->budget = 64; /* NAPI budget */ 9022 9023 coal = &bp->tx_coal; 9024 coal->coal_ticks = 28; 9025 coal->coal_bufs = 30; 9026 coal->coal_ticks_irq = 2; 9027 coal->coal_bufs_irq = 2; 9028 coal->bufs_per_record = 1; 9029 9030 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 9031 } 9032 9033 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 9034 { 9035 int rc; 9036 struct bnxt *bp = netdev_priv(dev); 9037 9038 SET_NETDEV_DEV(dev, &pdev->dev); 9039 9040 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 9041 rc = pci_enable_device(pdev); 9042 if (rc) { 9043 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 9044 goto init_err; 9045 } 9046 9047 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 9048 dev_err(&pdev->dev, 9049 "Cannot find PCI device base address, aborting\n"); 9050 rc = -ENODEV; 9051 goto init_err_disable; 9052 } 9053 9054 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 9055 if (rc) { 9056 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 9057 goto init_err_disable; 9058 } 9059 9060 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 9061 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 9062 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 9063 goto init_err_disable; 9064 } 9065 9066 pci_set_master(pdev); 9067 9068 bp->dev = dev; 9069 bp->pdev = pdev; 9070 9071 bp->bar0 = pci_ioremap_bar(pdev, 0); 9072 if (!bp->bar0) { 9073 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 9074 rc = -ENOMEM; 9075 goto init_err_release; 9076 } 9077 9078 bp->bar1 = pci_ioremap_bar(pdev, 2); 9079 if (!bp->bar1) { 9080 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); 9081 rc = -ENOMEM; 9082 goto init_err_release; 9083 } 9084 9085 bp->bar2 = pci_ioremap_bar(pdev, 4); 9086 if (!bp->bar2) { 9087 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 9088 rc = -ENOMEM; 9089 goto init_err_release; 9090 } 9091 9092 pci_enable_pcie_error_reporting(pdev); 9093 9094 INIT_WORK(&bp->sp_task, bnxt_sp_task); 9095 9096 spin_lock_init(&bp->ntp_fltr_lock); 9097 #if BITS_PER_LONG == 32 9098 spin_lock_init(&bp->db_lock); 9099 #endif 9100 9101 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 9102 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 9103 9104 bnxt_init_dflt_coal(bp); 9105 9106 timer_setup(&bp->timer, bnxt_timer, 0); 9107 bp->current_interval = BNXT_TIMER_INTERVAL; 9108 9109 clear_bit(BNXT_STATE_OPEN, &bp->state); 9110 return 0; 9111 9112 init_err_release: 9113 bnxt_unmap_bars(bp, pdev); 9114 pci_release_regions(pdev); 9115 9116 init_err_disable: 9117 pci_disable_device(pdev); 9118 9119 init_err: 9120 return rc; 9121 } 9122 9123 /* rtnl_lock held */ 9124 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 9125 { 9126 struct sockaddr *addr = p; 9127 struct bnxt *bp = netdev_priv(dev); 9128 int rc = 0; 9129 9130 if (!is_valid_ether_addr(addr->sa_data)) 9131 return -EADDRNOTAVAIL; 9132 9133 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 9134 return 0; 9135 9136 rc = bnxt_approve_mac(bp, addr->sa_data, true); 9137 if (rc) 9138 return rc; 9139 9140 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 9141 if (netif_running(dev)) { 9142 bnxt_close_nic(bp, false, false); 9143 rc = bnxt_open_nic(bp, false, false); 9144 } 9145 9146 return rc; 9147 } 9148 9149 /* rtnl_lock held */ 9150 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 9151 { 9152 struct bnxt *bp = netdev_priv(dev); 9153 9154 if (netif_running(dev)) 9155 bnxt_close_nic(bp, false, false); 9156 9157 dev->mtu = new_mtu; 9158 bnxt_set_ring_params(bp); 9159 9160 if (netif_running(dev)) 9161 return bnxt_open_nic(bp, false, false); 9162 9163 return 0; 9164 } 9165 9166 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 9167 { 9168 struct bnxt *bp = netdev_priv(dev); 9169 bool sh = false; 9170 int rc; 9171 9172 if (tc > bp->max_tc) { 9173 netdev_err(dev, "Too many traffic classes requested: %d. 
Max supported is %d.\n", 9174 tc, bp->max_tc); 9175 return -EINVAL; 9176 } 9177 9178 if (netdev_get_num_tc(dev) == tc) 9179 return 0; 9180 9181 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 9182 sh = true; 9183 9184 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 9185 sh, tc, bp->tx_nr_rings_xdp); 9186 if (rc) 9187 return rc; 9188 9189 /* Needs to close the device and do hw resource re-allocations */ 9190 if (netif_running(bp->dev)) 9191 bnxt_close_nic(bp, true, false); 9192 9193 if (tc) { 9194 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 9195 netdev_set_num_tc(dev, tc); 9196 } else { 9197 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 9198 netdev_reset_tc(dev); 9199 } 9200 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 9201 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 9202 bp->tx_nr_rings + bp->rx_nr_rings; 9203 bp->num_stat_ctxs = bp->cp_nr_rings; 9204 9205 if (netif_running(bp->dev)) 9206 return bnxt_open_nic(bp, true, false); 9207 9208 return 0; 9209 } 9210 9211 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 9212 void *cb_priv) 9213 { 9214 struct bnxt *bp = cb_priv; 9215 9216 if (!bnxt_tc_flower_enabled(bp) || 9217 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 9218 return -EOPNOTSUPP; 9219 9220 switch (type) { 9221 case TC_SETUP_CLSFLOWER: 9222 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 9223 default: 9224 return -EOPNOTSUPP; 9225 } 9226 } 9227 9228 static int bnxt_setup_tc_block(struct net_device *dev, 9229 struct tc_block_offload *f) 9230 { 9231 struct bnxt *bp = netdev_priv(dev); 9232 9233 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 9234 return -EOPNOTSUPP; 9235 9236 switch (f->command) { 9237 case TC_BLOCK_BIND: 9238 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, 9239 bp, bp, f->extack); 9240 case TC_BLOCK_UNBIND: 9241 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); 9242 return 0; 9243 default: 9244 return -EOPNOTSUPP; 9245 } 9246 } 9247 9248 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 9249 void *type_data) 9250 { 9251 switch (type) { 9252 case TC_SETUP_BLOCK: 9253 return bnxt_setup_tc_block(dev, type_data); 9254 case TC_SETUP_QDISC_MQPRIO: { 9255 struct tc_mqprio_qopt *mqprio = type_data; 9256 9257 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 9258 9259 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 9260 } 9261 default: 9262 return -EOPNOTSUPP; 9263 } 9264 } 9265 9266 #ifdef CONFIG_RFS_ACCEL 9267 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 9268 struct bnxt_ntuple_filter *f2) 9269 { 9270 struct flow_keys *keys1 = &f1->fkeys; 9271 struct flow_keys *keys2 = &f2->fkeys; 9272 9273 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && 9274 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && 9275 keys1->ports.ports == keys2->ports.ports && 9276 keys1->basic.ip_proto == keys2->basic.ip_proto && 9277 keys1->basic.n_proto == keys2->basic.n_proto && 9278 keys1->control.flags == keys2->control.flags && 9279 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 9280 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 9281 return true; 9282 9283 return false; 9284 } 9285 9286 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 9287 u16 rxq_index, u32 flow_id) 9288 { 9289 struct bnxt *bp = netdev_priv(dev); 9290 struct bnxt_ntuple_filter *fltr, *new_fltr; 9291 struct flow_keys *fkeys; 9292 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 9293 int rc = 0, idx, bit_id, l2_idx = 0; 9294 
struct hlist_head *head; 9295 9296 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 9297 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9298 int off = 0, j; 9299 9300 netif_addr_lock_bh(dev); 9301 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 9302 if (ether_addr_equal(eth->h_dest, 9303 vnic->uc_list + off)) { 9304 l2_idx = j + 1; 9305 break; 9306 } 9307 } 9308 netif_addr_unlock_bh(dev); 9309 if (!l2_idx) 9310 return -EINVAL; 9311 } 9312 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 9313 if (!new_fltr) 9314 return -ENOMEM; 9315 9316 fkeys = &new_fltr->fkeys; 9317 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 9318 rc = -EPROTONOSUPPORT; 9319 goto err_free; 9320 } 9321 9322 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 9323 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 9324 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 9325 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 9326 rc = -EPROTONOSUPPORT; 9327 goto err_free; 9328 } 9329 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 9330 bp->hwrm_spec_code < 0x10601) { 9331 rc = -EPROTONOSUPPORT; 9332 goto err_free; 9333 } 9334 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && 9335 bp->hwrm_spec_code < 0x10601) { 9336 rc = -EPROTONOSUPPORT; 9337 goto err_free; 9338 } 9339 9340 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 9341 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 9342 9343 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 9344 head = &bp->ntp_fltr_hash_tbl[idx]; 9345 rcu_read_lock(); 9346 hlist_for_each_entry_rcu(fltr, head, hash) { 9347 if (bnxt_fltr_match(fltr, new_fltr)) { 9348 rcu_read_unlock(); 9349 rc = 0; 9350 goto err_free; 9351 } 9352 } 9353 rcu_read_unlock(); 9354 9355 spin_lock_bh(&bp->ntp_fltr_lock); 9356 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 9357 BNXT_NTP_FLTR_MAX_FLTR, 0); 9358 if (bit_id < 0) { 9359 spin_unlock_bh(&bp->ntp_fltr_lock); 9360 rc = -ENOMEM; 9361 goto err_free; 9362 } 9363 9364 new_fltr->sw_id = (u16)bit_id; 9365 new_fltr->flow_id = flow_id; 9366 new_fltr->l2_fltr_idx = l2_idx; 9367 new_fltr->rxq = rxq_index; 9368 hlist_add_head_rcu(&new_fltr->hash, head); 9369 bp->ntp_fltr_count++; 9370 spin_unlock_bh(&bp->ntp_fltr_lock); 9371 9372 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 9373 bnxt_queue_sp_work(bp); 9374 9375 return new_fltr->sw_id; 9376 9377 err_free: 9378 kfree(new_fltr); 9379 return rc; 9380 } 9381 9382 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 9383 { 9384 int i; 9385 9386 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 9387 struct hlist_head *head; 9388 struct hlist_node *tmp; 9389 struct bnxt_ntuple_filter *fltr; 9390 int rc; 9391 9392 head = &bp->ntp_fltr_hash_tbl[i]; 9393 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 9394 bool del = false; 9395 9396 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 9397 if (rps_may_expire_flow(bp->dev, fltr->rxq, 9398 fltr->flow_id, 9399 fltr->sw_id)) { 9400 bnxt_hwrm_cfa_ntuple_filter_free(bp, 9401 fltr); 9402 del = true; 9403 } 9404 } else { 9405 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 9406 fltr); 9407 if (rc) 9408 del = true; 9409 else 9410 set_bit(BNXT_FLTR_VALID, &fltr->state); 9411 } 9412 9413 if (del) { 9414 spin_lock_bh(&bp->ntp_fltr_lock); 9415 hlist_del_rcu(&fltr->hash); 9416 bp->ntp_fltr_count--; 9417 spin_unlock_bh(&bp->ntp_fltr_lock); 9418 synchronize_rcu(); 9419 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 9420 kfree(fltr); 9421 } 9422 } 9423 } 9424 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 9425 netdev_info(bp->dev, "Receive PF driver 
unload event!"); 9426 } 9427 9428 #else 9429 9430 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 9431 { 9432 } 9433 9434 #endif /* CONFIG_RFS_ACCEL */ 9435 9436 static void bnxt_udp_tunnel_add(struct net_device *dev, 9437 struct udp_tunnel_info *ti) 9438 { 9439 struct bnxt *bp = netdev_priv(dev); 9440 9441 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 9442 return; 9443 9444 if (!netif_running(dev)) 9445 return; 9446 9447 switch (ti->type) { 9448 case UDP_TUNNEL_TYPE_VXLAN: 9449 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) 9450 return; 9451 9452 bp->vxlan_port_cnt++; 9453 if (bp->vxlan_port_cnt == 1) { 9454 bp->vxlan_port = ti->port; 9455 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 9456 bnxt_queue_sp_work(bp); 9457 } 9458 break; 9459 case UDP_TUNNEL_TYPE_GENEVE: 9460 if (bp->nge_port_cnt && bp->nge_port != ti->port) 9461 return; 9462 9463 bp->nge_port_cnt++; 9464 if (bp->nge_port_cnt == 1) { 9465 bp->nge_port = ti->port; 9466 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); 9467 } 9468 break; 9469 default: 9470 return; 9471 } 9472 9473 bnxt_queue_sp_work(bp); 9474 } 9475 9476 static void bnxt_udp_tunnel_del(struct net_device *dev, 9477 struct udp_tunnel_info *ti) 9478 { 9479 struct bnxt *bp = netdev_priv(dev); 9480 9481 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 9482 return; 9483 9484 if (!netif_running(dev)) 9485 return; 9486 9487 switch (ti->type) { 9488 case UDP_TUNNEL_TYPE_VXLAN: 9489 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) 9490 return; 9491 bp->vxlan_port_cnt--; 9492 9493 if (bp->vxlan_port_cnt != 0) 9494 return; 9495 9496 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 9497 break; 9498 case UDP_TUNNEL_TYPE_GENEVE: 9499 if (!bp->nge_port_cnt || bp->nge_port != ti->port) 9500 return; 9501 bp->nge_port_cnt--; 9502 9503 if (bp->nge_port_cnt != 0) 9504 return; 9505 9506 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); 9507 break; 9508 default: 9509 return; 9510 } 9511 9512 bnxt_queue_sp_work(bp); 9513 } 9514 9515 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 9516 struct net_device *dev, u32 filter_mask, 9517 int nlflags) 9518 { 9519 struct bnxt *bp = netdev_priv(dev); 9520 9521 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 9522 nlflags, filter_mask, NULL); 9523 } 9524 9525 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 9526 u16 flags) 9527 { 9528 struct bnxt *bp = netdev_priv(dev); 9529 struct nlattr *attr, *br_spec; 9530 int rem, rc = 0; 9531 9532 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 9533 return -EOPNOTSUPP; 9534 9535 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 9536 if (!br_spec) 9537 return -EINVAL; 9538 9539 nla_for_each_nested(attr, br_spec, rem) { 9540 u16 mode; 9541 9542 if (nla_type(attr) != IFLA_BRIDGE_MODE) 9543 continue; 9544 9545 if (nla_len(attr) < sizeof(mode)) 9546 return -EINVAL; 9547 9548 mode = nla_get_u16(attr); 9549 if (mode == bp->br_mode) 9550 break; 9551 9552 rc = bnxt_hwrm_set_br_mode(bp, mode); 9553 if (!rc) 9554 bp->br_mode = mode; 9555 break; 9556 } 9557 return rc; 9558 } 9559 9560 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, 9561 size_t len) 9562 { 9563 struct bnxt *bp = netdev_priv(dev); 9564 int rc; 9565 9566 /* The PF and it's VF-reps only support the switchdev framework */ 9567 if (!BNXT_PF(bp)) 9568 return -EOPNOTSUPP; 9569 9570 rc = snprintf(buf, len, "p%d", bp->pf.port_id); 9571 9572 if (rc >= len) 9573 return -EOPNOTSUPP; 
9574 return 0; 9575 } 9576 9577 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) 9578 { 9579 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 9580 return -EOPNOTSUPP; 9581 9582 /* The PF and it's VF-reps only support the switchdev framework */ 9583 if (!BNXT_PF(bp)) 9584 return -EOPNOTSUPP; 9585 9586 switch (attr->id) { 9587 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: 9588 attr->u.ppid.id_len = sizeof(bp->switch_id); 9589 memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len); 9590 break; 9591 default: 9592 return -EOPNOTSUPP; 9593 } 9594 return 0; 9595 } 9596 9597 static int bnxt_swdev_port_attr_get(struct net_device *dev, 9598 struct switchdev_attr *attr) 9599 { 9600 return bnxt_port_attr_get(netdev_priv(dev), attr); 9601 } 9602 9603 static const struct switchdev_ops bnxt_switchdev_ops = { 9604 .switchdev_port_attr_get = bnxt_swdev_port_attr_get 9605 }; 9606 9607 static const struct net_device_ops bnxt_netdev_ops = { 9608 .ndo_open = bnxt_open, 9609 .ndo_start_xmit = bnxt_start_xmit, 9610 .ndo_stop = bnxt_close, 9611 .ndo_get_stats64 = bnxt_get_stats64, 9612 .ndo_set_rx_mode = bnxt_set_rx_mode, 9613 .ndo_do_ioctl = bnxt_ioctl, 9614 .ndo_validate_addr = eth_validate_addr, 9615 .ndo_set_mac_address = bnxt_change_mac_addr, 9616 .ndo_change_mtu = bnxt_change_mtu, 9617 .ndo_fix_features = bnxt_fix_features, 9618 .ndo_set_features = bnxt_set_features, 9619 .ndo_tx_timeout = bnxt_tx_timeout, 9620 #ifdef CONFIG_BNXT_SRIOV 9621 .ndo_get_vf_config = bnxt_get_vf_config, 9622 .ndo_set_vf_mac = bnxt_set_vf_mac, 9623 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 9624 .ndo_set_vf_rate = bnxt_set_vf_bw, 9625 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 9626 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 9627 .ndo_set_vf_trust = bnxt_set_vf_trust, 9628 #endif 9629 .ndo_setup_tc = bnxt_setup_tc, 9630 #ifdef CONFIG_RFS_ACCEL 9631 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 9632 #endif 9633 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 9634 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 9635 .ndo_bpf = bnxt_xdp, 9636 .ndo_bridge_getlink = bnxt_bridge_getlink, 9637 .ndo_bridge_setlink = bnxt_bridge_setlink, 9638 .ndo_get_phys_port_name = bnxt_get_phys_port_name 9639 }; 9640 9641 static void bnxt_remove_one(struct pci_dev *pdev) 9642 { 9643 struct net_device *dev = pci_get_drvdata(pdev); 9644 struct bnxt *bp = netdev_priv(dev); 9645 9646 if (BNXT_PF(bp)) { 9647 bnxt_sriov_disable(bp); 9648 bnxt_dl_unregister(bp); 9649 } 9650 9651 pci_disable_pcie_error_reporting(pdev); 9652 unregister_netdev(dev); 9653 bnxt_shutdown_tc(bp); 9654 bnxt_cancel_sp_work(bp); 9655 bp->sp_event = 0; 9656 9657 bnxt_clear_int_mode(bp); 9658 bnxt_hwrm_func_drv_unrgtr(bp); 9659 bnxt_free_hwrm_resources(bp); 9660 bnxt_free_hwrm_short_cmd_req(bp); 9661 bnxt_ethtool_free(bp); 9662 bnxt_dcb_free(bp); 9663 kfree(bp->edev); 9664 bp->edev = NULL; 9665 bnxt_free_ctx_mem(bp); 9666 kfree(bp->ctx); 9667 bp->ctx = NULL; 9668 bnxt_cleanup_pci(bp); 9669 free_netdev(dev); 9670 } 9671 9672 static int bnxt_probe_phy(struct bnxt *bp) 9673 { 9674 int rc = 0; 9675 struct bnxt_link_info *link_info = &bp->link_info; 9676 9677 rc = bnxt_hwrm_phy_qcaps(bp); 9678 if (rc) { 9679 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 9680 rc); 9681 return rc; 9682 } 9683 mutex_init(&bp->link_lock); 9684 9685 rc = bnxt_update_link(bp, false); 9686 if (rc) { 9687 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 9688 rc); 9689 return rc; 9690 } 9691 9692 /* Older firmware does not have supported_auto_speeds, so assume 9693 * 
that all supported speeds can be autonegotiated. 9694 */ 9695 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 9696 link_info->support_auto_speeds = link_info->support_speeds; 9697 9698 /* Initialize the ethtool setting copy with NVM settings */ 9699 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 9700 link_info->autoneg = BNXT_AUTONEG_SPEED; 9701 if (bp->hwrm_spec_code >= 0x10201) { 9702 if (link_info->auto_pause_setting & 9703 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 9704 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 9705 } else { 9706 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 9707 } 9708 link_info->advertising = link_info->auto_link_speeds; 9709 } else { 9710 link_info->req_link_speed = link_info->force_link_speed; 9711 link_info->req_duplex = link_info->duplex_setting; 9712 } 9713 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 9714 link_info->req_flow_ctrl = 9715 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 9716 else 9717 link_info->req_flow_ctrl = link_info->force_pause_setting; 9718 return rc; 9719 } 9720 9721 static int bnxt_get_max_irq(struct pci_dev *pdev) 9722 { 9723 u16 ctrl; 9724 9725 if (!pdev->msix_cap) 9726 return 1; 9727 9728 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 9729 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 9730 } 9731 9732 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 9733 int *max_cp) 9734 { 9735 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9736 int max_ring_grps = 0; 9737 9738 *max_tx = hw_resc->max_tx_rings; 9739 *max_rx = hw_resc->max_rx_rings; 9740 *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), 9741 hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp)); 9742 *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); 9743 max_ring_grps = hw_resc->max_hw_ring_grps; 9744 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 9745 *max_cp -= 1; 9746 *max_rx -= 2; 9747 } 9748 if (bp->flags & BNXT_FLAG_AGG_RINGS) 9749 *max_rx >>= 1; 9750 *max_rx = min_t(int, *max_rx, max_ring_grps); 9751 } 9752 9753 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 9754 { 9755 int rx, tx, cp; 9756 9757 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 9758 *max_rx = rx; 9759 *max_tx = tx; 9760 if (!rx || !tx || !cp) 9761 return -ENOMEM; 9762 9763 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 9764 } 9765 9766 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 9767 bool shared) 9768 { 9769 int rc; 9770 9771 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 9772 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 9773 /* Not enough rings, try disabling agg rings.
*/ 9774 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 9775 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 9776 if (rc) { 9777 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 9778 bp->flags |= BNXT_FLAG_AGG_RINGS; 9779 return rc; 9780 } 9781 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 9782 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 9783 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 9784 bnxt_set_ring_params(bp); 9785 } 9786 9787 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 9788 int max_cp, max_stat, max_irq; 9789 9790 /* Reserve minimum resources for RoCE */ 9791 max_cp = bnxt_get_max_func_cp_rings(bp); 9792 max_stat = bnxt_get_max_func_stat_ctxs(bp); 9793 max_irq = bnxt_get_max_func_irqs(bp); 9794 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 9795 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 9796 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 9797 return 0; 9798 9799 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 9800 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 9801 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 9802 max_cp = min_t(int, max_cp, max_irq); 9803 max_cp = min_t(int, max_cp, max_stat); 9804 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 9805 if (rc) 9806 rc = 0; 9807 } 9808 return rc; 9809 } 9810 9811 /* In initial default shared ring setting, each shared ring must have a 9812 * RX/TX ring pair. 9813 */ 9814 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 9815 { 9816 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 9817 bp->rx_nr_rings = bp->cp_nr_rings; 9818 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 9819 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 9820 } 9821 9822 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 9823 { 9824 int dflt_rings, max_rx_rings, max_tx_rings, rc; 9825 9826 if (!bnxt_can_reserve_rings(bp)) 9827 return 0; 9828 9829 if (sh) 9830 bp->flags |= BNXT_FLAG_SHARED_RINGS; 9831 dflt_rings = netif_get_num_default_rss_queues(); 9832 /* Reduce default rings on multi-port cards so that total default 9833 * rings do not exceed CPU count. 9834 */ 9835 if (bp->port_count > 1) { 9836 int max_rings = 9837 max_t(int, num_online_cpus() / bp->port_count, 1); 9838 9839 dflt_rings = min_t(int, dflt_rings, max_rings); 9840 } 9841 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 9842 if (rc) 9843 return rc; 9844 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 9845 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 9846 if (sh) 9847 bnxt_trim_dflt_sh_rings(bp); 9848 else 9849 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 9850 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 9851 9852 rc = __bnxt_reserve_rings(bp); 9853 if (rc) 9854 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 9855 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 9856 if (sh) 9857 bnxt_trim_dflt_sh_rings(bp); 9858 9859 /* Rings may have been trimmed, re-reserve the trimmed rings. 
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
		bp->flags |= BNXT_FLAG_RFS;
		bp->dev->features |= NETIF_F_NTUPLE;
	}
init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}
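/* Set the netdev MAC address: the PF uses the address provided by
 * firmware; a VF uses the administratively assigned address if one is
 * valid, otherwise a random address, and then asks the PF to approve it.
 */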
static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err_pci_clean;

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
	    bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
		if (rc)
			goto init_err_pci_clean;
	}

	if (BNXT_CHIP_P5(bp))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err_pci_clean;
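	/* Register with firmware for asynchronous event notifications */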
	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err_pci_clean;

	bp->ulp_probe = bnxt_ulp_probe;

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}
	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}
	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	bnxt_hwrm_func_qcfg(bp);
	bnxt_hwrm_port_led_qcaps(bp);
	bnxt_ethtool_init(bp);
	bnxt_dcb_init(bp);

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}
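	/* Query VNIC capabilities and advertise nTuple filtering (aRFS)
	 * when the device and current ring/VNIC resources can support it.
	 */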
	bnxt_hwrm_vnic_qcaps(bp);
	if (bnxt_rfs_supported(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	bnxt_get_wol_settings(bp);
	if (bp->flags & BNXT_FLAG_WOL_CAP)
		device_set_wakeup_enable(&pdev->dev, bp->wol);
	else
		device_set_wakeup_capable(&pdev->dev, false);

	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());

	bnxt_hwrm_coal_params_qcaps(bp);

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup_tc;

	if (BNXT_PF(bp))
		bnxt_dl_register(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
	pcie_print_link_status(pdev);

	return 0;

init_err_cleanup_tc:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_free_hwrm_resources(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);

	if (system_state == SYSTEM_POWER_OFF) {
		bnxt_clear_int_mode(bp);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}
	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
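/* PCI Advanced Error Reporting (AER) recovery callbacks, wired up below
 * through the bnxt_err_handler table.
 */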
/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);